/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 - 2016 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

#include "i40e_type.h"
#include "i40e_adminq.h"
#include "i40e_prototype.h"
#include <linux/avf/virtchnl.h>

/**
 * i40e_set_mac_type - Sets MAC type
 * @hw: pointer to the HW structure
 *
 * This function sets the mac type of the adapter based on the
 * vendor ID and device ID stored in the hw structure.
 **/
static i40e_status i40e_set_mac_type(struct i40e_hw *hw)
{
	i40e_status status = 0;

	if (hw->vendor_id == PCI_VENDOR_ID_INTEL) {
		switch (hw->device_id) {
		case I40E_DEV_ID_SFP_XL710:
		case I40E_DEV_ID_QEMU:
		case I40E_DEV_ID_KX_B:
		case I40E_DEV_ID_KX_C:
		case I40E_DEV_ID_QSFP_A:
		case I40E_DEV_ID_QSFP_B:
		case I40E_DEV_ID_QSFP_C:
		case I40E_DEV_ID_10G_BASE_T:
		case I40E_DEV_ID_10G_BASE_T4:
		case I40E_DEV_ID_20G_KR2:
		case I40E_DEV_ID_20G_KR2_A:
		case I40E_DEV_ID_25G_B:
		case I40E_DEV_ID_25G_SFP28:
			hw->mac.type = I40E_MAC_XL710;
			break;
		case I40E_DEV_ID_KX_X722:
		case I40E_DEV_ID_QSFP_X722:
		case I40E_DEV_ID_SFP_X722:
		case I40E_DEV_ID_1G_BASE_T_X722:
		case I40E_DEV_ID_10G_BASE_T_X722:
		case I40E_DEV_ID_SFP_I_X722:
			hw->mac.type = I40E_MAC_X722;
			break;
		default:
			hw->mac.type = I40E_MAC_GENERIC;
			break;
		}
	} else {
		status = I40E_ERR_DEVICE_NOT_SUPPORTED;
	}

	hw_dbg(hw, "i40e_set_mac_type found mac: %d, returns: %d\n",
	       hw->mac.type, status);
	return status;
}

/**
 * i40e_aq_str - convert AQ err code to a string
 * @hw: pointer to the HW structure
 * @aq_err: the AQ error code to convert
 **/
const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err)
{
	switch (aq_err) {
	case I40E_AQ_RC_OK:
		return "OK";
	case I40E_AQ_RC_EPERM:
		return "I40E_AQ_RC_EPERM";
	case I40E_AQ_RC_ENOENT:
		return "I40E_AQ_RC_ENOENT";
	case I40E_AQ_RC_ESRCH:
		return "I40E_AQ_RC_ESRCH";
	case I40E_AQ_RC_EINTR:
		return "I40E_AQ_RC_EINTR";
	case I40E_AQ_RC_EIO:
		return "I40E_AQ_RC_EIO";
	case I40E_AQ_RC_ENXIO:
		return "I40E_AQ_RC_ENXIO";
	case I40E_AQ_RC_E2BIG:
		return "I40E_AQ_RC_E2BIG";
	case I40E_AQ_RC_EAGAIN:
		return "I40E_AQ_RC_EAGAIN";
	case I40E_AQ_RC_ENOMEM:
		return "I40E_AQ_RC_ENOMEM";
	case I40E_AQ_RC_EACCES:
		return "I40E_AQ_RC_EACCES";
	case I40E_AQ_RC_EFAULT:
		return "I40E_AQ_RC_EFAULT";
	case I40E_AQ_RC_EBUSY:
return "I40E_AQ_RC_EBUSY"; 115 case I40E_AQ_RC_EEXIST: 116 return "I40E_AQ_RC_EEXIST"; 117 case I40E_AQ_RC_EINVAL: 118 return "I40E_AQ_RC_EINVAL"; 119 case I40E_AQ_RC_ENOTTY: 120 return "I40E_AQ_RC_ENOTTY"; 121 case I40E_AQ_RC_ENOSPC: 122 return "I40E_AQ_RC_ENOSPC"; 123 case I40E_AQ_RC_ENOSYS: 124 return "I40E_AQ_RC_ENOSYS"; 125 case I40E_AQ_RC_ERANGE: 126 return "I40E_AQ_RC_ERANGE"; 127 case I40E_AQ_RC_EFLUSHED: 128 return "I40E_AQ_RC_EFLUSHED"; 129 case I40E_AQ_RC_BAD_ADDR: 130 return "I40E_AQ_RC_BAD_ADDR"; 131 case I40E_AQ_RC_EMODE: 132 return "I40E_AQ_RC_EMODE"; 133 case I40E_AQ_RC_EFBIG: 134 return "I40E_AQ_RC_EFBIG"; 135 } 136 137 snprintf(hw->err_str, sizeof(hw->err_str), "%d", aq_err); 138 return hw->err_str; 139 } 140 141 /** 142 * i40e_stat_str - convert status err code to a string 143 * @hw: pointer to the HW structure 144 * @stat_err: the status error code to convert 145 **/ 146 const char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err) 147 { 148 switch (stat_err) { 149 case 0: 150 return "OK"; 151 case I40E_ERR_NVM: 152 return "I40E_ERR_NVM"; 153 case I40E_ERR_NVM_CHECKSUM: 154 return "I40E_ERR_NVM_CHECKSUM"; 155 case I40E_ERR_PHY: 156 return "I40E_ERR_PHY"; 157 case I40E_ERR_CONFIG: 158 return "I40E_ERR_CONFIG"; 159 case I40E_ERR_PARAM: 160 return "I40E_ERR_PARAM"; 161 case I40E_ERR_MAC_TYPE: 162 return "I40E_ERR_MAC_TYPE"; 163 case I40E_ERR_UNKNOWN_PHY: 164 return "I40E_ERR_UNKNOWN_PHY"; 165 case I40E_ERR_LINK_SETUP: 166 return "I40E_ERR_LINK_SETUP"; 167 case I40E_ERR_ADAPTER_STOPPED: 168 return "I40E_ERR_ADAPTER_STOPPED"; 169 case I40E_ERR_INVALID_MAC_ADDR: 170 return "I40E_ERR_INVALID_MAC_ADDR"; 171 case I40E_ERR_DEVICE_NOT_SUPPORTED: 172 return "I40E_ERR_DEVICE_NOT_SUPPORTED"; 173 case I40E_ERR_MASTER_REQUESTS_PENDING: 174 return "I40E_ERR_MASTER_REQUESTS_PENDING"; 175 case I40E_ERR_INVALID_LINK_SETTINGS: 176 return "I40E_ERR_INVALID_LINK_SETTINGS"; 177 case I40E_ERR_AUTONEG_NOT_COMPLETE: 178 return "I40E_ERR_AUTONEG_NOT_COMPLETE"; 179 case I40E_ERR_RESET_FAILED: 180 return "I40E_ERR_RESET_FAILED"; 181 case I40E_ERR_SWFW_SYNC: 182 return "I40E_ERR_SWFW_SYNC"; 183 case I40E_ERR_NO_AVAILABLE_VSI: 184 return "I40E_ERR_NO_AVAILABLE_VSI"; 185 case I40E_ERR_NO_MEMORY: 186 return "I40E_ERR_NO_MEMORY"; 187 case I40E_ERR_BAD_PTR: 188 return "I40E_ERR_BAD_PTR"; 189 case I40E_ERR_RING_FULL: 190 return "I40E_ERR_RING_FULL"; 191 case I40E_ERR_INVALID_PD_ID: 192 return "I40E_ERR_INVALID_PD_ID"; 193 case I40E_ERR_INVALID_QP_ID: 194 return "I40E_ERR_INVALID_QP_ID"; 195 case I40E_ERR_INVALID_CQ_ID: 196 return "I40E_ERR_INVALID_CQ_ID"; 197 case I40E_ERR_INVALID_CEQ_ID: 198 return "I40E_ERR_INVALID_CEQ_ID"; 199 case I40E_ERR_INVALID_AEQ_ID: 200 return "I40E_ERR_INVALID_AEQ_ID"; 201 case I40E_ERR_INVALID_SIZE: 202 return "I40E_ERR_INVALID_SIZE"; 203 case I40E_ERR_INVALID_ARP_INDEX: 204 return "I40E_ERR_INVALID_ARP_INDEX"; 205 case I40E_ERR_INVALID_FPM_FUNC_ID: 206 return "I40E_ERR_INVALID_FPM_FUNC_ID"; 207 case I40E_ERR_QP_INVALID_MSG_SIZE: 208 return "I40E_ERR_QP_INVALID_MSG_SIZE"; 209 case I40E_ERR_QP_TOOMANY_WRS_POSTED: 210 return "I40E_ERR_QP_TOOMANY_WRS_POSTED"; 211 case I40E_ERR_INVALID_FRAG_COUNT: 212 return "I40E_ERR_INVALID_FRAG_COUNT"; 213 case I40E_ERR_QUEUE_EMPTY: 214 return "I40E_ERR_QUEUE_EMPTY"; 215 case I40E_ERR_INVALID_ALIGNMENT: 216 return "I40E_ERR_INVALID_ALIGNMENT"; 217 case I40E_ERR_FLUSHED_QUEUE: 218 return "I40E_ERR_FLUSHED_QUEUE"; 219 case I40E_ERR_INVALID_PUSH_PAGE_INDEX: 220 return "I40E_ERR_INVALID_PUSH_PAGE_INDEX"; 221 case 
	case I40E_ERR_INVALID_IMM_DATA_SIZE:
		return "I40E_ERR_INVALID_IMM_DATA_SIZE";
	case I40E_ERR_TIMEOUT:
		return "I40E_ERR_TIMEOUT";
	case I40E_ERR_OPCODE_MISMATCH:
		return "I40E_ERR_OPCODE_MISMATCH";
	case I40E_ERR_CQP_COMPL_ERROR:
		return "I40E_ERR_CQP_COMPL_ERROR";
	case I40E_ERR_INVALID_VF_ID:
		return "I40E_ERR_INVALID_VF_ID";
	case I40E_ERR_INVALID_HMCFN_ID:
		return "I40E_ERR_INVALID_HMCFN_ID";
	case I40E_ERR_BACKING_PAGE_ERROR:
		return "I40E_ERR_BACKING_PAGE_ERROR";
	case I40E_ERR_NO_PBLCHUNKS_AVAILABLE:
		return "I40E_ERR_NO_PBLCHUNKS_AVAILABLE";
	case I40E_ERR_INVALID_PBLE_INDEX:
		return "I40E_ERR_INVALID_PBLE_INDEX";
	case I40E_ERR_INVALID_SD_INDEX:
		return "I40E_ERR_INVALID_SD_INDEX";
	case I40E_ERR_INVALID_PAGE_DESC_INDEX:
		return "I40E_ERR_INVALID_PAGE_DESC_INDEX";
	case I40E_ERR_INVALID_SD_TYPE:
		return "I40E_ERR_INVALID_SD_TYPE";
	case I40E_ERR_MEMCPY_FAILED:
		return "I40E_ERR_MEMCPY_FAILED";
	case I40E_ERR_INVALID_HMC_OBJ_INDEX:
		return "I40E_ERR_INVALID_HMC_OBJ_INDEX";
	case I40E_ERR_INVALID_HMC_OBJ_COUNT:
		return "I40E_ERR_INVALID_HMC_OBJ_COUNT";
	case I40E_ERR_INVALID_SRQ_ARM_LIMIT:
		return "I40E_ERR_INVALID_SRQ_ARM_LIMIT";
	case I40E_ERR_SRQ_ENABLED:
		return "I40E_ERR_SRQ_ENABLED";
	case I40E_ERR_ADMIN_QUEUE_ERROR:
		return "I40E_ERR_ADMIN_QUEUE_ERROR";
	case I40E_ERR_ADMIN_QUEUE_TIMEOUT:
		return "I40E_ERR_ADMIN_QUEUE_TIMEOUT";
	case I40E_ERR_BUF_TOO_SHORT:
		return "I40E_ERR_BUF_TOO_SHORT";
	case I40E_ERR_ADMIN_QUEUE_FULL:
		return "I40E_ERR_ADMIN_QUEUE_FULL";
	case I40E_ERR_ADMIN_QUEUE_NO_WORK:
		return "I40E_ERR_ADMIN_QUEUE_NO_WORK";
	case I40E_ERR_BAD_IWARP_CQE:
		return "I40E_ERR_BAD_IWARP_CQE";
	case I40E_ERR_NVM_BLANK_MODE:
		return "I40E_ERR_NVM_BLANK_MODE";
	case I40E_ERR_NOT_IMPLEMENTED:
		return "I40E_ERR_NOT_IMPLEMENTED";
	case I40E_ERR_PE_DOORBELL_NOT_ENABLED:
		return "I40E_ERR_PE_DOORBELL_NOT_ENABLED";
	case I40E_ERR_DIAG_TEST_FAILED:
		return "I40E_ERR_DIAG_TEST_FAILED";
	case I40E_ERR_NOT_READY:
		return "I40E_ERR_NOT_READY";
	case I40E_NOT_SUPPORTED:
		return "I40E_NOT_SUPPORTED";
	case I40E_ERR_FIRMWARE_API_VERSION:
		return "I40E_ERR_FIRMWARE_API_VERSION";
	case I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR:
		return "I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR";
	}

	snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err);
	return hw->err_str;
}

/**
 * i40e_debug_aq
 * @hw: pointer to the HW structure
 * @mask: debug mask
 * @desc: pointer to admin queue descriptor
 * @buffer: pointer to command buffer
 * @buf_len: max length of buffer
 *
 * Dumps debug log about adminq command with descriptor contents.
 **/
void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
		   void *buffer, u16 buf_len)
{
	struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
	u16 len;
	u8 *buf = (u8 *)buffer;

	if ((!(mask & hw->debug_mask)) || (desc == NULL))
		return;

	len = le16_to_cpu(aq_desc->datalen);

	i40e_debug(hw, mask,
		   "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
		   le16_to_cpu(aq_desc->opcode),
		   le16_to_cpu(aq_desc->flags),
		   le16_to_cpu(aq_desc->datalen),
		   le16_to_cpu(aq_desc->retval));
	i40e_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n",
		   le32_to_cpu(aq_desc->cookie_high),
		   le32_to_cpu(aq_desc->cookie_low));
	i40e_debug(hw, mask, "\tparam (0,1) 0x%08X 0x%08X\n",
		   le32_to_cpu(aq_desc->params.internal.param0),
		   le32_to_cpu(aq_desc->params.internal.param1));
	i40e_debug(hw, mask, "\taddr (h,l) 0x%08X 0x%08X\n",
		   le32_to_cpu(aq_desc->params.external.addr_high),
		   le32_to_cpu(aq_desc->params.external.addr_low));

	if ((buffer != NULL) && (aq_desc->datalen != 0)) {
		i40e_debug(hw, mask, "AQ CMD Buffer:\n");
		if (buf_len < len)
			len = buf_len;
		/* write the full 16-byte chunks */
		if (hw->debug_mask & mask) {
			char prefix[27];

			snprintf(prefix, sizeof(prefix),
				 "i40e %02x:%02x.%x: \t0x",
				 hw->bus.bus_id,
				 hw->bus.device,
				 hw->bus.func);

			print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET,
				       16, 1, buf, len, false);
		}
	}
}

/**
 * i40e_check_asq_alive
 * @hw: pointer to the hw struct
 *
 * Returns true if Queue is enabled else false.
 **/
bool i40e_check_asq_alive(struct i40e_hw *hw)
{
	if (hw->aq.asq.len)
		return !!(rd32(hw, hw->aq.asq.len) &
			  I40E_PF_ATQLEN_ATQENABLE_MASK);
	else
		return false;
}

/**
 * i40e_aq_queue_shutdown
 * @hw: pointer to the hw struct
 * @unloading: is the driver unloading itself
 *
 * Tell the Firmware that we're shutting down the AdminQ and whether
 * or not the driver is unloading as well.
 **/
i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw,
				   bool unloading)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_queue_shutdown *cmd =
		(struct i40e_aqc_queue_shutdown *)&desc.params.raw;
	i40e_status status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_queue_shutdown);

	if (unloading)
		cmd->driver_unloading = cpu_to_le32(I40E_AQ_DRIVER_UNLOADING);
	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);

	return status;
}

/**
 * i40e_aq_get_set_rss_lut
 * @hw: pointer to the hardware structure
 * @vsi_id: vsi fw index
 * @pf_lut: for PF table set true, for VSI table set false
 * @lut: pointer to the lut buffer provided by the caller
 * @lut_size: size of the lut buffer
 * @set: set true to set the table, false to get the table
 *
 * Internal function to get or set RSS look up table
 **/
static i40e_status i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
					   u16 vsi_id, bool pf_lut,
					   u8 *lut, u16 lut_size,
					   bool set)
{
	i40e_status status;
	struct i40e_aq_desc desc;
	struct i40e_aqc_get_set_rss_lut *cmd_resp =
		(struct i40e_aqc_get_set_rss_lut *)&desc.params.raw;

	if (set)
		i40e_fill_default_direct_cmd_desc(&desc,
						  i40e_aqc_opc_set_rss_lut);
	else
		i40e_fill_default_direct_cmd_desc(&desc,
						  i40e_aqc_opc_get_rss_lut);

	/* Indirect command */
	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);

	cmd_resp->vsi_id =
			cpu_to_le16((u16)((vsi_id <<
					  I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) &
					  I40E_AQC_SET_RSS_LUT_VSI_ID_MASK));
	cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_LUT_VSI_VALID);

	if (pf_lut)
		cmd_resp->flags |= cpu_to_le16((u16)
				((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF <<
				  I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
				 I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
	else
		cmd_resp->flags |= cpu_to_le16((u16)
				((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI <<
				  I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
				 I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));

	status = i40e_asq_send_command(hw, &desc, lut, lut_size, NULL);

	return status;
}

/**
 * i40e_aq_get_rss_lut
 * @hw: pointer to the hardware structure
 * @vsi_id: vsi fw index
 * @pf_lut: for PF table set true, for VSI table set false
 * @lut: pointer to the lut buffer provided by the caller
 * @lut_size: size of the lut buffer
 *
 * get the RSS lookup table, PF or VSI type
 **/
i40e_status i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id,
				bool pf_lut, u8 *lut, u16 lut_size)
{
	return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size,
				       false);
}

/**
 * i40e_aq_set_rss_lut
 * @hw: pointer to the hardware structure
 * @vsi_id: vsi fw index
 * @pf_lut: for PF table set true, for VSI table set false
 * @lut: pointer to the lut buffer provided by the caller
 * @lut_size: size of the lut buffer
 *
 * set the RSS lookup table, PF or VSI type
 **/
i40e_status i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id,
				bool pf_lut, u8 *lut, u16 lut_size)
{
	return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true);
}

/**
 * i40e_aq_get_set_rss_key
 * @hw: pointer to the hw struct
 * @vsi_id: vsi fw index
 * @key: pointer to key info struct
 * @set: set true to set the key, false to get the key
 *
 * Internal function to get or set the RSS key per VSI
 **/
static i40e_status i40e_aq_get_set_rss_key(struct i40e_hw *hw,
				u16 vsi_id,
				struct i40e_aqc_get_set_rss_key_data *key,
				bool set)
{
	i40e_status status;
	struct i40e_aq_desc desc;
	struct i40e_aqc_get_set_rss_key *cmd_resp =
		(struct i40e_aqc_get_set_rss_key *)&desc.params.raw;
	u16 key_size = sizeof(struct i40e_aqc_get_set_rss_key_data);

	if (set)
		i40e_fill_default_direct_cmd_desc(&desc,
						  i40e_aqc_opc_set_rss_key);
	else
		i40e_fill_default_direct_cmd_desc(&desc,
						  i40e_aqc_opc_get_rss_key);

	/* Indirect command */
	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);

	cmd_resp->vsi_id =
			cpu_to_le16((u16)((vsi_id <<
					  I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) &
					  I40E_AQC_SET_RSS_KEY_VSI_ID_MASK));
	cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_KEY_VSI_VALID);

	status = i40e_asq_send_command(hw, &desc, key, key_size, NULL);

	return status;
}

/**
 * i40e_aq_get_rss_key
 * @hw: pointer to the hw struct
 * @vsi_id: vsi fw index
 * @key: pointer to key info struct
 *
 **/
i40e_status i40e_aq_get_rss_key(struct i40e_hw *hw,
				u16 vsi_id,
				struct i40e_aqc_get_set_rss_key_data *key)
{
	return i40e_aq_get_set_rss_key(hw, vsi_id, key, false);
}

/**
 * i40e_aq_set_rss_key
 * @hw: pointer to the hw struct
 * @vsi_id: vsi fw index
 * @key: pointer to key info struct
 *
 * set the RSS key per VSI
 **/
i40e_status i40e_aq_set_rss_key(struct i40e_hw *hw,
				u16 vsi_id,
				struct i40e_aqc_get_set_rss_key_data *key)
{
	return i40e_aq_get_set_rss_key(hw, vsi_id, key, true);
}

/* The i40e_ptype_lookup table is used to convert from the 8-bit ptype in the
 * hardware to a bit-field that can be used by SW to more easily determine the
 * packet type.
 *
 * Macros are used to shorten the table lines and make this table human
 * readable.
 *
 * We store the PTYPE in the top byte of the bit field - this is just so that
 * we can check that the table doesn't have a row missing, as the index into
 * the table should be the PTYPE.
 *
 * Typical work flow:
 *
 * IF NOT i40e_ptype_lookup[ptype].known
 * THEN
 *	Packet is unknown
 * ELSE IF i40e_ptype_lookup[ptype].outer_ip == I40E_RX_PTYPE_OUTER_IP
 *	Use the rest of the fields to look at the tunnels, inner protocols, etc
 * ELSE
 *	Use the enum i40e_rx_l2_ptype to decode the packet type
 * ENDIF
 */
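
/* Illustrative decode sketch only (not part of this file); field names are
 * assumed from struct i40e_rx_ptype_decoded in i40e_type.h.  A consumer of
 * the table following the work flow above looks roughly like:
 *
 *	struct i40e_rx_ptype_decoded decoded = i40e_ptype_lookup[ptype];
 *
 *	if (!decoded.known)
 *		return;			// unrecognized packet type
 *	if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP)
 *		// inspect decoded.outer_ip_ver, decoded.tunnel_type,
 *		// decoded.inner_prot, decoded.payload_layer, ...
 *	else
 *		// treat the entry as an enum i40e_rx_l2_ptype value
 */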

/* macro to make the table lines short */
#define I40E_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
	{ PTYPE, \
	  1, \
	  I40E_RX_PTYPE_OUTER_##OUTER_IP, \
	  I40E_RX_PTYPE_OUTER_##OUTER_IP_VER, \
	  I40E_RX_PTYPE_##OUTER_FRAG, \
	  I40E_RX_PTYPE_TUNNEL_##T, \
	  I40E_RX_PTYPE_TUNNEL_END_##TE, \
	  I40E_RX_PTYPE_##TEF, \
	  I40E_RX_PTYPE_INNER_PROT_##I, \
	  I40E_RX_PTYPE_PAYLOAD_LAYER_##PL }

#define I40E_PTT_UNUSED_ENTRY(PTYPE) \
	{ PTYPE, 0, 0, 0, 0, 0, 0, 0, 0, 0 }

/* shorter macros makes the table fit but are terse */
#define I40E_RX_PTYPE_NOF		I40E_RX_PTYPE_NOT_FRAG
#define I40E_RX_PTYPE_FRG		I40E_RX_PTYPE_FRAG
#define I40E_RX_PTYPE_INNER_PROT_TS	I40E_RX_PTYPE_INNER_PROT_TIMESYNC

/* Lookup table mapping the HW PTYPE to the bit field for decoding */
struct i40e_rx_ptype_decoded i40e_ptype_lookup[] = {
	/* L2 Packet types */
	I40E_PTT_UNUSED_ENTRY(0),
	I40E_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
	I40E_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, TS, PAY2),
	I40E_PTT(3, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
	I40E_PTT_UNUSED_ENTRY(4),
	I40E_PTT_UNUSED_ENTRY(5),
	I40E_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
	I40E_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
	I40E_PTT_UNUSED_ENTRY(8),
	I40E_PTT_UNUSED_ENTRY(9),
	I40E_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
	I40E_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
	I40E_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(17, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(19, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(21, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),

	/* Non Tunneled IPv4 */
	I40E_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(25),
	I40E_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4),
	I40E_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4),
	I40E_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4),

	/* IPv4 --> IPv4 */
	I40E_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
	I40E_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
	I40E_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(32),
	I40E_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
	I40E_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
	I40E_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),

	/* IPv4 --> IPv6 */
	I40E_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
	I40E_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
	I40E_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(39),
	I40E_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
	I40E_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
	I40E_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),

	/* IPv4 --> GRE/NAT */
	I40E_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),

	/* IPv4 --> GRE/NAT --> IPv4 */
	I40E_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
	I40E_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
	I40E_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(47),
	I40E_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
	I40E_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
	I40E_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),

	/* IPv4 --> GRE/NAT --> IPv6 */
	I40E_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
	I40E_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
	I40E_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(54),
	I40E_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
	I40E_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
	I40E_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),

	/* IPv4 --> GRE/NAT --> MAC */
	I40E_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),

	/* IPv4 --> GRE/NAT --> MAC --> IPv4 */
	I40E_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
	I40E_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
	I40E_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(62),
	I40E_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
	I40E_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
	I40E_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),

	/* IPv4 --> GRE/NAT -> MAC --> IPv6 */
	I40E_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
	I40E_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
	I40E_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(69),
	I40E_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
	I40E_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
	I40E_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),

	/* IPv4 --> GRE/NAT --> MAC/VLAN */
	I40E_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),

	/* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */
	I40E_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
	I40E_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
	I40E_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(77),
	I40E_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
	I40E_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
	I40E_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),

	/* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */
	I40E_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
	I40E_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
	I40E_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(84),
	I40E_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
	I40E_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
	I40E_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),

	/* Non Tunneled IPv6 */
	I40E_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(91),
	I40E_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4),
	I40E_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
	I40E_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4),

	/* IPv6 --> IPv4 */
	I40E_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
	I40E_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
	I40E_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(98),
	I40E_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
	I40E_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
	I40E_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),

	/* IPv6 --> IPv6 */
	I40E_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
	I40E_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
	I40E_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(105),
	I40E_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
	I40E_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
	I40E_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),

	/* IPv6 --> GRE/NAT */
	I40E_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),

	/* IPv6 --> GRE/NAT -> IPv4 */
	I40E_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
	I40E_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
	I40E_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(113),
	I40E_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
	I40E_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
	I40E_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),

	/* IPv6 --> GRE/NAT -> IPv6 */
	I40E_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
	I40E_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
	I40E_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(120),
	I40E_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
	I40E_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
	I40E_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),

	/* IPv6 --> GRE/NAT -> MAC */
	I40E_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),

	/* IPv6 --> GRE/NAT -> MAC -> IPv4 */
	I40E_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
	I40E_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
	I40E_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(128),
	I40E_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
	I40E_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
	I40E_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),

	/* IPv6 --> GRE/NAT -> MAC -> IPv6 */
	I40E_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
	I40E_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
	I40E_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(135),
	I40E_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
	I40E_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
	I40E_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),

	/* IPv6 --> GRE/NAT -> MAC/VLAN */
	I40E_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),

	/* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */
	I40E_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
	I40E_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
	I40E_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(143),
	I40E_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
	I40E_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
	I40E_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),

	/* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */
	I40E_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
	I40E_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
	I40E_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(150),
	I40E_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
	I40E_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
	I40E_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),

	/* unused entries */
	I40E_PTT_UNUSED_ENTRY(154),
	I40E_PTT_UNUSED_ENTRY(155),
	I40E_PTT_UNUSED_ENTRY(156),
	I40E_PTT_UNUSED_ENTRY(157),
	I40E_PTT_UNUSED_ENTRY(158),
	I40E_PTT_UNUSED_ENTRY(159),

	I40E_PTT_UNUSED_ENTRY(160),
	I40E_PTT_UNUSED_ENTRY(161),
	I40E_PTT_UNUSED_ENTRY(162),
	I40E_PTT_UNUSED_ENTRY(163),
	I40E_PTT_UNUSED_ENTRY(164),
	I40E_PTT_UNUSED_ENTRY(165),
	I40E_PTT_UNUSED_ENTRY(166),
	I40E_PTT_UNUSED_ENTRY(167),
	I40E_PTT_UNUSED_ENTRY(168),
	I40E_PTT_UNUSED_ENTRY(169),

	I40E_PTT_UNUSED_ENTRY(170),
	I40E_PTT_UNUSED_ENTRY(171),
	I40E_PTT_UNUSED_ENTRY(172),
	I40E_PTT_UNUSED_ENTRY(173),
	I40E_PTT_UNUSED_ENTRY(174),
	I40E_PTT_UNUSED_ENTRY(175),
	I40E_PTT_UNUSED_ENTRY(176),
	I40E_PTT_UNUSED_ENTRY(177),
	I40E_PTT_UNUSED_ENTRY(178),
	I40E_PTT_UNUSED_ENTRY(179),

	I40E_PTT_UNUSED_ENTRY(180),
	I40E_PTT_UNUSED_ENTRY(181),
	I40E_PTT_UNUSED_ENTRY(182),
	I40E_PTT_UNUSED_ENTRY(183),
	I40E_PTT_UNUSED_ENTRY(184),
	I40E_PTT_UNUSED_ENTRY(185),
	I40E_PTT_UNUSED_ENTRY(186),
	I40E_PTT_UNUSED_ENTRY(187),
	I40E_PTT_UNUSED_ENTRY(188),
	I40E_PTT_UNUSED_ENTRY(189),

	I40E_PTT_UNUSED_ENTRY(190),
	I40E_PTT_UNUSED_ENTRY(191),
	I40E_PTT_UNUSED_ENTRY(192),
	I40E_PTT_UNUSED_ENTRY(193),
	I40E_PTT_UNUSED_ENTRY(194),
	I40E_PTT_UNUSED_ENTRY(195),
	I40E_PTT_UNUSED_ENTRY(196),
	I40E_PTT_UNUSED_ENTRY(197),
	I40E_PTT_UNUSED_ENTRY(198),
	I40E_PTT_UNUSED_ENTRY(199),

	I40E_PTT_UNUSED_ENTRY(200),
	I40E_PTT_UNUSED_ENTRY(201),
	I40E_PTT_UNUSED_ENTRY(202),
	I40E_PTT_UNUSED_ENTRY(203),
	I40E_PTT_UNUSED_ENTRY(204),
	I40E_PTT_UNUSED_ENTRY(205),
	I40E_PTT_UNUSED_ENTRY(206),
	I40E_PTT_UNUSED_ENTRY(207),
	I40E_PTT_UNUSED_ENTRY(208),
	I40E_PTT_UNUSED_ENTRY(209),

	I40E_PTT_UNUSED_ENTRY(210),
	I40E_PTT_UNUSED_ENTRY(211),
	I40E_PTT_UNUSED_ENTRY(212),
	I40E_PTT_UNUSED_ENTRY(213),
	I40E_PTT_UNUSED_ENTRY(214),
	I40E_PTT_UNUSED_ENTRY(215),
	I40E_PTT_UNUSED_ENTRY(216),
	I40E_PTT_UNUSED_ENTRY(217),
	I40E_PTT_UNUSED_ENTRY(218),
	I40E_PTT_UNUSED_ENTRY(219),

	I40E_PTT_UNUSED_ENTRY(220),
	I40E_PTT_UNUSED_ENTRY(221),
	I40E_PTT_UNUSED_ENTRY(222),
	I40E_PTT_UNUSED_ENTRY(223),
	I40E_PTT_UNUSED_ENTRY(224),
	I40E_PTT_UNUSED_ENTRY(225),
	I40E_PTT_UNUSED_ENTRY(226),
	I40E_PTT_UNUSED_ENTRY(227),
	I40E_PTT_UNUSED_ENTRY(228),
	I40E_PTT_UNUSED_ENTRY(229),

	I40E_PTT_UNUSED_ENTRY(230),
	I40E_PTT_UNUSED_ENTRY(231),
	I40E_PTT_UNUSED_ENTRY(232),
	I40E_PTT_UNUSED_ENTRY(233),
	I40E_PTT_UNUSED_ENTRY(234),
	I40E_PTT_UNUSED_ENTRY(235),
	I40E_PTT_UNUSED_ENTRY(236),
	I40E_PTT_UNUSED_ENTRY(237),
	I40E_PTT_UNUSED_ENTRY(238),
	I40E_PTT_UNUSED_ENTRY(239),

	I40E_PTT_UNUSED_ENTRY(240),
	I40E_PTT_UNUSED_ENTRY(241),
	I40E_PTT_UNUSED_ENTRY(242),
	I40E_PTT_UNUSED_ENTRY(243),
	I40E_PTT_UNUSED_ENTRY(244),
	I40E_PTT_UNUSED_ENTRY(245),
	I40E_PTT_UNUSED_ENTRY(246),
	I40E_PTT_UNUSED_ENTRY(247),
	I40E_PTT_UNUSED_ENTRY(248),
	I40E_PTT_UNUSED_ENTRY(249),

	I40E_PTT_UNUSED_ENTRY(250),
	I40E_PTT_UNUSED_ENTRY(251),
	I40E_PTT_UNUSED_ENTRY(252),
	I40E_PTT_UNUSED_ENTRY(253),
	I40E_PTT_UNUSED_ENTRY(254),
	I40E_PTT_UNUSED_ENTRY(255)
};

/**
 * i40e_init_shared_code - Initialize the shared code
 * @hw: pointer to hardware structure
 *
 * This assigns the MAC type and PHY code and inits the NVM.
 * Does not touch the hardware. This function must be called prior to any
 * other function in the shared code. The i40e_hw structure should be
 * memset to 0 prior to calling this function. The following fields in
 * hw structure should be filled in prior to calling this function:
 * hw_addr, back, device_id, vendor_id, subsystem_device_id,
 * subsystem_vendor_id, and revision_id
 **/
i40e_status i40e_init_shared_code(struct i40e_hw *hw)
{
	i40e_status status = 0;
	u32 port, ari, func_rid;

	i40e_set_mac_type(hw);

	switch (hw->mac.type) {
	case I40E_MAC_XL710:
	case I40E_MAC_X722:
		break;
	default:
		return I40E_ERR_DEVICE_NOT_SUPPORTED;
	}

	hw->phy.get_link_info = true;

	/* Determine port number and PF number */
	port = (rd32(hw, I40E_PFGEN_PORTNUM) & I40E_PFGEN_PORTNUM_PORT_NUM_MASK)
					   >> I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT;
	hw->port = (u8)port;
	ari = (rd32(hw, I40E_GLPCI_CAPSUP) & I40E_GLPCI_CAPSUP_ARI_EN_MASK) >>
						 I40E_GLPCI_CAPSUP_ARI_EN_SHIFT;
	func_rid = rd32(hw, I40E_PF_FUNC_RID);
	if (ari)
		hw->pf_id = (u8)(func_rid & 0xff);
	else
		hw->pf_id = (u8)(func_rid & 0x7);

	if (hw->mac.type == I40E_MAC_X722)
		hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE |
			     I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;

	status = i40e_init_nvm(hw);
	return status;
}

/**
 * i40e_aq_mac_address_read - Retrieve the MAC addresses
 * @hw: pointer to the hw struct
 * @flags: a return indicator of what addresses were added to the addr store
 * @addrs: the requestor's mac addr store
 * @cmd_details: pointer to command details structure or NULL
 **/
static i40e_status i40e_aq_mac_address_read(struct i40e_hw *hw,
				   u16 *flags,
				   struct i40e_aqc_mac_address_read_data *addrs,
				   struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_mac_address_read *cmd_data =
		(struct i40e_aqc_mac_address_read *)&desc.params.raw;
	i40e_status status;

	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_mac_address_read);
	desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF);

	status = i40e_asq_send_command(hw, &desc, addrs,
				       sizeof(*addrs), cmd_details);
	*flags = le16_to_cpu(cmd_data->command_flags);

	return status;
}

/**
 * i40e_aq_mac_address_write - Change the MAC addresses
 * @hw: pointer to the hw struct
 * @flags: indicates which MAC to be written
 * @mac_addr: address to write
 * @cmd_details: pointer to command details structure or NULL
 **/
i40e_status i40e_aq_mac_address_write(struct i40e_hw *hw,
				      u16 flags, u8 *mac_addr,
				      struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_mac_address_write *cmd_data =
		(struct i40e_aqc_mac_address_write *)&desc.params.raw;
	i40e_status status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_mac_address_write);
	cmd_data->command_flags = cpu_to_le16(flags);
	cmd_data->mac_sah = cpu_to_le16((u16)mac_addr[0] << 8 | mac_addr[1]);
	cmd_data->mac_sal = cpu_to_le32(((u32)mac_addr[2] << 24) |
					((u32)mac_addr[3] << 16) |
					((u32)mac_addr[4] << 8) |
					mac_addr[5]);

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}

/**
 * i40e_get_mac_addr - get MAC address
 * @hw: pointer to the HW structure
 * @mac_addr: pointer to MAC address
 *
 * Reads the adapter's MAC address from register
 **/
i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
{
	struct i40e_aqc_mac_address_read_data addrs;
	i40e_status status;
	u16 flags = 0;

	status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);

	if (flags & I40E_AQC_LAN_ADDR_VALID)
		ether_addr_copy(mac_addr, addrs.pf_lan_mac);

	return status;
}

/**
 * i40e_get_port_mac_addr - get Port MAC address
 * @hw: pointer to the HW structure
 * @mac_addr: pointer to Port MAC address
 *
 * Reads the adapter's Port MAC address
 **/
i40e_status i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
{
	struct i40e_aqc_mac_address_read_data addrs;
	i40e_status status;
	u16 flags = 0;

	status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
	if (status)
		return status;

	if (flags & I40E_AQC_PORT_ADDR_VALID)
		ether_addr_copy(mac_addr, addrs.port_mac);
	else
		status = I40E_ERR_INVALID_MAC_ADDR;

	return status;
}

/**
 * i40e_pre_tx_queue_cfg - pre tx queue configure
 * @hw: pointer to the HW structure
 * @queue: target PF queue index
 * @enable: state change request
 *
 * Handles hw requirement to indicate intention to enable
 * or disable target queue.
 **/
void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable)
{
	u32 abs_queue_idx = hw->func_caps.base_queue + queue;
	u32 reg_block = 0;
	u32 reg_val;

	if (abs_queue_idx >= 128) {
		reg_block = abs_queue_idx / 128;
		abs_queue_idx %= 128;
	}

	reg_val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block));
	reg_val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
	reg_val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);

	if (enable)
		reg_val |= I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK;
	else
		reg_val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;

	wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), reg_val);
}

/**
 * i40e_read_pba_string - Reads part number string from EEPROM
 * @hw: pointer to hardware structure
 * @pba_num: stores the part number string from the EEPROM
 * @pba_num_size: part number string buffer length
 *
 * Reads the part number string from the EEPROM.
 **/
i40e_status i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
				 u32 pba_num_size)
{
	i40e_status status = 0;
	u16 pba_word = 0;
	u16 pba_size = 0;
	u16 pba_ptr = 0;
	u16 i = 0;

	status = i40e_read_nvm_word(hw, I40E_SR_PBA_FLAGS, &pba_word);
	if (status || (pba_word != 0xFAFA)) {
		hw_dbg(hw, "Failed to read PBA flags or flag is invalid.\n");
		return status;
	}

	status = i40e_read_nvm_word(hw, I40E_SR_PBA_BLOCK_PTR, &pba_ptr);
	if (status) {
		hw_dbg(hw, "Failed to read PBA Block pointer.\n");
		return status;
	}

	status = i40e_read_nvm_word(hw, pba_ptr, &pba_size);
	if (status) {
		hw_dbg(hw, "Failed to read PBA Block size.\n");
		return status;
	}

	/* Subtract one to get PBA word count (PBA Size word is included in
	 * total size)
	 */
	pba_size--;
	if (pba_num_size < (((u32)pba_size * 2) + 1)) {
		hw_dbg(hw, "Buffer to small for PBA data.\n");
		return I40E_ERR_PARAM;
	}

	for (i = 0; i < pba_size; i++) {
		status = i40e_read_nvm_word(hw, (pba_ptr + 1) + i, &pba_word);
		if (status) {
			hw_dbg(hw, "Failed to read PBA Block word %d.\n", i);
			return status;
		}

		pba_num[(i * 2)] = (pba_word >> 8) & 0xFF;
		pba_num[(i * 2) + 1] = pba_word & 0xFF;
	}
	pba_num[(pba_size * 2)] = '\0';

	return status;
}

/**
 * i40e_get_media_type - Gets media type
 * @hw: pointer to the hardware structure
 **/
static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
{
	enum i40e_media_type media;

	switch (hw->phy.link_info.phy_type) {
	case I40E_PHY_TYPE_10GBASE_SR:
	case I40E_PHY_TYPE_10GBASE_LR:
	case I40E_PHY_TYPE_1000BASE_SX:
	case I40E_PHY_TYPE_1000BASE_LX:
	case I40E_PHY_TYPE_40GBASE_SR4:
	case I40E_PHY_TYPE_40GBASE_LR4:
	case I40E_PHY_TYPE_25GBASE_LR:
	case I40E_PHY_TYPE_25GBASE_SR:
		media = I40E_MEDIA_TYPE_FIBER;
		break;
	case I40E_PHY_TYPE_100BASE_TX:
	case I40E_PHY_TYPE_1000BASE_T:
	case I40E_PHY_TYPE_10GBASE_T:
		media = I40E_MEDIA_TYPE_BASET;
		break;
	case I40E_PHY_TYPE_10GBASE_CR1_CU:
	case I40E_PHY_TYPE_40GBASE_CR4_CU:
	case I40E_PHY_TYPE_10GBASE_CR1:
	case I40E_PHY_TYPE_40GBASE_CR4:
	case I40E_PHY_TYPE_10GBASE_SFPP_CU:
	case I40E_PHY_TYPE_40GBASE_AOC:
	case I40E_PHY_TYPE_10GBASE_AOC:
	case I40E_PHY_TYPE_25GBASE_CR:
	case I40E_PHY_TYPE_25GBASE_AOC:
	case I40E_PHY_TYPE_25GBASE_ACC:
		media = I40E_MEDIA_TYPE_DA;
		break;
	case I40E_PHY_TYPE_1000BASE_KX:
	case I40E_PHY_TYPE_10GBASE_KX4:
	case I40E_PHY_TYPE_10GBASE_KR:
	case I40E_PHY_TYPE_40GBASE_KR4:
	case I40E_PHY_TYPE_20GBASE_KR2:
	case I40E_PHY_TYPE_25GBASE_KR:
		media = I40E_MEDIA_TYPE_BACKPLANE;
		break;
	case I40E_PHY_TYPE_SGMII:
	case I40E_PHY_TYPE_XAUI:
	case I40E_PHY_TYPE_XFI:
	case I40E_PHY_TYPE_XLAUI:
	case I40E_PHY_TYPE_XLPPI:
	default:
		media = I40E_MEDIA_TYPE_UNKNOWN;
		break;
	}

	return media;
}

#define I40E_PF_RESET_WAIT_COUNT_A0	200
#define I40E_PF_RESET_WAIT_COUNT	200
/**
 * i40e_pf_reset - Reset the PF
 * @hw: pointer to the hardware structure
 *
 * Assuming someone else has triggered a global reset,
 * assure the global reset is complete and then reset the PF
 **/
i40e_status i40e_pf_reset(struct i40e_hw *hw)
{
	u32 cnt = 0;
	u32 cnt1 = 0;
	u32 reg = 0;
	u32 grst_del;

	/* Poll for Global Reset steady state in case of recent GRST.
	 * The grst delay value is in 100ms units, and we'll wait a
	 * couple counts longer to be sure we don't just miss the end.
	 */
	grst_del = (rd32(hw, I40E_GLGEN_RSTCTL) &
		    I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >>
		    I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;

	/* It can take up to 15 secs for GRST steady state.
	 * Bump it to 16 secs max to be safe.
	 */
	grst_del = grst_del * 20;

	for (cnt = 0; cnt < grst_del; cnt++) {
		reg = rd32(hw, I40E_GLGEN_RSTAT);
		if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
			break;
		msleep(100);
	}
	if (reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
		hw_dbg(hw, "Global reset polling failed to complete.\n");
		return I40E_ERR_RESET_FAILED;
	}

	/* Now Wait for the FW to be ready */
	for (cnt1 = 0; cnt1 < I40E_PF_RESET_WAIT_COUNT; cnt1++) {
		reg = rd32(hw, I40E_GLNVM_ULD);
		reg &= (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
			I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK);
		if (reg == (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
			    I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK)) {
			hw_dbg(hw, "Core and Global modules ready %d\n", cnt1);
			break;
		}
		usleep_range(10000, 20000);
	}
	if (!(reg & (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
		     I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))) {
		hw_dbg(hw, "wait for FW Reset complete timedout\n");
		hw_dbg(hw, "I40E_GLNVM_ULD = 0x%x\n", reg);
		return I40E_ERR_RESET_FAILED;
	}

	/* If there was a Global Reset in progress when we got here,
	 * we don't need to do the PF Reset
	 */
	if (!cnt) {
		u32 reg2 = 0;
		if (hw->revision_id == 0)
			cnt = I40E_PF_RESET_WAIT_COUNT_A0;
		else
			cnt = I40E_PF_RESET_WAIT_COUNT;
		reg = rd32(hw, I40E_PFGEN_CTRL);
		wr32(hw, I40E_PFGEN_CTRL,
		     (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
		for (; cnt; cnt--) {
			reg = rd32(hw, I40E_PFGEN_CTRL);
			if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK))
				break;
			reg2 = rd32(hw, I40E_GLGEN_RSTAT);
			if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
				hw_dbg(hw, "Core reset upcoming. Skipping PF reset request.\n");
				hw_dbg(hw, "I40E_GLGEN_RSTAT = 0x%x\n", reg2);
				return I40E_ERR_NOT_READY;
			}
			usleep_range(1000, 2000);
		}
		if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) {
			hw_dbg(hw, "PF reset polling failed to complete.\n");
			return I40E_ERR_RESET_FAILED;
		}
	}

	i40e_clear_pxe_mode(hw);

	return 0;
}

/**
 * i40e_clear_hw - clear out any left over hw state
 * @hw: pointer to the hw struct
 *
 * Clear queues and interrupts, typically called at init time,
 * but after the capabilities have been found so we know how many
 * queues and msix vectors have been allocated.
 **/
void i40e_clear_hw(struct i40e_hw *hw)
{
	u32 num_queues, base_queue;
	u32 num_pf_int;
	u32 num_vf_int;
	u32 num_vfs;
	u32 i, j;
	u32 val;
	u32 eol = 0x7ff;

	/* get number of interrupts, queues, and VFs */
	val = rd32(hw, I40E_GLPCI_CNF2);
	num_pf_int = (val & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >>
		     I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT;
	num_vf_int = (val & I40E_GLPCI_CNF2_MSI_X_VF_N_MASK) >>
		     I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT;

	val = rd32(hw, I40E_PFLAN_QALLOC);
	base_queue = (val & I40E_PFLAN_QALLOC_FIRSTQ_MASK) >>
		     I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
	j = (val & I40E_PFLAN_QALLOC_LASTQ_MASK) >>
	    I40E_PFLAN_QALLOC_LASTQ_SHIFT;
	if (val & I40E_PFLAN_QALLOC_VALID_MASK)
		num_queues = (j - base_queue) + 1;
	else
		num_queues = 0;

	val = rd32(hw, I40E_PF_VT_PFALLOC);
	i = (val & I40E_PF_VT_PFALLOC_FIRSTVF_MASK) >>
	    I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT;
	j = (val & I40E_PF_VT_PFALLOC_LASTVF_MASK) >>
	    I40E_PF_VT_PFALLOC_LASTVF_SHIFT;
	if (val & I40E_PF_VT_PFALLOC_VALID_MASK)
		num_vfs = (j - i) + 1;
	else
		num_vfs = 0;

	/* stop all the interrupts */
	wr32(hw, I40E_PFINT_ICR0_ENA, 0);
	val = 0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
	for (i = 0; i < num_pf_int - 2; i++)
		wr32(hw, I40E_PFINT_DYN_CTLN(i), val);

	/* Set the FIRSTQ_INDX field to 0x7FF in PFINT_LNKLSTx */
	val = eol << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
	wr32(hw, I40E_PFINT_LNKLST0, val);
	for (i = 0; i < num_pf_int - 2; i++)
		wr32(hw, I40E_PFINT_LNKLSTN(i), val);
	val = eol << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT;
	for (i = 0; i < num_vfs; i++)
		wr32(hw, I40E_VPINT_LNKLST0(i), val);
	for (i = 0; i < num_vf_int - 2; i++)
		wr32(hw, I40E_VPINT_LNKLSTN(i), val);

	/* warn the HW of the coming Tx disables */
	for (i = 0; i < num_queues; i++) {
		u32 abs_queue_idx = base_queue + i;
		u32 reg_block = 0;

		if (abs_queue_idx >= 128) {
			reg_block = abs_queue_idx / 128;
			abs_queue_idx %= 128;
		}

		val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block));
		val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
		val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
		val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;

		wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), val);
	}
	udelay(400);

	/* stop all the queues */
	for (i = 0; i < num_queues; i++) {
		wr32(hw, I40E_QINT_TQCTL(i), 0);
		wr32(hw, I40E_QTX_ENA(i), 0);
		wr32(hw, I40E_QINT_RQCTL(i), 0);
		wr32(hw, I40E_QRX_ENA(i), 0);
	}

	/* short wait for all queue disables to settle */
	udelay(50);
}

/**
 * i40e_clear_pxe_mode - clear pxe operations mode
 * @hw: pointer to the hw struct
 *
 * Make sure all PXE mode settings are cleared, including things
 * like descriptor fetch/write-back mode.
 **/
void i40e_clear_pxe_mode(struct i40e_hw *hw)
{
	u32 reg;

	if (i40e_check_asq_alive(hw))
		i40e_aq_clear_pxe_mode(hw, NULL);

	/* Clear single descriptor fetch/write-back mode */
	reg = rd32(hw, I40E_GLLAN_RCTL_0);

	if (hw->revision_id == 0) {
		/* As a work around clear PXE_MODE instead of setting it */
		wr32(hw, I40E_GLLAN_RCTL_0, (reg & (~I40E_GLLAN_RCTL_0_PXE_MODE_MASK)));
	} else {
		wr32(hw, I40E_GLLAN_RCTL_0, (reg | I40E_GLLAN_RCTL_0_PXE_MODE_MASK));
	}
}

/**
 * i40e_led_is_mine - helper to find matching led
 * @hw: pointer to the hw struct
 * @idx: index into GPIO registers
 *
 * returns: 0 if no match, otherwise the value of the GPIO_CTL register
 */
static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx)
{
	u32 gpio_val = 0;
	u32 port;

	if (!hw->func_caps.led[idx])
		return 0;

	gpio_val = rd32(hw, I40E_GLGEN_GPIO_CTL(idx));
	port = (gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK) >>
		I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT;

	/* if PRT_NUM_NA is 1 then this LED is not port specific, OR
	 * if it is not our port then ignore
	 */
	if ((gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK) ||
	    (port != hw->port))
		return 0;

	return gpio_val;
}

#define I40E_COMBINED_ACTIVITY 0xA
#define I40E_FILTER_ACTIVITY 0xE
#define I40E_LINK_ACTIVITY 0xC
#define I40E_MAC_ACTIVITY 0xD
#define I40E_LED0 22

/**
 * i40e_led_get - return current on/off mode
 * @hw: pointer to the hw struct
 *
 * The value returned is the 'mode' field as defined in the
 * GPIO register definitions: 0x0 = off, 0xf = on, and other
 * values are variations of possible behaviors relating to
 * blink, link, and wire.
 **/
u32 i40e_led_get(struct i40e_hw *hw)
{
	u32 current_mode = 0;
	u32 mode = 0;
	int i;

	/* as per the documentation GPIO 22-29 are the LED
	 * GPIO pins named LED0..LED7
	 */
	for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) {
		u32 gpio_val = i40e_led_is_mine(hw, i);

		if (!gpio_val)
			continue;

		/* ignore gpio LED src mode entries related to the activity
		 * LEDs
		 */
		current_mode = ((gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK)
				>> I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT);
		switch (current_mode) {
		case I40E_COMBINED_ACTIVITY:
		case I40E_FILTER_ACTIVITY:
		case I40E_MAC_ACTIVITY:
		case I40E_LINK_ACTIVITY:
			continue;
		default:
			break;
		}

		mode = (gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) >>
			I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT;
		break;
	}

	return mode;
}

/**
 * i40e_led_set - set new on/off mode
 * @hw: pointer to the hw struct
 * @mode: 0=off, 0xf=on (else see manual for mode details)
 * @blink: true if the LED should blink when on, false if steady
 *
 * if this function is used to turn on the blink it should
 * be used to disable the blink when restoring the original state.
 **/
void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
{
	u32 current_mode = 0;
	int i;

	if (mode & 0xfffffff0)
		hw_dbg(hw, "invalid mode passed in %X\n", mode);

	/* as per the documentation GPIO 22-29 are the LED
	 * GPIO pins named LED0..LED7
	 */
	for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) {
		u32 gpio_val = i40e_led_is_mine(hw, i);

		if (!gpio_val)
			continue;

		/* ignore gpio LED src mode entries related to the activity
		 * LEDs
		 */
		current_mode = ((gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK)
				>> I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT);
		switch (current_mode) {
		case I40E_COMBINED_ACTIVITY:
		case I40E_FILTER_ACTIVITY:
		case I40E_MAC_ACTIVITY:
		case I40E_LINK_ACTIVITY:
			continue;
		default:
			break;
		}

		gpio_val &= ~I40E_GLGEN_GPIO_CTL_LED_MODE_MASK;
		/* this & is a bit of paranoia, but serves as a range check */
		gpio_val |= ((mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) &
			     I40E_GLGEN_GPIO_CTL_LED_MODE_MASK);

		if (blink)
			gpio_val |= BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
		else
			gpio_val &= ~BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);

		wr32(hw, I40E_GLGEN_GPIO_CTL(i), gpio_val);
		break;
	}
}
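
/* Illustrative usage sketch only (not part of this file): per the comments
 * above, a caller that turns blinking on should also turn it off when
 * restoring the saved mode, roughly:
 *
 *	u32 saved = i40e_led_get(hw);		// remember the current mode
 *
 *	i40e_led_set(hw, 0xf, true);		// LED on, blinking
 *	...
 *	i40e_led_set(hw, saved, false);		// restore mode, stop blinking
 */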

/* Admin command wrappers */

/**
 * i40e_aq_get_phy_capabilities
 * @hw: pointer to the hw struct
 * @abilities: structure for PHY capabilities to be filled
 * @qualified_modules: report Qualified Modules
 * @report_init: report init capabilities (active are default)
 * @cmd_details: pointer to command details structure or NULL
 *
 * Returns the various PHY abilities supported on the Port.
 **/
i40e_status i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
			bool qualified_modules, bool report_init,
			struct i40e_aq_get_phy_abilities_resp *abilities,
			struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	i40e_status status;
	u16 abilities_size = sizeof(struct i40e_aq_get_phy_abilities_resp);
	u16 max_delay = I40E_MAX_PHY_TIMEOUT, total_delay = 0;

	if (!abilities)
		return I40E_ERR_PARAM;

	do {
		i40e_fill_default_direct_cmd_desc(&desc,
					       i40e_aqc_opc_get_phy_abilities);

		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
		if (abilities_size > I40E_AQ_LARGE_BUF)
			desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);

		if (qualified_modules)
			desc.params.external.param0 |=
			cpu_to_le32(I40E_AQ_PHY_REPORT_QUALIFIED_MODULES);

		if (report_init)
			desc.params.external.param0 |=
			cpu_to_le32(I40E_AQ_PHY_REPORT_INITIAL_VALUES);

		status = i40e_asq_send_command(hw, &desc, abilities,
					       abilities_size, cmd_details);

		if (status)
			break;

		if (hw->aq.asq_last_status == I40E_AQ_RC_EIO) {
			status = I40E_ERR_UNKNOWN_PHY;
			break;
		} else if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN) {
			usleep_range(1000, 2000);
			total_delay++;
			status = I40E_ERR_TIMEOUT;
		}
	} while ((hw->aq.asq_last_status != I40E_AQ_RC_OK) &&
		 (total_delay < max_delay));

	if (status)
		return status;

	if (report_init) {
		if (hw->mac.type == I40E_MAC_XL710 &&
		    hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
		    hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) {
			status = i40e_aq_get_link_info(hw, true, NULL, NULL);
		} else {
			hw->phy.phy_types = le32_to_cpu(abilities->phy_type);
			hw->phy.phy_types |=
					((u64)abilities->phy_type_ext << 32);
		}
	}

	return status;
}

/**
 * i40e_aq_set_phy_config
 * @hw: pointer to the hw struct
 * @config: structure with PHY configuration to be set
 * @cmd_details: pointer to command details structure or NULL
 *
 * Set the various PHY configuration parameters
 * supported on the Port. One or more of the Set PHY config parameters may be
 * ignored in an MFP mode as the PF may not have the privilege to set some
 * of the PHY Config parameters. This status will be indicated by the
 * command response.
 **/
enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw,
				struct i40e_aq_set_phy_config *config,
				struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aq_set_phy_config *cmd =
		(struct i40e_aq_set_phy_config *)&desc.params.raw;
	enum i40e_status_code status;

	if (!config)
		return I40E_ERR_PARAM;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_set_phy_config);

	*cmd = *config;

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}

/**
 * i40e_set_fc
 * @hw: pointer to the hw struct
 * @aq_failures: buffer to return AdminQ failure information
 * @atomic_restart: restart the link automatically so the new settings take effect
 *
 * Set the requested flow control mode using set_phy_config.
 **/
enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
				  bool atomic_restart)
{
	enum i40e_fc_mode fc_mode = hw->fc.requested_mode;
	struct i40e_aq_get_phy_abilities_resp abilities;
	struct i40e_aq_set_phy_config config;
	enum i40e_status_code status;
	u8 pause_mask = 0x0;

	*aq_failures = 0x0;

	switch (fc_mode) {
	case I40E_FC_FULL:
		pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX;
		pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_RX;
		break;
	case I40E_FC_RX_PAUSE:
		pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_RX;
		break;
	case I40E_FC_TX_PAUSE:
		pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX;
		break;
	default:
		break;
	}

	/* Get the current phy config */
	status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
					      NULL);
	if (status) {
		*aq_failures |= I40E_SET_FC_AQ_FAIL_GET;
		return status;
	}

	memset(&config, 0, sizeof(struct i40e_aq_set_phy_config));
	/* clear the old pause settings */
	config.abilities = abilities.abilities & ~(I40E_AQ_PHY_FLAG_PAUSE_TX) &
			   ~(I40E_AQ_PHY_FLAG_PAUSE_RX);
	/* set the new abilities */
	config.abilities |= pause_mask;
	/* If the abilities have changed, then set the new config */
	if (config.abilities != abilities.abilities) {
		/* Auto restart link so settings take effect */
		if (atomic_restart)
			config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
		/* Copy over all the old settings */
		config.phy_type = abilities.phy_type;
		config.phy_type_ext = abilities.phy_type_ext;
		config.link_speed = abilities.link_speed;
		config.eee_capability = abilities.eee_capability;
		config.eeer = abilities.eeer_val;
		config.low_power_ctrl = abilities.d3_lpan;
		config.fec_config = abilities.fec_cfg_curr_mod_ext_info &
				    I40E_AQ_PHY_FEC_CONFIG_MASK;
		status = i40e_aq_set_phy_config(hw, &config, NULL);

		if (status)
			*aq_failures |= I40E_SET_FC_AQ_FAIL_SET;
	}
	/* Update the link info */
i40e_update_link_info(hw); 1738 if (status) { 1739 /* Wait a little bit (on 40G cards it sometimes takes a really 1740 * long time for link to come back from the atomic reset) 1741 * and try once more 1742 */ 1743 msleep(1000); 1744 status = i40e_update_link_info(hw); 1745 } 1746 if (status) 1747 *aq_failures |= I40E_SET_FC_AQ_FAIL_UPDATE; 1748 1749 return status; 1750 } 1751 1752 /** 1753 * i40e_aq_clear_pxe_mode 1754 * @hw: pointer to the hw struct 1755 * @cmd_details: pointer to command details structure or NULL 1756 * 1757 * Tell the firmware that the driver is taking over from PXE 1758 **/ 1759 i40e_status i40e_aq_clear_pxe_mode(struct i40e_hw *hw, 1760 struct i40e_asq_cmd_details *cmd_details) 1761 { 1762 i40e_status status; 1763 struct i40e_aq_desc desc; 1764 struct i40e_aqc_clear_pxe *cmd = 1765 (struct i40e_aqc_clear_pxe *)&desc.params.raw; 1766 1767 i40e_fill_default_direct_cmd_desc(&desc, 1768 i40e_aqc_opc_clear_pxe_mode); 1769 1770 cmd->rx_cnt = 0x2; 1771 1772 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 1773 1774 wr32(hw, I40E_GLLAN_RCTL_0, 0x1); 1775 1776 return status; 1777 } 1778 1779 /** 1780 * i40e_aq_set_link_restart_an 1781 * @hw: pointer to the hw struct 1782 * @enable_link: if true: enable link, if false: disable link 1783 * @cmd_details: pointer to command details structure or NULL 1784 * 1785 * Sets up the link and restarts the Auto-Negotiation over the link. 1786 **/ 1787 i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw, 1788 bool enable_link, 1789 struct i40e_asq_cmd_details *cmd_details) 1790 { 1791 struct i40e_aq_desc desc; 1792 struct i40e_aqc_set_link_restart_an *cmd = 1793 (struct i40e_aqc_set_link_restart_an *)&desc.params.raw; 1794 i40e_status status; 1795 1796 i40e_fill_default_direct_cmd_desc(&desc, 1797 i40e_aqc_opc_set_link_restart_an); 1798 1799 cmd->command = I40E_AQ_PHY_RESTART_AN; 1800 if (enable_link) 1801 cmd->command |= I40E_AQ_PHY_LINK_ENABLE; 1802 else 1803 cmd->command &= ~I40E_AQ_PHY_LINK_ENABLE; 1804 1805 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 1806 1807 return status; 1808 } 1809 1810 /** 1811 * i40e_aq_get_link_info 1812 * @hw: pointer to the hw struct 1813 * @enable_lse: enable/disable LinkStatusEvent reporting 1814 * @link: pointer to link status structure - optional 1815 * @cmd_details: pointer to command details structure or NULL 1816 * 1817 * Returns the link status of the adapter. 
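 * The result is also cached in hw->phy.link_info and the previous contents
 * are preserved in hw->phy.link_info_old, so @link may be NULL when the
 * caller only needs the cached copy refreshed.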
1818 **/ 1819 i40e_status i40e_aq_get_link_info(struct i40e_hw *hw, 1820 bool enable_lse, struct i40e_link_status *link, 1821 struct i40e_asq_cmd_details *cmd_details) 1822 { 1823 struct i40e_aq_desc desc; 1824 struct i40e_aqc_get_link_status *resp = 1825 (struct i40e_aqc_get_link_status *)&desc.params.raw; 1826 struct i40e_link_status *hw_link_info = &hw->phy.link_info; 1827 i40e_status status; 1828 bool tx_pause, rx_pause; 1829 u16 command_flags; 1830 1831 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status); 1832 1833 if (enable_lse) 1834 command_flags = I40E_AQ_LSE_ENABLE; 1835 else 1836 command_flags = I40E_AQ_LSE_DISABLE; 1837 resp->command_flags = cpu_to_le16(command_flags); 1838 1839 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 1840 1841 if (status) 1842 goto aq_get_link_info_exit; 1843 1844 /* save off old link status information */ 1845 hw->phy.link_info_old = *hw_link_info; 1846 1847 /* update link status */ 1848 hw_link_info->phy_type = (enum i40e_aq_phy_type)resp->phy_type; 1849 hw->phy.media_type = i40e_get_media_type(hw); 1850 hw_link_info->link_speed = (enum i40e_aq_link_speed)resp->link_speed; 1851 hw_link_info->link_info = resp->link_info; 1852 hw_link_info->an_info = resp->an_info; 1853 hw_link_info->fec_info = resp->config & (I40E_AQ_CONFIG_FEC_KR_ENA | 1854 I40E_AQ_CONFIG_FEC_RS_ENA); 1855 hw_link_info->ext_info = resp->ext_info; 1856 hw_link_info->loopback = resp->loopback & I40E_AQ_LOOPBACK_MASK; 1857 hw_link_info->max_frame_size = le16_to_cpu(resp->max_frame_size); 1858 hw_link_info->pacing = resp->config & I40E_AQ_CONFIG_PACING_MASK; 1859 1860 /* update fc info */ 1861 tx_pause = !!(resp->an_info & I40E_AQ_LINK_PAUSE_TX); 1862 rx_pause = !!(resp->an_info & I40E_AQ_LINK_PAUSE_RX); 1863 if (tx_pause & rx_pause) 1864 hw->fc.current_mode = I40E_FC_FULL; 1865 else if (tx_pause) 1866 hw->fc.current_mode = I40E_FC_TX_PAUSE; 1867 else if (rx_pause) 1868 hw->fc.current_mode = I40E_FC_RX_PAUSE; 1869 else 1870 hw->fc.current_mode = I40E_FC_NONE; 1871 1872 if (resp->config & I40E_AQ_CONFIG_CRC_ENA) 1873 hw_link_info->crc_enable = true; 1874 else 1875 hw_link_info->crc_enable = false; 1876 1877 if (resp->command_flags & cpu_to_le16(I40E_AQ_LSE_IS_ENABLED)) 1878 hw_link_info->lse_enable = true; 1879 else 1880 hw_link_info->lse_enable = false; 1881 1882 if ((hw->mac.type == I40E_MAC_XL710) && 1883 (hw->aq.fw_maj_ver < 4 || (hw->aq.fw_maj_ver == 4 && 1884 hw->aq.fw_min_ver < 40)) && hw_link_info->phy_type == 0xE) 1885 hw_link_info->phy_type = I40E_PHY_TYPE_10GBASE_SFPP_CU; 1886 1887 if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR && 1888 hw->aq.api_min_ver >= 7) { 1889 __le32 tmp; 1890 1891 memcpy(&tmp, resp->link_type, sizeof(tmp)); 1892 hw->phy.phy_types = le32_to_cpu(tmp); 1893 hw->phy.phy_types |= ((u64)resp->link_type_ext << 32); 1894 } 1895 1896 /* save link status information */ 1897 if (link) 1898 *link = *hw_link_info; 1899 1900 /* flag cleared so helper functions don't call AQ again */ 1901 hw->phy.get_link_info = false; 1902 1903 aq_get_link_info_exit: 1904 return status; 1905 } 1906 1907 /** 1908 * i40e_aq_set_phy_int_mask 1909 * @hw: pointer to the hw struct 1910 * @mask: interrupt mask to be set 1911 * @cmd_details: pointer to command details structure or NULL 1912 * 1913 * Set link interrupt mask. 
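 * Bits set in @mask suppress the corresponding PHY events on the ARQ (note
 * the inverted logic). As an illustrative sketch, with the event names
 * assumed from the admin queue definitions rather than from this file, a
 * caller that only wants link, media-availability and module-qualification
 * events could pass:
 *
 *	~(I40E_AQ_EVENT_LINK_UPDOWN | I40E_AQ_EVENT_MEDIA_NA |
 *	  I40E_AQ_EVENT_MODULE_QUAL_FAIL)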
1914 **/ 1915 i40e_status i40e_aq_set_phy_int_mask(struct i40e_hw *hw, 1916 u16 mask, 1917 struct i40e_asq_cmd_details *cmd_details) 1918 { 1919 struct i40e_aq_desc desc; 1920 struct i40e_aqc_set_phy_int_mask *cmd = 1921 (struct i40e_aqc_set_phy_int_mask *)&desc.params.raw; 1922 i40e_status status; 1923 1924 i40e_fill_default_direct_cmd_desc(&desc, 1925 i40e_aqc_opc_set_phy_int_mask); 1926 1927 cmd->event_mask = cpu_to_le16(mask); 1928 1929 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 1930 1931 return status; 1932 } 1933 1934 /** 1935 * i40e_aq_set_phy_debug 1936 * @hw: pointer to the hw struct 1937 * @cmd_flags: debug command flags 1938 * @cmd_details: pointer to command details structure or NULL 1939 * 1940 * Reset the external PHY. 1941 **/ 1942 i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags, 1943 struct i40e_asq_cmd_details *cmd_details) 1944 { 1945 struct i40e_aq_desc desc; 1946 struct i40e_aqc_set_phy_debug *cmd = 1947 (struct i40e_aqc_set_phy_debug *)&desc.params.raw; 1948 i40e_status status; 1949 1950 i40e_fill_default_direct_cmd_desc(&desc, 1951 i40e_aqc_opc_set_phy_debug); 1952 1953 cmd->command_flags = cmd_flags; 1954 1955 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 1956 1957 return status; 1958 } 1959 1960 /** 1961 * i40e_aq_add_vsi 1962 * @hw: pointer to the hw struct 1963 * @vsi_ctx: pointer to a vsi context struct 1964 * @cmd_details: pointer to command details structure or NULL 1965 * 1966 * Add a VSI context to the hardware. 1967 **/ 1968 i40e_status i40e_aq_add_vsi(struct i40e_hw *hw, 1969 struct i40e_vsi_context *vsi_ctx, 1970 struct i40e_asq_cmd_details *cmd_details) 1971 { 1972 struct i40e_aq_desc desc; 1973 struct i40e_aqc_add_get_update_vsi *cmd = 1974 (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw; 1975 struct i40e_aqc_add_get_update_vsi_completion *resp = 1976 (struct i40e_aqc_add_get_update_vsi_completion *) 1977 &desc.params.raw; 1978 i40e_status status; 1979 1980 i40e_fill_default_direct_cmd_desc(&desc, 1981 i40e_aqc_opc_add_vsi); 1982 1983 cmd->uplink_seid = cpu_to_le16(vsi_ctx->uplink_seid); 1984 cmd->connection_type = vsi_ctx->connection_type; 1985 cmd->vf_id = vsi_ctx->vf_num; 1986 cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags); 1987 1988 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 1989 1990 status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info, 1991 sizeof(vsi_ctx->info), cmd_details); 1992 1993 if (status) 1994 goto aq_add_vsi_exit; 1995 1996 vsi_ctx->seid = le16_to_cpu(resp->seid); 1997 vsi_ctx->vsi_number = le16_to_cpu(resp->vsi_number); 1998 vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used); 1999 vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free); 2000 2001 aq_add_vsi_exit: 2002 return status; 2003 } 2004 2005 /** 2006 * i40e_aq_set_default_vsi 2007 * @hw: pointer to the hw struct 2008 * @seid: vsi number 2009 * @cmd_details: pointer to command details structure or NULL 2010 **/ 2011 i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw, 2012 u16 seid, 2013 struct i40e_asq_cmd_details *cmd_details) 2014 { 2015 struct i40e_aq_desc desc; 2016 struct i40e_aqc_set_vsi_promiscuous_modes *cmd = 2017 (struct i40e_aqc_set_vsi_promiscuous_modes *) 2018 &desc.params.raw; 2019 i40e_status status; 2020 2021 i40e_fill_default_direct_cmd_desc(&desc, 2022 i40e_aqc_opc_set_vsi_promiscuous_modes); 2023 2024 cmd->promiscuous_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT); 2025 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT); 2026 cmd->seid = 
cpu_to_le16(seid); 2027 2028 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2029 2030 return status; 2031 } 2032 2033 /** 2034 * i40e_aq_clear_default_vsi 2035 * @hw: pointer to the hw struct 2036 * @seid: vsi number 2037 * @cmd_details: pointer to command details structure or NULL 2038 **/ 2039 i40e_status i40e_aq_clear_default_vsi(struct i40e_hw *hw, 2040 u16 seid, 2041 struct i40e_asq_cmd_details *cmd_details) 2042 { 2043 struct i40e_aq_desc desc; 2044 struct i40e_aqc_set_vsi_promiscuous_modes *cmd = 2045 (struct i40e_aqc_set_vsi_promiscuous_modes *) 2046 &desc.params.raw; 2047 i40e_status status; 2048 2049 i40e_fill_default_direct_cmd_desc(&desc, 2050 i40e_aqc_opc_set_vsi_promiscuous_modes); 2051 2052 cmd->promiscuous_flags = cpu_to_le16(0); 2053 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT); 2054 cmd->seid = cpu_to_le16(seid); 2055 2056 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2057 2058 return status; 2059 } 2060 2061 /** 2062 * i40e_aq_set_vsi_unicast_promiscuous 2063 * @hw: pointer to the hw struct 2064 * @seid: vsi number 2065 * @set: set unicast promiscuous enable/disable 2066 * @cmd_details: pointer to command details structure or NULL 2067 * @rx_only_promisc: flag to decide if egress traffic gets mirrored in promisc 2068 **/ 2069 i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw, 2070 u16 seid, bool set, 2071 struct i40e_asq_cmd_details *cmd_details, 2072 bool rx_only_promisc) 2073 { 2074 struct i40e_aq_desc desc; 2075 struct i40e_aqc_set_vsi_promiscuous_modes *cmd = 2076 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; 2077 i40e_status status; 2078 u16 flags = 0; 2079 2080 i40e_fill_default_direct_cmd_desc(&desc, 2081 i40e_aqc_opc_set_vsi_promiscuous_modes); 2082 2083 if (set) { 2084 flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST; 2085 if (rx_only_promisc && 2086 (((hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver >= 5)) || 2087 (hw->aq.api_maj_ver > 1))) 2088 flags |= I40E_AQC_SET_VSI_PROMISC_TX; 2089 } 2090 2091 cmd->promiscuous_flags = cpu_to_le16(flags); 2092 2093 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST); 2094 if (((hw->aq.api_maj_ver >= 1) && (hw->aq.api_min_ver >= 5)) || 2095 (hw->aq.api_maj_ver > 1)) 2096 cmd->valid_flags |= cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_TX); 2097 2098 cmd->seid = cpu_to_le16(seid); 2099 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2100 2101 return status; 2102 } 2103 2104 /** 2105 * i40e_aq_set_vsi_multicast_promiscuous 2106 * @hw: pointer to the hw struct 2107 * @seid: vsi number 2108 * @set: set multicast promiscuous enable/disable 2109 * @cmd_details: pointer to command details structure or NULL 2110 **/ 2111 i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw, 2112 u16 seid, bool set, struct i40e_asq_cmd_details *cmd_details) 2113 { 2114 struct i40e_aq_desc desc; 2115 struct i40e_aqc_set_vsi_promiscuous_modes *cmd = 2116 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; 2117 i40e_status status; 2118 u16 flags = 0; 2119 2120 i40e_fill_default_direct_cmd_desc(&desc, 2121 i40e_aqc_opc_set_vsi_promiscuous_modes); 2122 2123 if (set) 2124 flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST; 2125 2126 cmd->promiscuous_flags = cpu_to_le16(flags); 2127 2128 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_MULTICAST); 2129 2130 cmd->seid = cpu_to_le16(seid); 2131 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2132 2133 return status; 2134 } 2135 2136 /** 2137 * 
i40e_aq_set_vsi_mc_promisc_on_vlan 2138 * @hw: pointer to the hw struct 2139 * @seid: vsi number 2140 * @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN 2141 * @vid: The VLAN tag filter - capture any multicast packet with this VLAN tag 2142 * @cmd_details: pointer to command details structure or NULL 2143 **/ 2144 enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw, 2145 u16 seid, bool enable, 2146 u16 vid, 2147 struct i40e_asq_cmd_details *cmd_details) 2148 { 2149 struct i40e_aq_desc desc; 2150 struct i40e_aqc_set_vsi_promiscuous_modes *cmd = 2151 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; 2152 enum i40e_status_code status; 2153 u16 flags = 0; 2154 2155 i40e_fill_default_direct_cmd_desc(&desc, 2156 i40e_aqc_opc_set_vsi_promiscuous_modes); 2157 2158 if (enable) 2159 flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST; 2160 2161 cmd->promiscuous_flags = cpu_to_le16(flags); 2162 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_MULTICAST); 2163 cmd->seid = cpu_to_le16(seid); 2164 cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID); 2165 2166 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2167 2168 return status; 2169 } 2170 2171 /** 2172 * i40e_aq_set_vsi_uc_promisc_on_vlan 2173 * @hw: pointer to the hw struct 2174 * @seid: vsi number 2175 * @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN 2176 * @vid: The VLAN tag filter - capture any unicast packet with this VLAN tag 2177 * @cmd_details: pointer to command details structure or NULL 2178 **/ 2179 enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw, 2180 u16 seid, bool enable, 2181 u16 vid, 2182 struct i40e_asq_cmd_details *cmd_details) 2183 { 2184 struct i40e_aq_desc desc; 2185 struct i40e_aqc_set_vsi_promiscuous_modes *cmd = 2186 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; 2187 enum i40e_status_code status; 2188 u16 flags = 0; 2189 2190 i40e_fill_default_direct_cmd_desc(&desc, 2191 i40e_aqc_opc_set_vsi_promiscuous_modes); 2192 2193 if (enable) 2194 flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST; 2195 2196 cmd->promiscuous_flags = cpu_to_le16(flags); 2197 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST); 2198 cmd->seid = cpu_to_le16(seid); 2199 cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID); 2200 2201 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2202 2203 return status; 2204 } 2205 2206 /** 2207 * i40e_aq_set_vsi_bc_promisc_on_vlan 2208 * @hw: pointer to the hw struct 2209 * @seid: vsi number 2210 * @enable: set broadcast promiscuous enable/disable for a given VLAN 2211 * @vid: The VLAN tag filter - capture any broadcast packet with this VLAN tag 2212 * @cmd_details: pointer to command details structure or NULL 2213 **/ 2214 i40e_status i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw, 2215 u16 seid, bool enable, u16 vid, 2216 struct i40e_asq_cmd_details *cmd_details) 2217 { 2218 struct i40e_aq_desc desc; 2219 struct i40e_aqc_set_vsi_promiscuous_modes *cmd = 2220 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; 2221 i40e_status status; 2222 u16 flags = 0; 2223 2224 i40e_fill_default_direct_cmd_desc(&desc, 2225 i40e_aqc_opc_set_vsi_promiscuous_modes); 2226 2227 if (enable) 2228 flags |= I40E_AQC_SET_VSI_PROMISC_BROADCAST; 2229 2230 cmd->promiscuous_flags = cpu_to_le16(flags); 2231 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST); 2232 cmd->seid = cpu_to_le16(seid); 2233 cmd->vlan_tag = 
cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID); 2234 2235 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2236 2237 return status; 2238 } 2239 2240 /** 2241 * i40e_aq_set_vsi_broadcast 2242 * @hw: pointer to the hw struct 2243 * @seid: vsi number 2244 * @set_filter: true to set filter, false to clear filter 2245 * @cmd_details: pointer to command details structure or NULL 2246 * 2247 * Set or clear the broadcast promiscuous flag (filter) for a given VSI. 2248 **/ 2249 i40e_status i40e_aq_set_vsi_broadcast(struct i40e_hw *hw, 2250 u16 seid, bool set_filter, 2251 struct i40e_asq_cmd_details *cmd_details) 2252 { 2253 struct i40e_aq_desc desc; 2254 struct i40e_aqc_set_vsi_promiscuous_modes *cmd = 2255 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; 2256 i40e_status status; 2257 2258 i40e_fill_default_direct_cmd_desc(&desc, 2259 i40e_aqc_opc_set_vsi_promiscuous_modes); 2260 2261 if (set_filter) 2262 cmd->promiscuous_flags 2263 |= cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST); 2264 else 2265 cmd->promiscuous_flags 2266 &= cpu_to_le16(~I40E_AQC_SET_VSI_PROMISC_BROADCAST); 2267 2268 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST); 2269 cmd->seid = cpu_to_le16(seid); 2270 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2271 2272 return status; 2273 } 2274 2275 /** 2276 * i40e_aq_set_vsi_vlan_promisc - control the VLAN promiscuous setting 2277 * @hw: pointer to the hw struct 2278 * @seid: vsi number 2279 * @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN 2280 * @cmd_details: pointer to command details structure or NULL 2281 **/ 2282 i40e_status i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw, 2283 u16 seid, bool enable, 2284 struct i40e_asq_cmd_details *cmd_details) 2285 { 2286 struct i40e_aq_desc desc; 2287 struct i40e_aqc_set_vsi_promiscuous_modes *cmd = 2288 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; 2289 i40e_status status; 2290 u16 flags = 0; 2291 2292 i40e_fill_default_direct_cmd_desc(&desc, 2293 i40e_aqc_opc_set_vsi_promiscuous_modes); 2294 if (enable) 2295 flags |= I40E_AQC_SET_VSI_PROMISC_VLAN; 2296 2297 cmd->promiscuous_flags = cpu_to_le16(flags); 2298 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_VLAN); 2299 cmd->seid = cpu_to_le16(seid); 2300 2301 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2302 2303 return status; 2304 } 2305 2306 /** 2307 * i40e_get_vsi_params - get VSI configuration info 2308 * @hw: pointer to the hw struct 2309 * @vsi_ctx: pointer to a vsi context struct 2310 * @cmd_details: pointer to command details structure or NULL 2311 **/ 2312 i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw, 2313 struct i40e_vsi_context *vsi_ctx, 2314 struct i40e_asq_cmd_details *cmd_details) 2315 { 2316 struct i40e_aq_desc desc; 2317 struct i40e_aqc_add_get_update_vsi *cmd = 2318 (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw; 2319 struct i40e_aqc_add_get_update_vsi_completion *resp = 2320 (struct i40e_aqc_add_get_update_vsi_completion *) 2321 &desc.params.raw; 2322 i40e_status status; 2323 2324 i40e_fill_default_direct_cmd_desc(&desc, 2325 i40e_aqc_opc_get_vsi_parameters); 2326 2327 cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid); 2328 2329 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 2330 2331 status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info, 2332 sizeof(vsi_ctx->info), NULL); 2333 2334 if (status) 2335 goto aq_get_vsi_params_exit; 2336 2337 vsi_ctx->seid = le16_to_cpu(resp->seid); 2338 vsi_ctx->vsi_number = 
le16_to_cpu(resp->vsi_number); 2339 vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used); 2340 vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free); 2341 2342 aq_get_vsi_params_exit: 2343 return status; 2344 } 2345 2346 /** 2347 * i40e_aq_update_vsi_params 2348 * @hw: pointer to the hw struct 2349 * @vsi_ctx: pointer to a vsi context struct 2350 * @cmd_details: pointer to command details structure or NULL 2351 * 2352 * Update a VSI context. 2353 **/ 2354 i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw, 2355 struct i40e_vsi_context *vsi_ctx, 2356 struct i40e_asq_cmd_details *cmd_details) 2357 { 2358 struct i40e_aq_desc desc; 2359 struct i40e_aqc_add_get_update_vsi *cmd = 2360 (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw; 2361 struct i40e_aqc_add_get_update_vsi_completion *resp = 2362 (struct i40e_aqc_add_get_update_vsi_completion *) 2363 &desc.params.raw; 2364 i40e_status status; 2365 2366 i40e_fill_default_direct_cmd_desc(&desc, 2367 i40e_aqc_opc_update_vsi_parameters); 2368 cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid); 2369 2370 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 2371 2372 status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info, 2373 sizeof(vsi_ctx->info), cmd_details); 2374 2375 vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used); 2376 vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free); 2377 2378 return status; 2379 } 2380 2381 /** 2382 * i40e_aq_get_switch_config 2383 * @hw: pointer to the hardware structure 2384 * @buf: pointer to the result buffer 2385 * @buf_size: length of input buffer 2386 * @start_seid: seid to start for the report, 0 == beginning 2387 * @cmd_details: pointer to command details structure or NULL 2388 * 2389 * Fill the buf with switch configuration returned from AdminQ command 2390 **/ 2391 i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw, 2392 struct i40e_aqc_get_switch_config_resp *buf, 2393 u16 buf_size, u16 *start_seid, 2394 struct i40e_asq_cmd_details *cmd_details) 2395 { 2396 struct i40e_aq_desc desc; 2397 struct i40e_aqc_switch_seid *scfg = 2398 (struct i40e_aqc_switch_seid *)&desc.params.raw; 2399 i40e_status status; 2400 2401 i40e_fill_default_direct_cmd_desc(&desc, 2402 i40e_aqc_opc_get_switch_config); 2403 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 2404 if (buf_size > I40E_AQ_LARGE_BUF) 2405 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 2406 scfg->seid = cpu_to_le16(*start_seid); 2407 2408 status = i40e_asq_send_command(hw, &desc, buf, buf_size, cmd_details); 2409 *start_seid = le16_to_cpu(scfg->seid); 2410 2411 return status; 2412 } 2413 2414 /** 2415 * i40e_aq_set_switch_config 2416 * @hw: pointer to the hardware structure 2417 * @flags: bit flag values to set 2418 * @valid_flags: which bit flags to set 2419 * @mode: cloud filter mode 2420 * @cmd_details: pointer to command details structure or NULL 2421 * 2422 * Set switch configuration bits 2423 **/ 2424 enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw, 2425 u16 flags, 2426 u16 valid_flags, u8 mode, 2427 struct i40e_asq_cmd_details *cmd_details) 2428 { 2429 struct i40e_aq_desc desc; 2430 struct i40e_aqc_set_switch_config *scfg = 2431 (struct i40e_aqc_set_switch_config *)&desc.params.raw; 2432 enum i40e_status_code status; 2433 2434 i40e_fill_default_direct_cmd_desc(&desc, 2435 i40e_aqc_opc_set_switch_config); 2436 scfg->flags = cpu_to_le16(flags); 2437 scfg->valid_flags = cpu_to_le16(valid_flags); 2438 scfg->mode = mode; 2439 if (hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) { 2440 scfg->switch_tag = 
cpu_to_le16(hw->switch_tag); 2441 scfg->first_tag = cpu_to_le16(hw->first_tag); 2442 scfg->second_tag = cpu_to_le16(hw->second_tag); 2443 } 2444 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2445 2446 return status; 2447 } 2448 2449 /** 2450 * i40e_aq_get_firmware_version 2451 * @hw: pointer to the hw struct 2452 * @fw_major_version: firmware major version 2453 * @fw_minor_version: firmware minor version 2454 * @fw_build: firmware build number 2455 * @api_major_version: major queue version 2456 * @api_minor_version: minor queue version 2457 * @cmd_details: pointer to command details structure or NULL 2458 * 2459 * Get the firmware version from the admin queue commands 2460 **/ 2461 i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw, 2462 u16 *fw_major_version, u16 *fw_minor_version, 2463 u32 *fw_build, 2464 u16 *api_major_version, u16 *api_minor_version, 2465 struct i40e_asq_cmd_details *cmd_details) 2466 { 2467 struct i40e_aq_desc desc; 2468 struct i40e_aqc_get_version *resp = 2469 (struct i40e_aqc_get_version *)&desc.params.raw; 2470 i40e_status status; 2471 2472 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_version); 2473 2474 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2475 2476 if (!status) { 2477 if (fw_major_version) 2478 *fw_major_version = le16_to_cpu(resp->fw_major); 2479 if (fw_minor_version) 2480 *fw_minor_version = le16_to_cpu(resp->fw_minor); 2481 if (fw_build) 2482 *fw_build = le32_to_cpu(resp->fw_build); 2483 if (api_major_version) 2484 *api_major_version = le16_to_cpu(resp->api_major); 2485 if (api_minor_version) 2486 *api_minor_version = le16_to_cpu(resp->api_minor); 2487 } 2488 2489 return status; 2490 } 2491 2492 /** 2493 * i40e_aq_send_driver_version 2494 * @hw: pointer to the hw struct 2495 * @dv: driver's major, minor version 2496 * @cmd_details: pointer to command details structure or NULL 2497 * 2498 * Send the driver version to the firmware 2499 **/ 2500 i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw, 2501 struct i40e_driver_version *dv, 2502 struct i40e_asq_cmd_details *cmd_details) 2503 { 2504 struct i40e_aq_desc desc; 2505 struct i40e_aqc_driver_version *cmd = 2506 (struct i40e_aqc_driver_version *)&desc.params.raw; 2507 i40e_status status; 2508 u16 len; 2509 2510 if (dv == NULL) 2511 return I40E_ERR_PARAM; 2512 2513 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_driver_version); 2514 2515 desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD); 2516 cmd->driver_major_ver = dv->major_version; 2517 cmd->driver_minor_ver = dv->minor_version; 2518 cmd->driver_build_ver = dv->build_version; 2519 cmd->driver_subbuild_ver = dv->subbuild_version; 2520 2521 len = 0; 2522 while (len < sizeof(dv->driver_string) && 2523 (dv->driver_string[len] < 0x80) && 2524 dv->driver_string[len]) 2525 len++; 2526 status = i40e_asq_send_command(hw, &desc, dv->driver_string, 2527 len, cmd_details); 2528 2529 return status; 2530 } 2531 2532 /** 2533 * i40e_get_link_status - get status of the HW network link 2534 * @hw: pointer to the hw struct 2535 * @link_up: pointer to bool (true/false = linkup/linkdown) 2536 * 2537 * Variable link_up true if link is up, false if link is down. 
2538 * The variable link_up is invalid if the returned status is non-zero 2539 * 2540 * Side effect: LinkStatusEvent reporting becomes enabled 2541 **/ 2542 i40e_status i40e_get_link_status(struct i40e_hw *hw, bool *link_up) 2543 { 2544 i40e_status status = 0; 2545 2546 if (hw->phy.get_link_info) { 2547 status = i40e_update_link_info(hw); 2548 2549 if (status) 2550 i40e_debug(hw, I40E_DEBUG_LINK, "get link failed: status %d\n", 2551 status); 2552 } 2553 2554 *link_up = hw->phy.link_info.link_info & I40E_AQ_LINK_UP; 2555 2556 return status; 2557 } 2558 2559 /** 2560 * i40e_update_link_info - update status of the HW network link 2561 * @hw: pointer to the hw struct 2562 **/ 2563 i40e_status i40e_update_link_info(struct i40e_hw *hw) 2564 { 2565 struct i40e_aq_get_phy_abilities_resp abilities; 2566 i40e_status status = 0; 2567 2568 status = i40e_aq_get_link_info(hw, true, NULL, NULL); 2569 if (status) 2570 return status; 2571 2572 /* extra checking needed to ensure link info to user is timely */ 2573 if ((hw->phy.link_info.link_info & I40E_AQ_MEDIA_AVAILABLE) && 2574 ((hw->phy.link_info.link_info & I40E_AQ_LINK_UP) || 2575 !(hw->phy.link_info_old.link_info & I40E_AQ_LINK_UP))) { 2576 status = i40e_aq_get_phy_capabilities(hw, false, false, 2577 &abilities, NULL); 2578 if (status) 2579 return status; 2580 2581 hw->phy.link_info.req_fec_info = 2582 abilities.fec_cfg_curr_mod_ext_info & 2583 (I40E_AQ_REQUEST_FEC_KR | I40E_AQ_REQUEST_FEC_RS); 2584 2585 memcpy(hw->phy.link_info.module_type, &abilities.module_type, 2586 sizeof(hw->phy.link_info.module_type)); 2587 } 2588 2589 return status; 2590 } 2591 2592 /** 2593 * i40e_aq_add_veb - Insert a VEB between the VSI and the MAC 2594 * @hw: pointer to the hw struct 2595 * @uplink_seid: the MAC or other gizmo SEID 2596 * @downlink_seid: the VSI SEID 2597 * @enabled_tc: bitmap of TCs to be enabled 2598 * @default_port: true for default port VSI, false for control port 2599 * @veb_seid: pointer to where to put the resulting VEB SEID 2600 * @enable_stats: true to turn on VEB stats 2601 * @cmd_details: pointer to command details structure or NULL 2602 * 2603 * This asks the FW to add a VEB between the uplink and downlink 2604 * elements. If the uplink SEID is 0, this will be a floating VEB.
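 * The uplink and downlink SEIDs must either both be non-zero or both be
 * zero; a mismatched pair is rejected with I40E_ERR_PARAM.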
2605 **/ 2606 i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid, 2607 u16 downlink_seid, u8 enabled_tc, 2608 bool default_port, u16 *veb_seid, 2609 bool enable_stats, 2610 struct i40e_asq_cmd_details *cmd_details) 2611 { 2612 struct i40e_aq_desc desc; 2613 struct i40e_aqc_add_veb *cmd = 2614 (struct i40e_aqc_add_veb *)&desc.params.raw; 2615 struct i40e_aqc_add_veb_completion *resp = 2616 (struct i40e_aqc_add_veb_completion *)&desc.params.raw; 2617 i40e_status status; 2618 u16 veb_flags = 0; 2619 2620 /* SEIDs need to either both be set or both be 0 for floating VEB */ 2621 if (!!uplink_seid != !!downlink_seid) 2622 return I40E_ERR_PARAM; 2623 2624 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_veb); 2625 2626 cmd->uplink_seid = cpu_to_le16(uplink_seid); 2627 cmd->downlink_seid = cpu_to_le16(downlink_seid); 2628 cmd->enable_tcs = enabled_tc; 2629 if (!uplink_seid) 2630 veb_flags |= I40E_AQC_ADD_VEB_FLOATING; 2631 if (default_port) 2632 veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT; 2633 else 2634 veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DATA; 2635 2636 /* reverse logic here: set the bitflag to disable the stats */ 2637 if (!enable_stats) 2638 veb_flags |= I40E_AQC_ADD_VEB_ENABLE_DISABLE_STATS; 2639 2640 cmd->veb_flags = cpu_to_le16(veb_flags); 2641 2642 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2643 2644 if (!status && veb_seid) 2645 *veb_seid = le16_to_cpu(resp->veb_seid); 2646 2647 return status; 2648 } 2649 2650 /** 2651 * i40e_aq_get_veb_parameters - Retrieve VEB parameters 2652 * @hw: pointer to the hw struct 2653 * @veb_seid: the SEID of the VEB to query 2654 * @switch_id: the uplink switch id 2655 * @floating: set to true if the VEB is floating 2656 * @statistic_index: index of the stats counter block for this VEB 2657 * @vebs_used: number of VEB's used by function 2658 * @vebs_free: total VEB's not reserved by any function 2659 * @cmd_details: pointer to command details structure or NULL 2660 * 2661 * This retrieves the parameters for a particular VEB, specified by 2662 * uplink_seid, and returns them to the caller. 
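 * Any of the output pointers (switch_id, floating, statistic_index,
 * vebs_used, vebs_free) may be NULL if that value is not needed.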
2663 **/ 2664 i40e_status i40e_aq_get_veb_parameters(struct i40e_hw *hw, 2665 u16 veb_seid, u16 *switch_id, 2666 bool *floating, u16 *statistic_index, 2667 u16 *vebs_used, u16 *vebs_free, 2668 struct i40e_asq_cmd_details *cmd_details) 2669 { 2670 struct i40e_aq_desc desc; 2671 struct i40e_aqc_get_veb_parameters_completion *cmd_resp = 2672 (struct i40e_aqc_get_veb_parameters_completion *) 2673 &desc.params.raw; 2674 i40e_status status; 2675 2676 if (veb_seid == 0) 2677 return I40E_ERR_PARAM; 2678 2679 i40e_fill_default_direct_cmd_desc(&desc, 2680 i40e_aqc_opc_get_veb_parameters); 2681 cmd_resp->seid = cpu_to_le16(veb_seid); 2682 2683 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2684 if (status) 2685 goto get_veb_exit; 2686 2687 if (switch_id) 2688 *switch_id = le16_to_cpu(cmd_resp->switch_id); 2689 if (statistic_index) 2690 *statistic_index = le16_to_cpu(cmd_resp->statistic_index); 2691 if (vebs_used) 2692 *vebs_used = le16_to_cpu(cmd_resp->vebs_used); 2693 if (vebs_free) 2694 *vebs_free = le16_to_cpu(cmd_resp->vebs_free); 2695 if (floating) { 2696 u16 flags = le16_to_cpu(cmd_resp->veb_flags); 2697 2698 if (flags & I40E_AQC_ADD_VEB_FLOATING) 2699 *floating = true; 2700 else 2701 *floating = false; 2702 } 2703 2704 get_veb_exit: 2705 return status; 2706 } 2707 2708 /** 2709 * i40e_aq_add_macvlan 2710 * @hw: pointer to the hw struct 2711 * @seid: VSI for the mac address 2712 * @mv_list: list of macvlans to be added 2713 * @count: length of the list 2714 * @cmd_details: pointer to command details structure or NULL 2715 * 2716 * Add MAC/VLAN addresses to the HW filtering 2717 **/ 2718 i40e_status i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid, 2719 struct i40e_aqc_add_macvlan_element_data *mv_list, 2720 u16 count, struct i40e_asq_cmd_details *cmd_details) 2721 { 2722 struct i40e_aq_desc desc; 2723 struct i40e_aqc_macvlan *cmd = 2724 (struct i40e_aqc_macvlan *)&desc.params.raw; 2725 i40e_status status; 2726 u16 buf_size; 2727 int i; 2728 2729 if (count == 0 || !mv_list || !hw) 2730 return I40E_ERR_PARAM; 2731 2732 buf_size = count * sizeof(*mv_list); 2733 2734 /* prep the rest of the request */ 2735 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_macvlan); 2736 cmd->num_addresses = cpu_to_le16(count); 2737 cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid); 2738 cmd->seid[1] = 0; 2739 cmd->seid[2] = 0; 2740 2741 for (i = 0; i < count; i++) 2742 if (is_multicast_ether_addr(mv_list[i].mac_addr)) 2743 mv_list[i].flags |= 2744 cpu_to_le16(I40E_AQC_MACVLAN_ADD_USE_SHARED_MAC); 2745 2746 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 2747 if (buf_size > I40E_AQ_LARGE_BUF) 2748 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 2749 2750 status = i40e_asq_send_command(hw, &desc, mv_list, buf_size, 2751 cmd_details); 2752 2753 return status; 2754 } 2755 2756 /** 2757 * i40e_aq_remove_macvlan 2758 * @hw: pointer to the hw struct 2759 * @seid: VSI for the mac address 2760 * @mv_list: list of macvlans to be removed 2761 * @count: length of the list 2762 * @cmd_details: pointer to command details structure or NULL 2763 * 2764 * Remove MAC/VLAN addresses from the HW filtering 2765 **/ 2766 i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid, 2767 struct i40e_aqc_remove_macvlan_element_data *mv_list, 2768 u16 count, struct i40e_asq_cmd_details *cmd_details) 2769 { 2770 struct i40e_aq_desc desc; 2771 struct i40e_aqc_macvlan *cmd = 2772 (struct i40e_aqc_macvlan *)&desc.params.raw; 2773 i40e_status status; 2774 u16 buf_size; 2775 
2776 if (count == 0 || !mv_list || !hw) 2777 return I40E_ERR_PARAM; 2778 2779 buf_size = count * sizeof(*mv_list); 2780 2781 /* prep the rest of the request */ 2782 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_macvlan); 2783 cmd->num_addresses = cpu_to_le16(count); 2784 cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid); 2785 cmd->seid[1] = 0; 2786 cmd->seid[2] = 0; 2787 2788 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 2789 if (buf_size > I40E_AQ_LARGE_BUF) 2790 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 2791 2792 status = i40e_asq_send_command(hw, &desc, mv_list, buf_size, 2793 cmd_details); 2794 2795 return status; 2796 } 2797 2798 /** 2799 * i40e_mirrorrule_op - Internal helper function to add/delete mirror rule 2800 * @hw: pointer to the hw struct 2801 * @opcode: AQ opcode for add or delete mirror rule 2802 * @sw_seid: Switch SEID (to which rule refers) 2803 * @rule_type: Rule Type (ingress/egress/VLAN) 2804 * @id: Destination VSI SEID or Rule ID 2805 * @count: length of the list 2806 * @mr_list: list of mirrored VSI SEIDs or VLAN IDs 2807 * @cmd_details: pointer to command details structure or NULL 2808 * @rule_id: Rule ID returned from FW 2809 * @rule_used: Number of rules used in internal switch 2810 * @rule_free: Number of rules free in internal switch 2811 * 2812 * Add/Delete a mirror rule to a specific switch. Mirror rules are supported for 2813 * VEBs/VEPA elements only 2814 **/ 2815 static i40e_status i40e_mirrorrule_op(struct i40e_hw *hw, 2816 u16 opcode, u16 sw_seid, u16 rule_type, u16 id, 2817 u16 count, __le16 *mr_list, 2818 struct i40e_asq_cmd_details *cmd_details, 2819 u16 *rule_id, u16 *rules_used, u16 *rules_free) 2820 { 2821 struct i40e_aq_desc desc; 2822 struct i40e_aqc_add_delete_mirror_rule *cmd = 2823 (struct i40e_aqc_add_delete_mirror_rule *)&desc.params.raw; 2824 struct i40e_aqc_add_delete_mirror_rule_completion *resp = 2825 (struct i40e_aqc_add_delete_mirror_rule_completion *)&desc.params.raw; 2826 i40e_status status; 2827 u16 buf_size; 2828 2829 buf_size = count * sizeof(*mr_list); 2830 2831 /* prep the rest of the request */ 2832 i40e_fill_default_direct_cmd_desc(&desc, opcode); 2833 cmd->seid = cpu_to_le16(sw_seid); 2834 cmd->rule_type = cpu_to_le16(rule_type & 2835 I40E_AQC_MIRROR_RULE_TYPE_MASK); 2836 cmd->num_entries = cpu_to_le16(count); 2837 /* Dest VSI for add, rule_id for delete */ 2838 cmd->destination = cpu_to_le16(id); 2839 if (mr_list) { 2840 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | 2841 I40E_AQ_FLAG_RD)); 2842 if (buf_size > I40E_AQ_LARGE_BUF) 2843 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 2844 } 2845 2846 status = i40e_asq_send_command(hw, &desc, mr_list, buf_size, 2847 cmd_details); 2848 if (!status || 2849 hw->aq.asq_last_status == I40E_AQ_RC_ENOSPC) { 2850 if (rule_id) 2851 *rule_id = le16_to_cpu(resp->rule_id); 2852 if (rules_used) 2853 *rules_used = le16_to_cpu(resp->mirror_rules_used); 2854 if (rules_free) 2855 *rules_free = le16_to_cpu(resp->mirror_rules_free); 2856 } 2857 return status; 2858 } 2859 2860 /** 2861 * i40e_aq_add_mirrorrule - add a mirror rule 2862 * @hw: pointer to the hw struct 2863 * @sw_seid: Switch SEID (to which rule refers) 2864 * @rule_type: Rule Type (ingress/egress/VLAN) 2865 * @dest_vsi: SEID of VSI to which packets will be mirrored 2866 * @count: length of the list 2867 * @mr_list: list of mirrored VSI SEIDs or VLAN IDs 2868 * @cmd_details: pointer to command details structure or NULL 2869 * @rule_id: Rule ID returned from FW 2870 * 
@rules_used: Number of rules used in internal switch 2871 * @rules_free: Number of rules free in internal switch 2872 * 2873 * Add mirror rule. Mirror rules are supported for VEBs or VEPA elements only 2874 **/ 2875 i40e_status i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid, 2876 u16 rule_type, u16 dest_vsi, u16 count, __le16 *mr_list, 2877 struct i40e_asq_cmd_details *cmd_details, 2878 u16 *rule_id, u16 *rules_used, u16 *rules_free) 2879 { 2880 if (!(rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS || 2881 rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS)) { 2882 if (count == 0 || !mr_list) 2883 return I40E_ERR_PARAM; 2884 } 2885 2886 return i40e_mirrorrule_op(hw, i40e_aqc_opc_add_mirror_rule, sw_seid, 2887 rule_type, dest_vsi, count, mr_list, 2888 cmd_details, rule_id, rules_used, rules_free); 2889 } 2890 2891 /** 2892 * i40e_aq_delete_mirrorrule - delete a mirror rule 2893 * @hw: pointer to the hw struct 2894 * @sw_seid: Switch SEID (to which rule refers) 2895 * @rule_type: Rule Type (ingress/egress/VLAN) 2896 * @count: length of the list 2897 * @rule_id: Rule ID that is returned in the receive desc as part of 2898 * add_mirrorrule. 2899 * @mr_list: list of mirrored VLAN IDs to be removed 2900 * @cmd_details: pointer to command details structure or NULL 2901 * @rules_used: Number of rules used in internal switch 2902 * @rules_free: Number of rules free in internal switch 2903 * 2904 * Delete a mirror rule. Mirror rules are supported for VEBs/VEPA elements only 2905 **/ 2906 i40e_status i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid, 2907 u16 rule_type, u16 rule_id, u16 count, __le16 *mr_list, 2908 struct i40e_asq_cmd_details *cmd_details, 2909 u16 *rules_used, u16 *rules_free) 2910 { 2911 /* Rule ID has to be valid except rule_type: INGRESS VLAN mirroring */ 2912 if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) { 2913 /* count and mr_list shall be valid for rule_type INGRESS VLAN 2914 * mirroring. For other rule_type, count and mr_list should 2915 * not matter.
2916 */ 2917 if (count == 0 || !mr_list) 2918 return I40E_ERR_PARAM; 2919 } 2920 2921 return i40e_mirrorrule_op(hw, i40e_aqc_opc_delete_mirror_rule, sw_seid, 2922 rule_type, rule_id, count, mr_list, 2923 cmd_details, NULL, rules_used, rules_free); 2924 } 2925 2926 /** 2927 * i40e_aq_send_msg_to_vf 2928 * @hw: pointer to the hardware structure 2929 * @vfid: VF id to send msg 2930 * @v_opcode: opcodes for VF-PF communication 2931 * @v_retval: return error code 2932 * @msg: pointer to the msg buffer 2933 * @msglen: msg length 2934 * @cmd_details: pointer to command details 2935 * 2936 * send msg to vf 2937 **/ 2938 i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid, 2939 u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen, 2940 struct i40e_asq_cmd_details *cmd_details) 2941 { 2942 struct i40e_aq_desc desc; 2943 struct i40e_aqc_pf_vf_message *cmd = 2944 (struct i40e_aqc_pf_vf_message *)&desc.params.raw; 2945 i40e_status status; 2946 2947 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_vf); 2948 cmd->id = cpu_to_le32(vfid); 2949 desc.cookie_high = cpu_to_le32(v_opcode); 2950 desc.cookie_low = cpu_to_le32(v_retval); 2951 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_SI); 2952 if (msglen) { 2953 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | 2954 I40E_AQ_FLAG_RD)); 2955 if (msglen > I40E_AQ_LARGE_BUF) 2956 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 2957 desc.datalen = cpu_to_le16(msglen); 2958 } 2959 status = i40e_asq_send_command(hw, &desc, msg, msglen, cmd_details); 2960 2961 return status; 2962 } 2963 2964 /** 2965 * i40e_aq_debug_read_register 2966 * @hw: pointer to the hw struct 2967 * @reg_addr: register address 2968 * @reg_val: register value 2969 * @cmd_details: pointer to command details structure or NULL 2970 * 2971 * Read the register using the admin queue commands 2972 **/ 2973 i40e_status i40e_aq_debug_read_register(struct i40e_hw *hw, 2974 u32 reg_addr, u64 *reg_val, 2975 struct i40e_asq_cmd_details *cmd_details) 2976 { 2977 struct i40e_aq_desc desc; 2978 struct i40e_aqc_debug_reg_read_write *cmd_resp = 2979 (struct i40e_aqc_debug_reg_read_write *)&desc.params.raw; 2980 i40e_status status; 2981 2982 if (reg_val == NULL) 2983 return I40E_ERR_PARAM; 2984 2985 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_read_reg); 2986 2987 cmd_resp->address = cpu_to_le32(reg_addr); 2988 2989 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2990 2991 if (!status) { 2992 *reg_val = ((u64)le32_to_cpu(cmd_resp->value_high) << 32) | 2993 (u64)le32_to_cpu(cmd_resp->value_low); 2994 } 2995 2996 return status; 2997 } 2998 2999 /** 3000 * i40e_aq_debug_write_register 3001 * @hw: pointer to the hw struct 3002 * @reg_addr: register address 3003 * @reg_val: register value 3004 * @cmd_details: pointer to command details structure or NULL 3005 * 3006 * Write to a register using the admin queue commands 3007 **/ 3008 i40e_status i40e_aq_debug_write_register(struct i40e_hw *hw, 3009 u32 reg_addr, u64 reg_val, 3010 struct i40e_asq_cmd_details *cmd_details) 3011 { 3012 struct i40e_aq_desc desc; 3013 struct i40e_aqc_debug_reg_read_write *cmd = 3014 (struct i40e_aqc_debug_reg_read_write *)&desc.params.raw; 3015 i40e_status status; 3016 3017 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_write_reg); 3018 3019 cmd->address = cpu_to_le32(reg_addr); 3020 cmd->value_high = cpu_to_le32((u32)(reg_val >> 32)); 3021 cmd->value_low = cpu_to_le32((u32)(reg_val & 0xFFFFFFFF)); 3022 3023 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 
3024 3025 return status; 3026 } 3027 3028 /** 3029 * i40e_aq_request_resource 3030 * @hw: pointer to the hw struct 3031 * @resource: resource id 3032 * @access: access type 3033 * @sdp_number: resource number 3034 * @timeout: the maximum time in ms that the driver may hold the resource 3035 * @cmd_details: pointer to command details structure or NULL 3036 * 3037 * requests common resource using the admin queue commands 3038 **/ 3039 i40e_status i40e_aq_request_resource(struct i40e_hw *hw, 3040 enum i40e_aq_resources_ids resource, 3041 enum i40e_aq_resource_access_type access, 3042 u8 sdp_number, u64 *timeout, 3043 struct i40e_asq_cmd_details *cmd_details) 3044 { 3045 struct i40e_aq_desc desc; 3046 struct i40e_aqc_request_resource *cmd_resp = 3047 (struct i40e_aqc_request_resource *)&desc.params.raw; 3048 i40e_status status; 3049 3050 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_request_resource); 3051 3052 cmd_resp->resource_id = cpu_to_le16(resource); 3053 cmd_resp->access_type = cpu_to_le16(access); 3054 cmd_resp->resource_number = cpu_to_le32(sdp_number); 3055 3056 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3057 /* The completion specifies the maximum time in ms that the driver 3058 * may hold the resource in the Timeout field. 3059 * If the resource is held by someone else, the command completes with 3060 * busy return value and the timeout field indicates the maximum time 3061 * the current owner of the resource has to free it. 3062 */ 3063 if (!status || hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) 3064 *timeout = le32_to_cpu(cmd_resp->timeout); 3065 3066 return status; 3067 } 3068 3069 /** 3070 * i40e_aq_release_resource 3071 * @hw: pointer to the hw struct 3072 * @resource: resource id 3073 * @sdp_number: resource number 3074 * @cmd_details: pointer to command details structure or NULL 3075 * 3076 * release common resource using the admin queue commands 3077 **/ 3078 i40e_status i40e_aq_release_resource(struct i40e_hw *hw, 3079 enum i40e_aq_resources_ids resource, 3080 u8 sdp_number, 3081 struct i40e_asq_cmd_details *cmd_details) 3082 { 3083 struct i40e_aq_desc desc; 3084 struct i40e_aqc_request_resource *cmd = 3085 (struct i40e_aqc_request_resource *)&desc.params.raw; 3086 i40e_status status; 3087 3088 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_release_resource); 3089 3090 cmd->resource_id = cpu_to_le16(resource); 3091 cmd->resource_number = cpu_to_le32(sdp_number); 3092 3093 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3094 3095 return status; 3096 } 3097 3098 /** 3099 * i40e_aq_read_nvm 3100 * @hw: pointer to the hw struct 3101 * @module_pointer: module pointer location in words from the NVM beginning 3102 * @offset: byte offset from the module beginning 3103 * @length: length of the section to be read (in bytes from the offset) 3104 * @data: command buffer (size [bytes] = length) 3105 * @last_command: tells if this is the last command in a series 3106 * @cmd_details: pointer to command details structure or NULL 3107 * 3108 * Read the NVM using the admin queue commands 3109 **/ 3110 i40e_status i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer, 3111 u32 offset, u16 length, void *data, 3112 bool last_command, 3113 struct i40e_asq_cmd_details *cmd_details) 3114 { 3115 struct i40e_aq_desc desc; 3116 struct i40e_aqc_nvm_update *cmd = 3117 (struct i40e_aqc_nvm_update *)&desc.params.raw; 3118 i40e_status status; 3119 3120 /* In offset the highest byte must be zeroed. 
*/ 3121 if (offset & 0xFF000000) { 3122 status = I40E_ERR_PARAM; 3123 goto i40e_aq_read_nvm_exit; 3124 } 3125 3126 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_read); 3127 3128 /* If this is the last command in a series, set the proper flag. */ 3129 if (last_command) 3130 cmd->command_flags |= I40E_AQ_NVM_LAST_CMD; 3131 cmd->module_pointer = module_pointer; 3132 cmd->offset = cpu_to_le32(offset); 3133 cmd->length = cpu_to_le16(length); 3134 3135 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 3136 if (length > I40E_AQ_LARGE_BUF) 3137 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 3138 3139 status = i40e_asq_send_command(hw, &desc, data, length, cmd_details); 3140 3141 i40e_aq_read_nvm_exit: 3142 return status; 3143 } 3144 3145 /** 3146 * i40e_aq_erase_nvm 3147 * @hw: pointer to the hw struct 3148 * @module_pointer: module pointer location in words from the NVM beginning 3149 * @offset: offset in the module (expressed in 4 KB from module's beginning) 3150 * @length: length of the section to be erased (expressed in 4 KB) 3151 * @last_command: tells if this is the last command in a series 3152 * @cmd_details: pointer to command details structure or NULL 3153 * 3154 * Erase the NVM sector using the admin queue commands 3155 **/ 3156 i40e_status i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer, 3157 u32 offset, u16 length, bool last_command, 3158 struct i40e_asq_cmd_details *cmd_details) 3159 { 3160 struct i40e_aq_desc desc; 3161 struct i40e_aqc_nvm_update *cmd = 3162 (struct i40e_aqc_nvm_update *)&desc.params.raw; 3163 i40e_status status; 3164 3165 /* In offset the highest byte must be zeroed. */ 3166 if (offset & 0xFF000000) { 3167 status = I40E_ERR_PARAM; 3168 goto i40e_aq_erase_nvm_exit; 3169 } 3170 3171 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_erase); 3172 3173 /* If this is the last command in a series, set the proper flag. */ 3174 if (last_command) 3175 cmd->command_flags |= I40E_AQ_NVM_LAST_CMD; 3176 cmd->module_pointer = module_pointer; 3177 cmd->offset = cpu_to_le32(offset); 3178 cmd->length = cpu_to_le16(length); 3179 3180 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3181 3182 i40e_aq_erase_nvm_exit: 3183 return status; 3184 } 3185 3186 /** 3187 * i40e_parse_discover_capabilities 3188 * @hw: pointer to the hw struct 3189 * @buff: pointer to a buffer containing device/function capability records 3190 * @cap_count: number of capability records in the list 3191 * @list_type_opc: type of capabilities list to parse 3192 * 3193 * Parse the device/function capabilities list. 
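 * Records are accumulated into hw->dev_caps or hw->func_caps depending on
 * @list_type_opc; hw->num_ports, hw->partition_id and hw->num_partitions
 * are derived from the parsed data as a side effect.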
3194 **/ 3195 static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff, 3196 u32 cap_count, 3197 enum i40e_admin_queue_opc list_type_opc) 3198 { 3199 struct i40e_aqc_list_capabilities_element_resp *cap; 3200 u32 valid_functions, num_functions; 3201 u32 number, logical_id, phys_id; 3202 struct i40e_hw_capabilities *p; 3203 u8 major_rev; 3204 u32 i = 0; 3205 u16 id; 3206 3207 cap = (struct i40e_aqc_list_capabilities_element_resp *) buff; 3208 3209 if (list_type_opc == i40e_aqc_opc_list_dev_capabilities) 3210 p = &hw->dev_caps; 3211 else if (list_type_opc == i40e_aqc_opc_list_func_capabilities) 3212 p = &hw->func_caps; 3213 else 3214 return; 3215 3216 for (i = 0; i < cap_count; i++, cap++) { 3217 id = le16_to_cpu(cap->id); 3218 number = le32_to_cpu(cap->number); 3219 logical_id = le32_to_cpu(cap->logical_id); 3220 phys_id = le32_to_cpu(cap->phys_id); 3221 major_rev = cap->major_rev; 3222 3223 switch (id) { 3224 case I40E_AQ_CAP_ID_SWITCH_MODE: 3225 p->switch_mode = number; 3226 break; 3227 case I40E_AQ_CAP_ID_MNG_MODE: 3228 p->management_mode = number; 3229 if (major_rev > 1) { 3230 p->mng_protocols_over_mctp = logical_id; 3231 i40e_debug(hw, I40E_DEBUG_INIT, 3232 "HW Capability: Protocols over MCTP = %d\n", 3233 p->mng_protocols_over_mctp); 3234 } else { 3235 p->mng_protocols_over_mctp = 0; 3236 } 3237 break; 3238 case I40E_AQ_CAP_ID_NPAR_ACTIVE: 3239 p->npar_enable = number; 3240 break; 3241 case I40E_AQ_CAP_ID_OS2BMC_CAP: 3242 p->os2bmc = number; 3243 break; 3244 case I40E_AQ_CAP_ID_FUNCTIONS_VALID: 3245 p->valid_functions = number; 3246 break; 3247 case I40E_AQ_CAP_ID_SRIOV: 3248 if (number == 1) 3249 p->sr_iov_1_1 = true; 3250 break; 3251 case I40E_AQ_CAP_ID_VF: 3252 p->num_vfs = number; 3253 p->vf_base_id = logical_id; 3254 break; 3255 case I40E_AQ_CAP_ID_VMDQ: 3256 if (number == 1) 3257 p->vmdq = true; 3258 break; 3259 case I40E_AQ_CAP_ID_8021QBG: 3260 if (number == 1) 3261 p->evb_802_1_qbg = true; 3262 break; 3263 case I40E_AQ_CAP_ID_8021QBR: 3264 if (number == 1) 3265 p->evb_802_1_qbh = true; 3266 break; 3267 case I40E_AQ_CAP_ID_VSI: 3268 p->num_vsis = number; 3269 break; 3270 case I40E_AQ_CAP_ID_DCB: 3271 if (number == 1) { 3272 p->dcb = true; 3273 p->enabled_tcmap = logical_id; 3274 p->maxtc = phys_id; 3275 } 3276 break; 3277 case I40E_AQ_CAP_ID_FCOE: 3278 if (number == 1) 3279 p->fcoe = true; 3280 break; 3281 case I40E_AQ_CAP_ID_ISCSI: 3282 if (number == 1) 3283 p->iscsi = true; 3284 break; 3285 case I40E_AQ_CAP_ID_RSS: 3286 p->rss = true; 3287 p->rss_table_size = number; 3288 p->rss_table_entry_width = logical_id; 3289 break; 3290 case I40E_AQ_CAP_ID_RXQ: 3291 p->num_rx_qp = number; 3292 p->base_queue = phys_id; 3293 break; 3294 case I40E_AQ_CAP_ID_TXQ: 3295 p->num_tx_qp = number; 3296 p->base_queue = phys_id; 3297 break; 3298 case I40E_AQ_CAP_ID_MSIX: 3299 p->num_msix_vectors = number; 3300 i40e_debug(hw, I40E_DEBUG_INIT, 3301 "HW Capability: MSIX vector count = %d\n", 3302 p->num_msix_vectors); 3303 break; 3304 case I40E_AQ_CAP_ID_VF_MSIX: 3305 p->num_msix_vectors_vf = number; 3306 break; 3307 case I40E_AQ_CAP_ID_FLEX10: 3308 if (major_rev == 1) { 3309 if (number == 1) { 3310 p->flex10_enable = true; 3311 p->flex10_capable = true; 3312 } 3313 } else { 3314 /* Capability revision >= 2 */ 3315 if (number & 1) 3316 p->flex10_enable = true; 3317 if (number & 2) 3318 p->flex10_capable = true; 3319 } 3320 p->flex10_mode = logical_id; 3321 p->flex10_status = phys_id; 3322 break; 3323 case I40E_AQ_CAP_ID_CEM: 3324 if (number == 1) 3325 p->mgmt_cem = true; 3326 break; 
3327 case I40E_AQ_CAP_ID_IWARP: 3328 if (number == 1) 3329 p->iwarp = true; 3330 break; 3331 case I40E_AQ_CAP_ID_LED: 3332 if (phys_id < I40E_HW_CAP_MAX_GPIO) 3333 p->led[phys_id] = true; 3334 break; 3335 case I40E_AQ_CAP_ID_SDP: 3336 if (phys_id < I40E_HW_CAP_MAX_GPIO) 3337 p->sdp[phys_id] = true; 3338 break; 3339 case I40E_AQ_CAP_ID_MDIO: 3340 if (number == 1) { 3341 p->mdio_port_num = phys_id; 3342 p->mdio_port_mode = logical_id; 3343 } 3344 break; 3345 case I40E_AQ_CAP_ID_1588: 3346 if (number == 1) 3347 p->ieee_1588 = true; 3348 break; 3349 case I40E_AQ_CAP_ID_FLOW_DIRECTOR: 3350 p->fd = true; 3351 p->fd_filters_guaranteed = number; 3352 p->fd_filters_best_effort = logical_id; 3353 break; 3354 case I40E_AQ_CAP_ID_WSR_PROT: 3355 p->wr_csr_prot = (u64)number; 3356 p->wr_csr_prot |= (u64)logical_id << 32; 3357 break; 3358 case I40E_AQ_CAP_ID_NVM_MGMT: 3359 if (number & I40E_NVM_MGMT_SEC_REV_DISABLED) 3360 p->sec_rev_disabled = true; 3361 if (number & I40E_NVM_MGMT_UPDATE_DISABLED) 3362 p->update_disabled = true; 3363 break; 3364 default: 3365 break; 3366 } 3367 } 3368 3369 if (p->fcoe) 3370 i40e_debug(hw, I40E_DEBUG_ALL, "device is FCoE capable\n"); 3371 3372 /* Software override ensuring FCoE is disabled if npar or mfp 3373 * mode because it is not supported in these modes. 3374 */ 3375 if (p->npar_enable || p->flex10_enable) 3376 p->fcoe = false; 3377 3378 /* count the enabled ports (aka the "not disabled" ports) */ 3379 hw->num_ports = 0; 3380 for (i = 0; i < 4; i++) { 3381 u32 port_cfg_reg = I40E_PRTGEN_CNF + (4 * i); 3382 u64 port_cfg = 0; 3383 3384 /* use AQ read to get the physical register offset instead 3385 * of the port relative offset 3386 */ 3387 i40e_aq_debug_read_register(hw, port_cfg_reg, &port_cfg, NULL); 3388 if (!(port_cfg & I40E_PRTGEN_CNF_PORT_DIS_MASK)) 3389 hw->num_ports++; 3390 } 3391 3392 valid_functions = p->valid_functions; 3393 num_functions = 0; 3394 while (valid_functions) { 3395 if (valid_functions & 1) 3396 num_functions++; 3397 valid_functions >>= 1; 3398 } 3399 3400 /* partition id is 1-based, and functions are evenly spread 3401 * across the ports as partitions 3402 */ 3403 if (hw->num_ports != 0) { 3404 hw->partition_id = (hw->pf_id / hw->num_ports) + 1; 3405 hw->num_partitions = num_functions / hw->num_ports; 3406 } 3407 3408 /* additional HW specific goodies that might 3409 * someday be HW version specific 3410 */ 3411 p->rx_buf_chain_len = I40E_MAX_CHAINED_RX_BUFFERS; 3412 } 3413 3414 /** 3415 * i40e_aq_discover_capabilities 3416 * @hw: pointer to the hw struct 3417 * @buff: a virtual buffer to hold the capabilities 3418 * @buff_size: Size of the virtual buffer 3419 * @data_size: Size of the returned data, or buff size needed if AQ err==ENOMEM 3420 * @list_type_opc: capabilities type to discover - pass in the command opcode 3421 * @cmd_details: pointer to command details structure or NULL 3422 * 3423 * Get the device capabilities descriptions from the firmware 3424 **/ 3425 i40e_status i40e_aq_discover_capabilities(struct i40e_hw *hw, 3426 void *buff, u16 buff_size, u16 *data_size, 3427 enum i40e_admin_queue_opc list_type_opc, 3428 struct i40e_asq_cmd_details *cmd_details) 3429 { 3430 struct i40e_aqc_list_capabilites *cmd; 3431 struct i40e_aq_desc desc; 3432 i40e_status status = 0; 3433 3434 cmd = (struct i40e_aqc_list_capabilites *)&desc.params.raw; 3435 3436 if (list_type_opc != i40e_aqc_opc_list_func_capabilities && 3437 list_type_opc != i40e_aqc_opc_list_dev_capabilities) { 3438 status = I40E_ERR_PARAM; 3439 goto exit; 3440 } 3441 3442 
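	/* The capabilities list opcode doubles as the AQ command opcode, so
	 * this one request path serves both device and function queries.
	 * When firmware answers with ENOMEM, callers typically retry with a
	 * buffer of *data_size bytes (see i40e_discover_capabilities).
	 */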
i40e_fill_default_direct_cmd_desc(&desc, list_type_opc); 3443 3444 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 3445 if (buff_size > I40E_AQ_LARGE_BUF) 3446 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 3447 3448 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); 3449 *data_size = le16_to_cpu(desc.datalen); 3450 3451 if (status) 3452 goto exit; 3453 3454 i40e_parse_discover_capabilities(hw, buff, le32_to_cpu(cmd->count), 3455 list_type_opc); 3456 3457 exit: 3458 return status; 3459 } 3460 3461 /** 3462 * i40e_aq_update_nvm 3463 * @hw: pointer to the hw struct 3464 * @module_pointer: module pointer location in words from the NVM beginning 3465 * @offset: byte offset from the module beginning 3466 * @length: length of the section to be written (in bytes from the offset) 3467 * @data: command buffer (size [bytes] = length) 3468 * @last_command: tells if this is the last command in a series 3469 * @preservation_flags: Preservation mode flags 3470 * @cmd_details: pointer to command details structure or NULL 3471 * 3472 * Update the NVM using the admin queue commands 3473 **/ 3474 i40e_status i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer, 3475 u32 offset, u16 length, void *data, 3476 bool last_command, u8 preservation_flags, 3477 struct i40e_asq_cmd_details *cmd_details) 3478 { 3479 struct i40e_aq_desc desc; 3480 struct i40e_aqc_nvm_update *cmd = 3481 (struct i40e_aqc_nvm_update *)&desc.params.raw; 3482 i40e_status status; 3483 3484 /* In offset the highest byte must be zeroed. */ 3485 if (offset & 0xFF000000) { 3486 status = I40E_ERR_PARAM; 3487 goto i40e_aq_update_nvm_exit; 3488 } 3489 3490 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update); 3491 3492 /* If this is the last command in a series, set the proper flag. */ 3493 if (last_command) 3494 cmd->command_flags |= I40E_AQ_NVM_LAST_CMD; 3495 if (hw->mac.type == I40E_MAC_X722) { 3496 if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_SELECTED) 3497 cmd->command_flags |= 3498 (I40E_AQ_NVM_PRESERVATION_FLAGS_SELECTED << 3499 I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT); 3500 else if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_ALL) 3501 cmd->command_flags |= 3502 (I40E_AQ_NVM_PRESERVATION_FLAGS_ALL << 3503 I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT); 3504 } 3505 cmd->module_pointer = module_pointer; 3506 cmd->offset = cpu_to_le32(offset); 3507 cmd->length = cpu_to_le16(length); 3508 3509 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 3510 if (length > I40E_AQ_LARGE_BUF) 3511 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 3512 3513 status = i40e_asq_send_command(hw, &desc, data, length, cmd_details); 3514 3515 i40e_aq_update_nvm_exit: 3516 return status; 3517 } 3518 3519 /** 3520 * i40e_aq_get_lldp_mib 3521 * @hw: pointer to the hw struct 3522 * @bridge_type: type of bridge requested 3523 * @mib_type: Local, Remote or both Local and Remote MIBs 3524 * @buff: pointer to a user supplied buffer to store the MIB block 3525 * @buff_size: size of the buffer (in bytes) 3526 * @local_len : length of the returned Local LLDP MIB 3527 * @remote_len: length of the returned Remote LLDP MIB 3528 * @cmd_details: pointer to command details structure or NULL 3529 * 3530 * Requests the complete LLDP MIB (entire packet). 
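 *
 * A minimal usage sketch (editor's illustration; the buffer size is arbitrary
 * and the I40E_AQ_LLDP_MIB_LOCAL / I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE
 * constants are assumed from the admin queue definitions):
 *
 *	u8 mib[1024];
 *	u16 local_len = 0, remote_len = 0;
 *	i40e_status ret;
 *
 *	ret = i40e_aq_get_lldp_mib(hw, I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
 *				   I40E_AQ_LLDP_MIB_LOCAL, mib, sizeof(mib),
 *				   &local_len, &remote_len, NULL);
 *
 * On success the first local_len bytes of mib hold the local MIB block.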
3531 **/ 3532 i40e_status i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type, 3533 u8 mib_type, void *buff, u16 buff_size, 3534 u16 *local_len, u16 *remote_len, 3535 struct i40e_asq_cmd_details *cmd_details) 3536 { 3537 struct i40e_aq_desc desc; 3538 struct i40e_aqc_lldp_get_mib *cmd = 3539 (struct i40e_aqc_lldp_get_mib *)&desc.params.raw; 3540 struct i40e_aqc_lldp_get_mib *resp = 3541 (struct i40e_aqc_lldp_get_mib *)&desc.params.raw; 3542 i40e_status status; 3543 3544 if (buff_size == 0 || !buff) 3545 return I40E_ERR_PARAM; 3546 3547 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_get_mib); 3548 /* Indirect Command */ 3549 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 3550 3551 cmd->type = mib_type & I40E_AQ_LLDP_MIB_TYPE_MASK; 3552 cmd->type |= ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) & 3553 I40E_AQ_LLDP_BRIDGE_TYPE_MASK); 3554 3555 desc.datalen = cpu_to_le16(buff_size); 3556 3557 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 3558 if (buff_size > I40E_AQ_LARGE_BUF) 3559 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 3560 3561 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); 3562 if (!status) { 3563 if (local_len != NULL) 3564 *local_len = le16_to_cpu(resp->local_len); 3565 if (remote_len != NULL) 3566 *remote_len = le16_to_cpu(resp->remote_len); 3567 } 3568 3569 return status; 3570 } 3571 3572 /** 3573 * i40e_aq_cfg_lldp_mib_change_event 3574 * @hw: pointer to the hw struct 3575 * @enable_update: Enable or Disable event posting 3576 * @cmd_details: pointer to command details structure or NULL 3577 * 3578 * Enable or Disable posting of an event on ARQ when LLDP MIB 3579 * associated with the interface changes 3580 **/ 3581 i40e_status i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw, 3582 bool enable_update, 3583 struct i40e_asq_cmd_details *cmd_details) 3584 { 3585 struct i40e_aq_desc desc; 3586 struct i40e_aqc_lldp_update_mib *cmd = 3587 (struct i40e_aqc_lldp_update_mib *)&desc.params.raw; 3588 i40e_status status; 3589 3590 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_update_mib); 3591 3592 if (!enable_update) 3593 cmd->command |= I40E_AQ_LLDP_MIB_UPDATE_DISABLE; 3594 3595 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3596 3597 return status; 3598 } 3599 3600 /** 3601 * i40e_aq_stop_lldp 3602 * @hw: pointer to the hw struct 3603 * @shutdown_agent: True if LLDP Agent needs to be Shutdown 3604 * @cmd_details: pointer to command details structure or NULL 3605 * 3606 * Stop or Shutdown the embedded LLDP Agent 3607 **/ 3608 i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent, 3609 struct i40e_asq_cmd_details *cmd_details) 3610 { 3611 struct i40e_aq_desc desc; 3612 struct i40e_aqc_lldp_stop *cmd = 3613 (struct i40e_aqc_lldp_stop *)&desc.params.raw; 3614 i40e_status status; 3615 3616 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_stop); 3617 3618 if (shutdown_agent) 3619 cmd->command |= I40E_AQ_LLDP_AGENT_SHUTDOWN; 3620 3621 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3622 3623 return status; 3624 } 3625 3626 /** 3627 * i40e_aq_start_lldp 3628 * @hw: pointer to the hw struct 3629 * @cmd_details: pointer to command details structure or NULL 3630 * 3631 * Start the embedded LLDP Agent on all ports. 
3632 **/ 3633 i40e_status i40e_aq_start_lldp(struct i40e_hw *hw, 3634 struct i40e_asq_cmd_details *cmd_details) 3635 { 3636 struct i40e_aq_desc desc; 3637 struct i40e_aqc_lldp_start *cmd = 3638 (struct i40e_aqc_lldp_start *)&desc.params.raw; 3639 i40e_status status; 3640
3641 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_start); 3642 3643 cmd->command = I40E_AQ_LLDP_AGENT_START; 3644 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3645 3646 return status; 3647 } 3648
3649 /** 3650 * i40e_aq_set_dcb_parameters 3651 * @hw: pointer to the hw struct 3652 * @dcb_enable: True if DCB configuration needs to be applied 3653 * @cmd_details: pointer to command details structure or NULL 3654 * 3655 **/ 3656 enum i40e_status_code 3657 i40e_aq_set_dcb_parameters(struct i40e_hw *hw, bool dcb_enable, 3658 struct i40e_asq_cmd_details *cmd_details) 3659 { 3660 struct i40e_aq_desc desc; 3661 struct i40e_aqc_set_dcb_parameters *cmd = 3662 (struct i40e_aqc_set_dcb_parameters *)&desc.params.raw; 3663 i40e_status status; 3664
3665 i40e_fill_default_direct_cmd_desc(&desc, 3666 i40e_aqc_opc_set_dcb_parameters); 3667 3668 if (dcb_enable) { 3669 cmd->valid_flags = I40E_DCB_VALID; 3670 cmd->command = I40E_AQ_DCB_SET_AGENT; 3671 } 3672 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3673 3674 return status; 3675 } 3676
3677 /** 3678 * i40e_aq_get_cee_dcb_config 3679 * @hw: pointer to the hw struct 3680 * @buff: response buffer that stores CEE operational configuration 3681 * @buff_size: size of the buffer passed 3682 * @cmd_details: pointer to command details structure or NULL 3683 * 3684 * Get CEE DCBX mode operational configuration from firmware 3685 **/ 3686 i40e_status i40e_aq_get_cee_dcb_config(struct i40e_hw *hw, 3687 void *buff, u16 buff_size, 3688 struct i40e_asq_cmd_details *cmd_details) 3689 { 3690 struct i40e_aq_desc desc; 3691 i40e_status status; 3692
3693 if (buff_size == 0 || !buff) 3694 return I40E_ERR_PARAM; 3695 3696 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_cee_dcb_cfg); 3697 3698 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 3699 status = i40e_asq_send_command(hw, &desc, (void *)buff, buff_size, 3700 cmd_details); 3701 3702 return status; 3703 } 3704
3705 /** 3706 * i40e_aq_add_udp_tunnel 3707 * @hw: pointer to the hw struct 3708 * @udp_port: the UDP port to add in Host byte order 3710 * @protocol_index: protocol index type 3711 * @filter_index: pointer to filter index 3712 * @cmd_details: pointer to command details structure or NULL 3713 * 3714 * Note: Firmware expects the udp_port value to be in Little Endian format, 3715 * and this function will call cpu_to_le16 to convert from Host byte order to 3716 * Little Endian order.
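 *
 * A short usage sketch (editor's illustration; I40E_AQC_TUNNEL_TYPE_VXLAN is
 * assumed from the admin queue definitions and the port number is arbitrary):
 *
 *	u8 filter_index;
 *	i40e_status ret;
 *
 *	ret = i40e_aq_add_udp_tunnel(hw, 4789, I40E_AQC_TUNNEL_TYPE_VXLAN,
 *				     &filter_index, NULL);
 *
 * On success, filter_index identifies the slot and can later be handed to
 * i40e_aq_del_udp_tunnel().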
3717 **/ 3718 i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw, 3719 u16 udp_port, u8 protocol_index, 3720 u8 *filter_index, 3721 struct i40e_asq_cmd_details *cmd_details) 3722 { 3723 struct i40e_aq_desc desc; 3724 struct i40e_aqc_add_udp_tunnel *cmd = 3725 (struct i40e_aqc_add_udp_tunnel *)&desc.params.raw; 3726 struct i40e_aqc_del_udp_tunnel_completion *resp = 3727 (struct i40e_aqc_del_udp_tunnel_completion *)&desc.params.raw; 3728 i40e_status status; 3729 3730 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_udp_tunnel); 3731 3732 cmd->udp_port = cpu_to_le16(udp_port); 3733 cmd->protocol_type = protocol_index; 3734 3735 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3736 3737 if (!status && filter_index) 3738 *filter_index = resp->index; 3739 3740 return status; 3741 } 3742 3743 /** 3744 * i40e_aq_del_udp_tunnel 3745 * @hw: pointer to the hw struct 3746 * @index: filter index 3747 * @cmd_details: pointer to command details structure or NULL 3748 **/ 3749 i40e_status i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index, 3750 struct i40e_asq_cmd_details *cmd_details) 3751 { 3752 struct i40e_aq_desc desc; 3753 struct i40e_aqc_remove_udp_tunnel *cmd = 3754 (struct i40e_aqc_remove_udp_tunnel *)&desc.params.raw; 3755 i40e_status status; 3756 3757 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_del_udp_tunnel); 3758 3759 cmd->index = index; 3760 3761 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3762 3763 return status; 3764 } 3765 3766 /** 3767 * i40e_aq_delete_element - Delete switch element 3768 * @hw: pointer to the hw struct 3769 * @seid: the SEID to delete from the switch 3770 * @cmd_details: pointer to command details structure or NULL 3771 * 3772 * This deletes a switch element from the switch. 3773 **/ 3774 i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid, 3775 struct i40e_asq_cmd_details *cmd_details) 3776 { 3777 struct i40e_aq_desc desc; 3778 struct i40e_aqc_switch_seid *cmd = 3779 (struct i40e_aqc_switch_seid *)&desc.params.raw; 3780 i40e_status status; 3781 3782 if (seid == 0) 3783 return I40E_ERR_PARAM; 3784 3785 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_delete_element); 3786 3787 cmd->seid = cpu_to_le16(seid); 3788 3789 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3790 3791 return status; 3792 } 3793 3794 /** 3795 * i40e_aq_dcb_updated - DCB Updated Command 3796 * @hw: pointer to the hw struct 3797 * @cmd_details: pointer to command details structure or NULL 3798 * 3799 * EMP will return when the shared RPB settings have been 3800 * recomputed and modified. The retval field in the descriptor 3801 * will be set to 0 when RPB is modified. 
3802 **/ 3803 i40e_status i40e_aq_dcb_updated(struct i40e_hw *hw, 3804 struct i40e_asq_cmd_details *cmd_details) 3805 { 3806 struct i40e_aq_desc desc; 3807 i40e_status status; 3808 3809 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_dcb_updated); 3810 3811 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3812 3813 return status; 3814 } 3815 3816 /** 3817 * i40e_aq_tx_sched_cmd - generic Tx scheduler AQ command handler 3818 * @hw: pointer to the hw struct 3819 * @seid: seid for the physical port/switching component/vsi 3820 * @buff: Indirect buffer to hold data parameters and response 3821 * @buff_size: Indirect buffer size 3822 * @opcode: Tx scheduler AQ command opcode 3823 * @cmd_details: pointer to command details structure or NULL 3824 * 3825 * Generic command handler for Tx scheduler AQ commands 3826 **/ 3827 static i40e_status i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid, 3828 void *buff, u16 buff_size, 3829 enum i40e_admin_queue_opc opcode, 3830 struct i40e_asq_cmd_details *cmd_details) 3831 { 3832 struct i40e_aq_desc desc; 3833 struct i40e_aqc_tx_sched_ind *cmd = 3834 (struct i40e_aqc_tx_sched_ind *)&desc.params.raw; 3835 i40e_status status; 3836 bool cmd_param_flag = false; 3837 3838 switch (opcode) { 3839 case i40e_aqc_opc_configure_vsi_ets_sla_bw_limit: 3840 case i40e_aqc_opc_configure_vsi_tc_bw: 3841 case i40e_aqc_opc_enable_switching_comp_ets: 3842 case i40e_aqc_opc_modify_switching_comp_ets: 3843 case i40e_aqc_opc_disable_switching_comp_ets: 3844 case i40e_aqc_opc_configure_switching_comp_ets_bw_limit: 3845 case i40e_aqc_opc_configure_switching_comp_bw_config: 3846 cmd_param_flag = true; 3847 break; 3848 case i40e_aqc_opc_query_vsi_bw_config: 3849 case i40e_aqc_opc_query_vsi_ets_sla_config: 3850 case i40e_aqc_opc_query_switching_comp_ets_config: 3851 case i40e_aqc_opc_query_port_ets_config: 3852 case i40e_aqc_opc_query_switching_comp_bw_config: 3853 cmd_param_flag = false; 3854 break; 3855 default: 3856 return I40E_ERR_PARAM; 3857 } 3858 3859 i40e_fill_default_direct_cmd_desc(&desc, opcode); 3860 3861 /* Indirect command */ 3862 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 3863 if (cmd_param_flag) 3864 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD); 3865 if (buff_size > I40E_AQ_LARGE_BUF) 3866 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 3867 3868 desc.datalen = cpu_to_le16(buff_size); 3869 3870 cmd->vsi_seid = cpu_to_le16(seid); 3871 3872 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); 3873 3874 return status; 3875 } 3876 3877 /** 3878 * i40e_aq_config_vsi_bw_limit - Configure VSI BW Limit 3879 * @hw: pointer to the hw struct 3880 * @seid: VSI seid 3881 * @credit: BW limit credits (0 = disabled) 3882 * @max_credit: Max BW limit credits 3883 * @cmd_details: pointer to command details structure or NULL 3884 **/ 3885 i40e_status i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw, 3886 u16 seid, u16 credit, u8 max_credit, 3887 struct i40e_asq_cmd_details *cmd_details) 3888 { 3889 struct i40e_aq_desc desc; 3890 struct i40e_aqc_configure_vsi_bw_limit *cmd = 3891 (struct i40e_aqc_configure_vsi_bw_limit *)&desc.params.raw; 3892 i40e_status status; 3893 3894 i40e_fill_default_direct_cmd_desc(&desc, 3895 i40e_aqc_opc_configure_vsi_bw_limit); 3896 3897 cmd->vsi_seid = cpu_to_le16(seid); 3898 cmd->credit = cpu_to_le16(credit); 3899 cmd->max_credit = max_credit; 3900 3901 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3902 3903 return status; 3904 } 3905 3906 /** 3907 * i40e_aq_config_vsi_tc_bw - Config VSI BW 
Allocation per TC 3908 * @hw: pointer to the hw struct 3909 * @seid: VSI seid 3910 * @bw_data: Buffer holding enabled TCs, relative TC BW limit/credits 3911 * @cmd_details: pointer to command details structure or NULL 3912 **/ 3913 i40e_status i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw, 3914 u16 seid, 3915 struct i40e_aqc_configure_vsi_tc_bw_data *bw_data, 3916 struct i40e_asq_cmd_details *cmd_details) 3917 { 3918 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 3919 i40e_aqc_opc_configure_vsi_tc_bw, 3920 cmd_details); 3921 } 3922 3923 /** 3924 * i40e_aq_config_switch_comp_ets - Enable/Disable/Modify ETS on the port 3925 * @hw: pointer to the hw struct 3926 * @seid: seid of the switching component connected to Physical Port 3927 * @ets_data: Buffer holding ETS parameters 3928 * @cmd_details: pointer to command details structure or NULL 3929 **/ 3930 i40e_status i40e_aq_config_switch_comp_ets(struct i40e_hw *hw, 3931 u16 seid, 3932 struct i40e_aqc_configure_switching_comp_ets_data *ets_data, 3933 enum i40e_admin_queue_opc opcode, 3934 struct i40e_asq_cmd_details *cmd_details) 3935 { 3936 return i40e_aq_tx_sched_cmd(hw, seid, (void *)ets_data, 3937 sizeof(*ets_data), opcode, cmd_details); 3938 } 3939 3940 /** 3941 * i40e_aq_config_switch_comp_bw_config - Config Switch comp BW Alloc per TC 3942 * @hw: pointer to the hw struct 3943 * @seid: seid of the switching component 3944 * @bw_data: Buffer holding enabled TCs, relative/absolute TC BW limit/credits 3945 * @cmd_details: pointer to command details structure or NULL 3946 **/ 3947 i40e_status i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw, 3948 u16 seid, 3949 struct i40e_aqc_configure_switching_comp_bw_config_data *bw_data, 3950 struct i40e_asq_cmd_details *cmd_details) 3951 { 3952 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 3953 i40e_aqc_opc_configure_switching_comp_bw_config, 3954 cmd_details); 3955 } 3956 3957 /** 3958 * i40e_aq_query_vsi_bw_config - Query VSI BW configuration 3959 * @hw: pointer to the hw struct 3960 * @seid: seid of the VSI 3961 * @bw_data: Buffer to hold VSI BW configuration 3962 * @cmd_details: pointer to command details structure or NULL 3963 **/ 3964 i40e_status i40e_aq_query_vsi_bw_config(struct i40e_hw *hw, 3965 u16 seid, 3966 struct i40e_aqc_query_vsi_bw_config_resp *bw_data, 3967 struct i40e_asq_cmd_details *cmd_details) 3968 { 3969 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 3970 i40e_aqc_opc_query_vsi_bw_config, 3971 cmd_details); 3972 } 3973 3974 /** 3975 * i40e_aq_query_vsi_ets_sla_config - Query VSI BW configuration per TC 3976 * @hw: pointer to the hw struct 3977 * @seid: seid of the VSI 3978 * @bw_data: Buffer to hold VSI BW configuration per TC 3979 * @cmd_details: pointer to command details structure or NULL 3980 **/ 3981 i40e_status i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw, 3982 u16 seid, 3983 struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data, 3984 struct i40e_asq_cmd_details *cmd_details) 3985 { 3986 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 3987 i40e_aqc_opc_query_vsi_ets_sla_config, 3988 cmd_details); 3989 } 3990 3991 /** 3992 * i40e_aq_query_switch_comp_ets_config - Query Switch comp BW config per TC 3993 * @hw: pointer to the hw struct 3994 * @seid: seid of the switching component 3995 * @bw_data: Buffer to hold switching component's per TC BW config 3996 * @cmd_details: pointer to command details structure or NULL 3997 **/ 3998 i40e_status 
i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw, 3999 u16 seid, 4000 struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data, 4001 struct i40e_asq_cmd_details *cmd_details) 4002 { 4003 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 4004 i40e_aqc_opc_query_switching_comp_ets_config, 4005 cmd_details); 4006 } 4007 4008 /** 4009 * i40e_aq_query_port_ets_config - Query Physical Port ETS configuration 4010 * @hw: pointer to the hw struct 4011 * @seid: seid of the VSI or switching component connected to Physical Port 4012 * @bw_data: Buffer to hold current ETS configuration for the Physical Port 4013 * @cmd_details: pointer to command details structure or NULL 4014 **/ 4015 i40e_status i40e_aq_query_port_ets_config(struct i40e_hw *hw, 4016 u16 seid, 4017 struct i40e_aqc_query_port_ets_config_resp *bw_data, 4018 struct i40e_asq_cmd_details *cmd_details) 4019 { 4020 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 4021 i40e_aqc_opc_query_port_ets_config, 4022 cmd_details); 4023 } 4024 4025 /** 4026 * i40e_aq_query_switch_comp_bw_config - Query Switch comp BW configuration 4027 * @hw: pointer to the hw struct 4028 * @seid: seid of the switching component 4029 * @bw_data: Buffer to hold switching component's BW configuration 4030 * @cmd_details: pointer to command details structure or NULL 4031 **/ 4032 i40e_status i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw, 4033 u16 seid, 4034 struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data, 4035 struct i40e_asq_cmd_details *cmd_details) 4036 { 4037 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 4038 i40e_aqc_opc_query_switching_comp_bw_config, 4039 cmd_details); 4040 } 4041 4042 /** 4043 * i40e_validate_filter_settings 4044 * @hw: pointer to the hardware structure 4045 * @settings: Filter control settings 4046 * 4047 * Check and validate the filter control settings passed. 4048 * The function checks for the valid filter/context sizes being 4049 * passed for FCoE and PE. 4050 * 4051 * Returns 0 if the values passed are valid and within 4052 * range else returns an error. 
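 *
 * The enum values encode a power-of-two shift, so the effective sizes work
 * out as BASE << enum, for example (editor's illustration; the exact numbers
 * depend on the I40E_HASH_FILTER_BASE_SIZE and I40E_DMA_CNTX_BASE_SIZE
 * definitions):
 *
 *	fcoe_filt_size = I40E_HASH_FILTER_BASE_SIZE << I40E_HASH_FILTER_SIZE_4K;
 *	fcoe_cntx_size = I40E_DMA_CNTX_BASE_SIZE << I40E_DMA_CNTX_SIZE_2K;
 *
 * and the sum of the FCoE hash filter and DDP context sizes must stay within
 * the PMFCOEFMAX limit read from I40E_GLHMC_FCOEFMAX.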
4053 **/ 4054 static i40e_status i40e_validate_filter_settings(struct i40e_hw *hw, 4055 struct i40e_filter_control_settings *settings) 4056 { 4057 u32 fcoe_cntx_size, fcoe_filt_size; 4058 u32 pe_cntx_size, pe_filt_size; 4059 u32 fcoe_fmax; 4060 u32 val; 4061 4062 /* Validate FCoE settings passed */ 4063 switch (settings->fcoe_filt_num) { 4064 case I40E_HASH_FILTER_SIZE_1K: 4065 case I40E_HASH_FILTER_SIZE_2K: 4066 case I40E_HASH_FILTER_SIZE_4K: 4067 case I40E_HASH_FILTER_SIZE_8K: 4068 case I40E_HASH_FILTER_SIZE_16K: 4069 case I40E_HASH_FILTER_SIZE_32K: 4070 fcoe_filt_size = I40E_HASH_FILTER_BASE_SIZE; 4071 fcoe_filt_size <<= (u32)settings->fcoe_filt_num; 4072 break; 4073 default: 4074 return I40E_ERR_PARAM; 4075 } 4076 4077 switch (settings->fcoe_cntx_num) { 4078 case I40E_DMA_CNTX_SIZE_512: 4079 case I40E_DMA_CNTX_SIZE_1K: 4080 case I40E_DMA_CNTX_SIZE_2K: 4081 case I40E_DMA_CNTX_SIZE_4K: 4082 fcoe_cntx_size = I40E_DMA_CNTX_BASE_SIZE; 4083 fcoe_cntx_size <<= (u32)settings->fcoe_cntx_num; 4084 break; 4085 default: 4086 return I40E_ERR_PARAM; 4087 } 4088 4089 /* Validate PE settings passed */ 4090 switch (settings->pe_filt_num) { 4091 case I40E_HASH_FILTER_SIZE_1K: 4092 case I40E_HASH_FILTER_SIZE_2K: 4093 case I40E_HASH_FILTER_SIZE_4K: 4094 case I40E_HASH_FILTER_SIZE_8K: 4095 case I40E_HASH_FILTER_SIZE_16K: 4096 case I40E_HASH_FILTER_SIZE_32K: 4097 case I40E_HASH_FILTER_SIZE_64K: 4098 case I40E_HASH_FILTER_SIZE_128K: 4099 case I40E_HASH_FILTER_SIZE_256K: 4100 case I40E_HASH_FILTER_SIZE_512K: 4101 case I40E_HASH_FILTER_SIZE_1M: 4102 pe_filt_size = I40E_HASH_FILTER_BASE_SIZE; 4103 pe_filt_size <<= (u32)settings->pe_filt_num; 4104 break; 4105 default: 4106 return I40E_ERR_PARAM; 4107 } 4108 4109 switch (settings->pe_cntx_num) { 4110 case I40E_DMA_CNTX_SIZE_512: 4111 case I40E_DMA_CNTX_SIZE_1K: 4112 case I40E_DMA_CNTX_SIZE_2K: 4113 case I40E_DMA_CNTX_SIZE_4K: 4114 case I40E_DMA_CNTX_SIZE_8K: 4115 case I40E_DMA_CNTX_SIZE_16K: 4116 case I40E_DMA_CNTX_SIZE_32K: 4117 case I40E_DMA_CNTX_SIZE_64K: 4118 case I40E_DMA_CNTX_SIZE_128K: 4119 case I40E_DMA_CNTX_SIZE_256K: 4120 pe_cntx_size = I40E_DMA_CNTX_BASE_SIZE; 4121 pe_cntx_size <<= (u32)settings->pe_cntx_num; 4122 break; 4123 default: 4124 return I40E_ERR_PARAM; 4125 } 4126 4127 /* FCHSIZE + FCDSIZE should not be greater than PMFCOEFMAX */ 4128 val = rd32(hw, I40E_GLHMC_FCOEFMAX); 4129 fcoe_fmax = (val & I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK) 4130 >> I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT; 4131 if (fcoe_filt_size + fcoe_cntx_size > fcoe_fmax) 4132 return I40E_ERR_INVALID_SIZE; 4133 4134 return 0; 4135 } 4136 4137 /** 4138 * i40e_set_filter_control 4139 * @hw: pointer to the hardware structure 4140 * @settings: Filter control settings 4141 * 4142 * Set the Queue Filters for PE/FCoE and enable filters required 4143 * for a single PF. It is expected that these settings are programmed 4144 * at the driver initialization time. 
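 *
 * A minimal caller sketch (editor's illustration; the field choices and the
 * I40E_HASH_LUT_SIZE_128 enumerator are assumptions, and a zero-initialized
 * structure is taken to select the smallest FCoE/PE size enumerators):
 *
 *	struct i40e_filter_control_settings settings = {0};
 *	i40e_status ret;
 *
 *	settings.hash_lut_size = I40E_HASH_LUT_SIZE_128;
 *	settings.enable_ethtype = true;
 *	settings.enable_macvlan = true;
 *	ret = i40e_set_filter_control(hw, &settings);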
4145 **/ 4146 i40e_status i40e_set_filter_control(struct i40e_hw *hw, 4147 struct i40e_filter_control_settings *settings) 4148 { 4149 i40e_status ret = 0; 4150 u32 hash_lut_size = 0; 4151 u32 val; 4152 4153 if (!settings) 4154 return I40E_ERR_PARAM; 4155 4156 /* Validate the input settings */ 4157 ret = i40e_validate_filter_settings(hw, settings); 4158 if (ret) 4159 return ret; 4160 4161 /* Read the PF Queue Filter control register */ 4162 val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0); 4163 4164 /* Program required PE hash buckets for the PF */ 4165 val &= ~I40E_PFQF_CTL_0_PEHSIZE_MASK; 4166 val |= ((u32)settings->pe_filt_num << I40E_PFQF_CTL_0_PEHSIZE_SHIFT) & 4167 I40E_PFQF_CTL_0_PEHSIZE_MASK; 4168 /* Program required PE contexts for the PF */ 4169 val &= ~I40E_PFQF_CTL_0_PEDSIZE_MASK; 4170 val |= ((u32)settings->pe_cntx_num << I40E_PFQF_CTL_0_PEDSIZE_SHIFT) & 4171 I40E_PFQF_CTL_0_PEDSIZE_MASK; 4172 4173 /* Program required FCoE hash buckets for the PF */ 4174 val &= ~I40E_PFQF_CTL_0_PFFCHSIZE_MASK; 4175 val |= ((u32)settings->fcoe_filt_num << 4176 I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT) & 4177 I40E_PFQF_CTL_0_PFFCHSIZE_MASK; 4178 /* Program required FCoE DDP contexts for the PF */ 4179 val &= ~I40E_PFQF_CTL_0_PFFCDSIZE_MASK; 4180 val |= ((u32)settings->fcoe_cntx_num << 4181 I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT) & 4182 I40E_PFQF_CTL_0_PFFCDSIZE_MASK; 4183 4184 /* Program Hash LUT size for the PF */ 4185 val &= ~I40E_PFQF_CTL_0_HASHLUTSIZE_MASK; 4186 if (settings->hash_lut_size == I40E_HASH_LUT_SIZE_512) 4187 hash_lut_size = 1; 4188 val |= (hash_lut_size << I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT) & 4189 I40E_PFQF_CTL_0_HASHLUTSIZE_MASK; 4190 4191 /* Enable FDIR, Ethertype and MACVLAN filters for PF and VFs */ 4192 if (settings->enable_fdir) 4193 val |= I40E_PFQF_CTL_0_FD_ENA_MASK; 4194 if (settings->enable_ethtype) 4195 val |= I40E_PFQF_CTL_0_ETYPE_ENA_MASK; 4196 if (settings->enable_macvlan) 4197 val |= I40E_PFQF_CTL_0_MACVLAN_ENA_MASK; 4198 4199 i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, val); 4200 4201 return 0; 4202 } 4203 4204 /** 4205 * i40e_aq_add_rem_control_packet_filter - Add or Remove Control Packet Filter 4206 * @hw: pointer to the hw struct 4207 * @mac_addr: MAC address to use in the filter 4208 * @ethtype: Ethertype to use in the filter 4209 * @flags: Flags that needs to be applied to the filter 4210 * @vsi_seid: seid of the control VSI 4211 * @queue: VSI queue number to send the packet to 4212 * @is_add: Add control packet filter if True else remove 4213 * @stats: Structure to hold information on control filter counts 4214 * @cmd_details: pointer to command details structure or NULL 4215 * 4216 * This command will Add or Remove control packet filter for a control VSI. 4217 * In return it will update the total number of perfect filter count in 4218 * the stats member. 
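 *
 * i40e_add_filter_to_drop_tx_flow_control_frames() below is the in-file user
 * of the add path; the matching removal would look roughly like this
 * (editor's sketch, reusing the same flag set, which is an assumption):
 *
 *	i40e_aq_add_rem_control_packet_filter(hw, NULL, 0x8808,
 *			I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC |
 *			I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP |
 *			I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX,
 *			seid, 0, false, NULL, NULL);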
4219 **/ 4220 i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw, 4221 u8 *mac_addr, u16 ethtype, u16 flags, 4222 u16 vsi_seid, u16 queue, bool is_add, 4223 struct i40e_control_filter_stats *stats, 4224 struct i40e_asq_cmd_details *cmd_details) 4225 { 4226 struct i40e_aq_desc desc; 4227 struct i40e_aqc_add_remove_control_packet_filter *cmd = 4228 (struct i40e_aqc_add_remove_control_packet_filter *) 4229 &desc.params.raw; 4230 struct i40e_aqc_add_remove_control_packet_filter_completion *resp = 4231 (struct i40e_aqc_add_remove_control_packet_filter_completion *) 4232 &desc.params.raw; 4233 i40e_status status; 4234 4235 if (vsi_seid == 0) 4236 return I40E_ERR_PARAM; 4237 4238 if (is_add) { 4239 i40e_fill_default_direct_cmd_desc(&desc, 4240 i40e_aqc_opc_add_control_packet_filter); 4241 cmd->queue = cpu_to_le16(queue); 4242 } else { 4243 i40e_fill_default_direct_cmd_desc(&desc, 4244 i40e_aqc_opc_remove_control_packet_filter); 4245 } 4246 4247 if (mac_addr) 4248 ether_addr_copy(cmd->mac, mac_addr); 4249 4250 cmd->etype = cpu_to_le16(ethtype); 4251 cmd->flags = cpu_to_le16(flags); 4252 cmd->seid = cpu_to_le16(vsi_seid); 4253 4254 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 4255 4256 if (!status && stats) { 4257 stats->mac_etype_used = le16_to_cpu(resp->mac_etype_used); 4258 stats->etype_used = le16_to_cpu(resp->etype_used); 4259 stats->mac_etype_free = le16_to_cpu(resp->mac_etype_free); 4260 stats->etype_free = le16_to_cpu(resp->etype_free); 4261 } 4262 4263 return status; 4264 } 4265 4266 /** 4267 * i40e_add_filter_to_drop_tx_flow_control_frames- filter to drop flow control 4268 * @hw: pointer to the hw struct 4269 * @seid: VSI seid to add ethertype filter from 4270 **/ 4271 #define I40E_FLOW_CONTROL_ETHTYPE 0x8808 4272 void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw, 4273 u16 seid) 4274 { 4275 u16 flag = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC | 4276 I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP | 4277 I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX; 4278 u16 ethtype = I40E_FLOW_CONTROL_ETHTYPE; 4279 i40e_status status; 4280 4281 status = i40e_aq_add_rem_control_packet_filter(hw, NULL, ethtype, flag, 4282 seid, 0, true, NULL, 4283 NULL); 4284 if (status) 4285 hw_dbg(hw, "Ethtype Filter Add failed: Error pruning Tx flow control frames\n"); 4286 } 4287 4288 /** 4289 * i40e_aq_alternate_read 4290 * @hw: pointer to the hardware structure 4291 * @reg_addr0: address of first dword to be read 4292 * @reg_val0: pointer for data read from 'reg_addr0' 4293 * @reg_addr1: address of second dword to be read 4294 * @reg_val1: pointer for data read from 'reg_addr1' 4295 * 4296 * Read one or two dwords from alternate structure. Fields are indicated 4297 * by 'reg_addr0' and 'reg_addr1' register numbers. If 'reg_val1' pointer 4298 * is not passed then only register at 'reg_addr0' is read. 
4299 * 4300 **/ 4301 static i40e_status i40e_aq_alternate_read(struct i40e_hw *hw, 4302 u32 reg_addr0, u32 *reg_val0, 4303 u32 reg_addr1, u32 *reg_val1) 4304 { 4305 struct i40e_aq_desc desc; 4306 struct i40e_aqc_alternate_write *cmd_resp = 4307 (struct i40e_aqc_alternate_write *)&desc.params.raw; 4308 i40e_status status; 4309 4310 if (!reg_val0) 4311 return I40E_ERR_PARAM; 4312 4313 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_alternate_read); 4314 cmd_resp->address0 = cpu_to_le32(reg_addr0); 4315 cmd_resp->address1 = cpu_to_le32(reg_addr1); 4316 4317 status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL); 4318 4319 if (!status) { 4320 *reg_val0 = le32_to_cpu(cmd_resp->data0); 4321 4322 if (reg_val1) 4323 *reg_val1 = le32_to_cpu(cmd_resp->data1); 4324 } 4325 4326 return status; 4327 } 4328 4329 /** 4330 * i40e_aq_resume_port_tx 4331 * @hw: pointer to the hardware structure 4332 * @cmd_details: pointer to command details structure or NULL 4333 * 4334 * Resume port's Tx traffic 4335 **/ 4336 i40e_status i40e_aq_resume_port_tx(struct i40e_hw *hw, 4337 struct i40e_asq_cmd_details *cmd_details) 4338 { 4339 struct i40e_aq_desc desc; 4340 i40e_status status; 4341 4342 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_resume_port_tx); 4343 4344 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 4345 4346 return status; 4347 } 4348 4349 /** 4350 * i40e_set_pci_config_data - store PCI bus info 4351 * @hw: pointer to hardware structure 4352 * @link_status: the link status word from PCI config space 4353 * 4354 * Stores the PCI bus info (speed, width, type) within the i40e_hw structure 4355 **/ 4356 void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status) 4357 { 4358 hw->bus.type = i40e_bus_type_pci_express; 4359 4360 switch (link_status & PCI_EXP_LNKSTA_NLW) { 4361 case PCI_EXP_LNKSTA_NLW_X1: 4362 hw->bus.width = i40e_bus_width_pcie_x1; 4363 break; 4364 case PCI_EXP_LNKSTA_NLW_X2: 4365 hw->bus.width = i40e_bus_width_pcie_x2; 4366 break; 4367 case PCI_EXP_LNKSTA_NLW_X4: 4368 hw->bus.width = i40e_bus_width_pcie_x4; 4369 break; 4370 case PCI_EXP_LNKSTA_NLW_X8: 4371 hw->bus.width = i40e_bus_width_pcie_x8; 4372 break; 4373 default: 4374 hw->bus.width = i40e_bus_width_unknown; 4375 break; 4376 } 4377 4378 switch (link_status & PCI_EXP_LNKSTA_CLS) { 4379 case PCI_EXP_LNKSTA_CLS_2_5GB: 4380 hw->bus.speed = i40e_bus_speed_2500; 4381 break; 4382 case PCI_EXP_LNKSTA_CLS_5_0GB: 4383 hw->bus.speed = i40e_bus_speed_5000; 4384 break; 4385 case PCI_EXP_LNKSTA_CLS_8_0GB: 4386 hw->bus.speed = i40e_bus_speed_8000; 4387 break; 4388 default: 4389 hw->bus.speed = i40e_bus_speed_unknown; 4390 break; 4391 } 4392 } 4393 4394 /** 4395 * i40e_aq_debug_dump 4396 * @hw: pointer to the hardware structure 4397 * @cluster_id: specific cluster to dump 4398 * @table_id: table id within cluster 4399 * @start_index: index of line in the block to read 4400 * @buff_size: dump buffer size 4401 * @buff: dump buffer 4402 * @ret_buff_size: actual buffer size returned 4403 * @ret_next_table: next block to read 4404 * @ret_next_index: next index to read 4405 * 4406 * Dump internal FW/HW data for debug purposes. 
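 *
 * Callers generally walk the dump by feeding ret_next_table/ret_next_index
 * back into the next request (editor's sketch; cluster_id, table_id and buf
 * are placeholders supplied by the caller):
 *
 *	u16 rlen = 0;
 *	u8 next_table = 0;
 *	u32 next_index = 0;
 *
 *	ret = i40e_aq_debug_dump(hw, cluster_id, table_id, 0, sizeof(buf),
 *				 buf, &rlen, &next_table, &next_index, NULL);
 *
 * and then repeat with next_table/next_index until the firmware indicates
 * there is no more data to read.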
4407 * 4408 **/ 4409 i40e_status i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id, 4410 u8 table_id, u32 start_index, u16 buff_size, 4411 void *buff, u16 *ret_buff_size, 4412 u8 *ret_next_table, u32 *ret_next_index, 4413 struct i40e_asq_cmd_details *cmd_details) 4414 { 4415 struct i40e_aq_desc desc; 4416 struct i40e_aqc_debug_dump_internals *cmd = 4417 (struct i40e_aqc_debug_dump_internals *)&desc.params.raw; 4418 struct i40e_aqc_debug_dump_internals *resp = 4419 (struct i40e_aqc_debug_dump_internals *)&desc.params.raw; 4420 i40e_status status; 4421 4422 if (buff_size == 0 || !buff) 4423 return I40E_ERR_PARAM; 4424 4425 i40e_fill_default_direct_cmd_desc(&desc, 4426 i40e_aqc_opc_debug_dump_internals); 4427 /* Indirect Command */ 4428 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 4429 if (buff_size > I40E_AQ_LARGE_BUF) 4430 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 4431 4432 cmd->cluster_id = cluster_id; 4433 cmd->table_id = table_id; 4434 cmd->idx = cpu_to_le32(start_index); 4435 4436 desc.datalen = cpu_to_le16(buff_size); 4437 4438 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); 4439 if (!status) { 4440 if (ret_buff_size) 4441 *ret_buff_size = le16_to_cpu(desc.datalen); 4442 if (ret_next_table) 4443 *ret_next_table = resp->table_id; 4444 if (ret_next_index) 4445 *ret_next_index = le32_to_cpu(resp->idx); 4446 } 4447 4448 return status; 4449 } 4450 4451 /** 4452 * i40e_read_bw_from_alt_ram 4453 * @hw: pointer to the hardware structure 4454 * @max_bw: pointer for max_bw read 4455 * @min_bw: pointer for min_bw read 4456 * @min_valid: pointer for bool that is true if min_bw is a valid value 4457 * @max_valid: pointer for bool that is true if max_bw is a valid value 4458 * 4459 * Read bw from the alternate ram for the given pf 4460 **/ 4461 i40e_status i40e_read_bw_from_alt_ram(struct i40e_hw *hw, 4462 u32 *max_bw, u32 *min_bw, 4463 bool *min_valid, bool *max_valid) 4464 { 4465 i40e_status status; 4466 u32 max_bw_addr, min_bw_addr; 4467 4468 /* Calculate the address of the min/max bw registers */ 4469 max_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET + 4470 I40E_ALT_STRUCT_MAX_BW_OFFSET + 4471 (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id); 4472 min_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET + 4473 I40E_ALT_STRUCT_MIN_BW_OFFSET + 4474 (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id); 4475 4476 /* Read the bandwidths from alt ram */ 4477 status = i40e_aq_alternate_read(hw, max_bw_addr, max_bw, 4478 min_bw_addr, min_bw); 4479 4480 if (*min_bw & I40E_ALT_BW_VALID_MASK) 4481 *min_valid = true; 4482 else 4483 *min_valid = false; 4484 4485 if (*max_bw & I40E_ALT_BW_VALID_MASK) 4486 *max_valid = true; 4487 else 4488 *max_valid = false; 4489 4490 return status; 4491 } 4492 4493 /** 4494 * i40e_aq_configure_partition_bw 4495 * @hw: pointer to the hardware structure 4496 * @bw_data: Buffer holding valid pfs and bw limits 4497 * @cmd_details: pointer to command details 4498 * 4499 * Configure partitions guaranteed/max bw 4500 **/ 4501 i40e_status i40e_aq_configure_partition_bw(struct i40e_hw *hw, 4502 struct i40e_aqc_configure_partition_bw_data *bw_data, 4503 struct i40e_asq_cmd_details *cmd_details) 4504 { 4505 i40e_status status; 4506 struct i40e_aq_desc desc; 4507 u16 bwd_size = sizeof(*bw_data); 4508 4509 i40e_fill_default_direct_cmd_desc(&desc, 4510 i40e_aqc_opc_configure_partition_bw); 4511 4512 /* Indirect command */ 4513 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 4514 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD); 4515 4516 if (bwd_size > I40E_AQ_LARGE_BUF) 4517 
desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 4518 4519 desc.datalen = cpu_to_le16(bwd_size); 4520 4521 status = i40e_asq_send_command(hw, &desc, bw_data, bwd_size, 4522 cmd_details); 4523 4524 return status; 4525 } 4526
4527 /** 4528 * i40e_read_phy_register_clause22 4529 * @hw: pointer to the HW structure 4530 * @reg: register address in the page 4531 * @phy_addr: PHY address on MDIO interface 4532 * @value: PHY register value 4533 * 4534 * Reads specified PHY register value 4535 **/ 4536 i40e_status i40e_read_phy_register_clause22(struct i40e_hw *hw, 4537 u16 reg, u8 phy_addr, u16 *value) 4538 { 4539 i40e_status status = I40E_ERR_TIMEOUT; 4540 u8 port_num = (u8)hw->func_caps.mdio_port_num; 4541 u32 command = 0; 4542 u16 retry = 1000; 4543
4544 command = (reg << I40E_GLGEN_MSCA_DEVADD_SHIFT) | 4545 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | 4546 (I40E_MDIO_CLAUSE22_OPCODE_READ_MASK) | 4547 (I40E_MDIO_CLAUSE22_STCODE_MASK) | 4548 (I40E_GLGEN_MSCA_MDICMD_MASK); 4549 wr32(hw, I40E_GLGEN_MSCA(port_num), command); 4550 do { 4551 command = rd32(hw, I40E_GLGEN_MSCA(port_num)); 4552 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { 4553 status = 0; 4554 break; 4555 } 4556 udelay(10); 4557 retry--; 4558 } while (retry); 4559
4560 if (status) { 4561 i40e_debug(hw, I40E_DEBUG_PHY, 4562 "PHY: Can't write command to external PHY.\n"); 4563 } else { 4564 command = rd32(hw, I40E_GLGEN_MSRWD(port_num)); 4565 *value = (command & I40E_GLGEN_MSRWD_MDIRDDATA_MASK) >> 4566 I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT; 4567 } 4568 4569 return status; 4570 } 4571
4572 /** 4573 * i40e_write_phy_register_clause22 4574 * @hw: pointer to the HW structure 4575 * @reg: register address in the page 4576 * @phy_addr: PHY address on MDIO interface 4577 * @value: PHY register value 4578 * 4579 * Writes specified PHY register value 4580 **/ 4581 i40e_status i40e_write_phy_register_clause22(struct i40e_hw *hw, 4582 u16 reg, u8 phy_addr, u16 value) 4583 { 4584 i40e_status status = I40E_ERR_TIMEOUT; 4585 u8 port_num = (u8)hw->func_caps.mdio_port_num; 4586 u32 command = 0; 4587 u16 retry = 1000; 4588
4589 command = value << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT; 4590 wr32(hw, I40E_GLGEN_MSRWD(port_num), command); 4591 4592 command = (reg << I40E_GLGEN_MSCA_DEVADD_SHIFT) | 4593 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | 4594 (I40E_MDIO_CLAUSE22_OPCODE_WRITE_MASK) | 4595 (I40E_MDIO_CLAUSE22_STCODE_MASK) | 4596 (I40E_GLGEN_MSCA_MDICMD_MASK); 4597
4598 wr32(hw, I40E_GLGEN_MSCA(port_num), command); 4599 do { 4600 command = rd32(hw, I40E_GLGEN_MSCA(port_num)); 4601 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { 4602 status = 0; 4603 break; 4604 } 4605 udelay(10); 4606 retry--; 4607 } while (retry); 4608 4609 return status; 4610 } 4611
4612 /** 4613 * i40e_read_phy_register_clause45 4614 * @hw: pointer to the HW structure 4615 * @page: registers page number 4616 * @reg: register address in the page 4617 * @phy_addr: PHY address on MDIO interface 4618 * @value: PHY register value 4619 * 4620 * Reads specified PHY register value 4621 **/ 4622 i40e_status i40e_read_phy_register_clause45(struct i40e_hw *hw, 4623 u8 page, u16 reg, u8 phy_addr, u16 *value) 4624 { 4625 i40e_status status = I40E_ERR_TIMEOUT; 4626 u32 command = 0; 4627 u16 retry = 1000; 4628 u8 port_num = hw->func_caps.mdio_port_num; 4629
4630 command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) | 4631 (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) | 4632 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | 4633 (I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK) | 4634 (I40E_MDIO_CLAUSE45_STCODE_MASK) | 4635
(I40E_GLGEN_MSCA_MDICMD_MASK) | 4636 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK); 4637 wr32(hw, I40E_GLGEN_MSCA(port_num), command); 4638 do { 4639 command = rd32(hw, I40E_GLGEN_MSCA(port_num)); 4640 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { 4641 status = 0; 4642 break; 4643 } 4644 usleep_range(10, 20); 4645 retry--; 4646 } while (retry); 4647
4648 if (status) { 4649 i40e_debug(hw, I40E_DEBUG_PHY, 4650 "PHY: Can't write command to external PHY.\n"); 4651 goto phy_read_end; 4652 } 4653
4654 command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) | 4655 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | 4656 (I40E_MDIO_CLAUSE45_OPCODE_READ_MASK) | 4657 (I40E_MDIO_CLAUSE45_STCODE_MASK) | 4658 (I40E_GLGEN_MSCA_MDICMD_MASK) | 4659 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK); 4660 status = I40E_ERR_TIMEOUT; 4661 retry = 1000; 4662 wr32(hw, I40E_GLGEN_MSCA(port_num), command); 4663 do { 4664 command = rd32(hw, I40E_GLGEN_MSCA(port_num)); 4665 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { 4666 status = 0; 4667 break; 4668 } 4669 usleep_range(10, 20); 4670 retry--; 4671 } while (retry); 4672
4673 if (!status) { 4674 command = rd32(hw, I40E_GLGEN_MSRWD(port_num)); 4675 *value = (command & I40E_GLGEN_MSRWD_MDIRDDATA_MASK) >> 4676 I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT; 4677 } else { 4678 i40e_debug(hw, I40E_DEBUG_PHY, 4679 "PHY: Can't read register value from external PHY.\n"); 4680 } 4681 4682 phy_read_end: 4683 return status; 4684 } 4685
4686 /** 4687 * i40e_write_phy_register_clause45 4688 * @hw: pointer to the HW structure 4689 * @page: registers page number 4690 * @reg: register address in the page 4691 * @phy_addr: PHY address on MDIO interface 4692 * @value: PHY register value 4693 * 4694 * Writes value to specified PHY register 4695 **/ 4696 i40e_status i40e_write_phy_register_clause45(struct i40e_hw *hw, 4697 u8 page, u16 reg, u8 phy_addr, u16 value) 4698 { 4699 i40e_status status = I40E_ERR_TIMEOUT; 4700 u32 command = 0; 4701 u16 retry = 1000; 4702 u8 port_num = hw->func_caps.mdio_port_num; 4703
4704 command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) | 4705 (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) | 4706 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | 4707 (I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK) | 4708 (I40E_MDIO_CLAUSE45_STCODE_MASK) | 4709 (I40E_GLGEN_MSCA_MDICMD_MASK) | 4710 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK); 4711 wr32(hw, I40E_GLGEN_MSCA(port_num), command); 4712 do { 4713 command = rd32(hw, I40E_GLGEN_MSCA(port_num)); 4714 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { 4715 status = 0; 4716 break; 4717 } 4718 usleep_range(10, 20); 4719 retry--; 4720 } while (retry); 4721 if (status) { 4722 i40e_debug(hw, I40E_DEBUG_PHY, 4723 "PHY: Can't write command to external PHY.\n"); 4724 goto phy_write_end; 4725 } 4726
4727 command = value << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT; 4728 wr32(hw, I40E_GLGEN_MSRWD(port_num), command); 4729 4730 command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) | 4731 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | 4732 (I40E_MDIO_CLAUSE45_OPCODE_WRITE_MASK) | 4733 (I40E_MDIO_CLAUSE45_STCODE_MASK) | 4734 (I40E_GLGEN_MSCA_MDICMD_MASK) | 4735 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK); 4736 status = I40E_ERR_TIMEOUT; 4737 retry = 1000; 4738 wr32(hw, I40E_GLGEN_MSCA(port_num), command); 4739 do { 4740 command = rd32(hw, I40E_GLGEN_MSCA(port_num)); 4741 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { 4742 status = 0; 4743 break; 4744 } 4745 usleep_range(10, 20); 4746 retry--; 4747 } while (retry); 4748 4749 phy_write_end: 4750 return status; 4751 } 4752
4753 /** 4754 * i40e_write_phy_register 4755 * @hw: pointer to the HW
structure 4756 * @page: registers page number 4757 * @reg: register address in the page 4758 * @phy_addr: PHY address on MDIO interface 4759 * @value: PHY register value 4760 * 4761 * Writes value to specified PHY register 4762 **/ 4763 i40e_status i40e_write_phy_register(struct i40e_hw *hw, 4764 u8 page, u16 reg, u8 phy_addr, u16 value) 4765 { 4766 i40e_status status; 4767
4768 switch (hw->device_id) { 4769 case I40E_DEV_ID_1G_BASE_T_X722: 4770 status = i40e_write_phy_register_clause22(hw, reg, phy_addr, 4771 value); 4772 break; 4773 case I40E_DEV_ID_10G_BASE_T: 4774 case I40E_DEV_ID_10G_BASE_T4: 4775 case I40E_DEV_ID_10G_BASE_T_X722: 4776 case I40E_DEV_ID_25G_B: 4777 case I40E_DEV_ID_25G_SFP28: 4778 status = i40e_write_phy_register_clause45(hw, page, reg, 4779 phy_addr, value); 4780 break; 4781 default: 4782 status = I40E_ERR_UNKNOWN_PHY; 4783 break; 4784 } 4785 4786 return status; 4787 } 4788
4789 /** 4790 * i40e_read_phy_register 4791 * @hw: pointer to the HW structure 4792 * @page: registers page number 4793 * @reg: register address in the page 4794 * @phy_addr: PHY address on MDIO interface 4795 * @value: PHY register value 4796 * 4797 * Reads specified PHY register value 4798 **/ 4799 i40e_status i40e_read_phy_register(struct i40e_hw *hw, 4800 u8 page, u16 reg, u8 phy_addr, u16 *value) 4801 { 4802 i40e_status status; 4803
4804 switch (hw->device_id) { 4805 case I40E_DEV_ID_1G_BASE_T_X722: 4806 status = i40e_read_phy_register_clause22(hw, reg, phy_addr, 4807 value); 4808 break; 4809 case I40E_DEV_ID_10G_BASE_T: 4810 case I40E_DEV_ID_10G_BASE_T4: 4811 case I40E_DEV_ID_10G_BASE_T_X722: 4812 case I40E_DEV_ID_25G_B: 4813 case I40E_DEV_ID_25G_SFP28: 4814 status = i40e_read_phy_register_clause45(hw, page, reg, 4815 phy_addr, value); 4816 break; 4817 default: 4818 status = I40E_ERR_UNKNOWN_PHY; 4819 break; 4820 } 4821 4822 return status; 4823 } 4824
4825 /** 4826 * i40e_get_phy_address 4827 * @hw: pointer to the HW structure 4828 * @dev_num: PHY port number whose address is wanted 4830 * 4831 * Gets the PHY address for the current port 4832 **/ 4833 u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num) 4834 { 4835 u8 port_num = hw->func_caps.mdio_port_num; 4836 u32 reg_val = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(port_num)); 4837 4838 return (u8)(reg_val >> ((dev_num + 1) * 5)) & 0x1f; 4839 } 4840
4841 /** 4842 * i40e_blink_phy_link_led 4843 * @hw: pointer to the HW structure 4844 * @time: time in seconds for the LED to keep blinking 4845 * @interval: gap in msecs between LED on and off states 4846 * 4847 * Blinks PHY link LED 4848 **/ 4849 i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw, 4850 u32 time, u32 interval) 4851 { 4852 i40e_status status = 0; 4853 u32 i; 4854 u16 led_ctl; 4855 u16 gpio_led_port; 4856 u16 led_reg; 4857 u16 led_addr = I40E_PHY_LED_PROV_REG_1; 4858 u8 phy_addr = 0; 4859 u8 port_num; 4860
4861 i = rd32(hw, I40E_PFGEN_PORTNUM); 4862 port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK); 4863 phy_addr = i40e_get_phy_address(hw, port_num); 4864 4865 for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++, 4866 led_addr++) { 4867 status = i40e_read_phy_register_clause45(hw, 4868 I40E_PHY_COM_REG_PAGE, 4869 led_addr, phy_addr, 4870 &led_reg); 4871 if (status) 4872 goto phy_blinking_end; 4873 led_ctl = led_reg; 4874 if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) { 4875 led_reg = 0; 4876 status = i40e_write_phy_register_clause45(hw, 4877 I40E_PHY_COM_REG_PAGE, 4878 led_addr, phy_addr, 4879 led_reg); 4880 if (status) 4881 goto phy_blinking_end; 4882 break; 4883 } 4884 }
4885 4886 if (time > 0 && interval > 0) { 4887 for (i = 0; i < time * 1000; i += interval) { 4888 status = i40e_read_phy_register_clause45(hw, 4889 I40E_PHY_COM_REG_PAGE, 4890 led_addr, phy_addr, &led_reg); 4891 if (status) 4892 goto restore_config; 4893 if (led_reg & I40E_PHY_LED_MANUAL_ON) 4894 led_reg = 0; 4895 else 4896 led_reg = I40E_PHY_LED_MANUAL_ON; 4897 status = i40e_write_phy_register_clause45(hw, 4898 I40E_PHY_COM_REG_PAGE, 4899 led_addr, phy_addr, led_reg); 4900 if (status) 4901 goto restore_config; 4902 msleep(interval); 4903 } 4904 } 4905
4906 restore_config: 4907 status = i40e_write_phy_register_clause45(hw, 4908 I40E_PHY_COM_REG_PAGE, 4909 led_addr, phy_addr, led_ctl); 4910 4911 phy_blinking_end: 4912 return status; 4913 } 4914
4915 /** 4916 * i40e_led_get_reg - read LED register 4917 * @hw: pointer to the HW structure 4918 * @led_addr: LED register address 4919 * @reg_val: read register value 4920 **/ 4921 static enum i40e_status_code i40e_led_get_reg(struct i40e_hw *hw, u16 led_addr, 4922 u32 *reg_val) 4923 { 4924 enum i40e_status_code status; 4925 u8 phy_addr = 0; 4926 u8 port_num; 4927 u32 i; 4928
4929 *reg_val = 0; 4930 if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) { 4931 status = 4932 i40e_aq_get_phy_register(hw, 4933 I40E_AQ_PHY_REG_ACCESS_EXTERNAL, 4934 I40E_PHY_COM_REG_PAGE, 4935 I40E_PHY_LED_PROV_REG_1, 4936 reg_val, NULL); 4937 } else { 4938 i = rd32(hw, I40E_PFGEN_PORTNUM); 4939 port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK); 4940 phy_addr = i40e_get_phy_address(hw, port_num); 4941 status = i40e_read_phy_register_clause45(hw, 4942 I40E_PHY_COM_REG_PAGE, 4943 led_addr, phy_addr, 4944 (u16 *)reg_val); 4945 } 4946 return status; 4947 } 4948
4949 /** 4950 * i40e_led_set_reg - write LED register 4951 * @hw: pointer to the HW structure 4952 * @led_addr: LED register address 4953 * @reg_val: register value to write 4954 **/ 4955 static enum i40e_status_code i40e_led_set_reg(struct i40e_hw *hw, u16 led_addr, 4956 u32 reg_val) 4957 { 4958 enum i40e_status_code status; 4959 u8 phy_addr = 0; 4960 u8 port_num; 4961 u32 i; 4962
4963 if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) { 4964 status = 4965 i40e_aq_set_phy_register(hw, 4966 I40E_AQ_PHY_REG_ACCESS_EXTERNAL, 4967 I40E_PHY_COM_REG_PAGE, 4968 I40E_PHY_LED_PROV_REG_1, 4969 reg_val, NULL); 4970 } else { 4971 i = rd32(hw, I40E_PFGEN_PORTNUM); 4972 port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK); 4973 phy_addr = i40e_get_phy_address(hw, port_num); 4974 status = i40e_write_phy_register_clause45(hw, 4975 I40E_PHY_COM_REG_PAGE, 4976 led_addr, phy_addr, 4977 (u16)reg_val); 4978 } 4979 4980 return status; 4981 } 4982
4983 /** 4984 * i40e_led_get_phy - return current on/off mode 4985 * @hw: pointer to the hw struct 4986 * @led_addr: address of led register to use 4987 * @val: original value of register to use 4988 * 4989 **/ 4990 i40e_status i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr, 4991 u16 *val) 4992 { 4993 i40e_status status = 0; 4994 u16 gpio_led_port; 4995 u8 phy_addr = 0; 4996 u16 reg_val; 4997 u16 temp_addr; 4998 u8 port_num; 4999 u32 i; 5000 u32 reg_val_aq; 5001
5002 if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) { 5003 status = 5004 i40e_aq_get_phy_register(hw, 5005 I40E_AQ_PHY_REG_ACCESS_EXTERNAL, 5006 I40E_PHY_COM_REG_PAGE, 5007 I40E_PHY_LED_PROV_REG_1, 5008 &reg_val_aq, NULL); 5009 if (status == I40E_SUCCESS) 5010 *val = (u16)reg_val_aq; 5011 return status; 5012 } 5013 temp_addr = I40E_PHY_LED_PROV_REG_1; 5014 i = rd32(hw, I40E_PFGEN_PORTNUM); 5015 port_num = (u8)(i &
I40E_PFGEN_PORTNUM_PORT_NUM_MASK); 5016 phy_addr = i40e_get_phy_address(hw, port_num); 5017 5018 for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++, 5019 temp_addr++) { 5020 status = i40e_read_phy_register_clause45(hw, 5021 I40E_PHY_COM_REG_PAGE, 5022 temp_addr, phy_addr, 5023 &reg_val); 5024 if (status) 5025 return status; 5026 *val = reg_val; 5027 if (reg_val & I40E_PHY_LED_LINK_MODE_MASK) { 5028 *led_addr = temp_addr; 5029 break; 5030 } 5031 } 5032 return status; 5033 } 5034
5035 /** 5036 * i40e_led_set_phy 5037 * @hw: pointer to the HW structure 5038 * @on: true to turn the LED on, false to turn it off 5039 * @led_addr: address of the LED register to use 5040 * @mode: original val plus bit for set or ignore 5041 *
 * Set the LEDs on or off when controlled by the PHY 5042 **/ 5043 i40e_status i40e_led_set_phy(struct i40e_hw *hw, bool on, 5044 u16 led_addr, u32 mode) 5045 { 5046 i40e_status status = 0; 5047 u32 led_ctl = 0; 5048 u32 led_reg = 0; 5049
5050 status = i40e_led_get_reg(hw, led_addr, &led_reg); 5051 if (status) 5052 return status; 5053 led_ctl = led_reg; 5054 if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) { 5055 led_reg = 0; 5056 status = i40e_led_set_reg(hw, led_addr, led_reg); 5057 if (status) 5058 return status; 5059 } 5060 status = i40e_led_get_reg(hw, led_addr, &led_reg); 5061 if (status) 5062 goto restore_config; 5063 if (on) 5064 led_reg = I40E_PHY_LED_MANUAL_ON; 5065 else 5066 led_reg = 0; 5067
5068 status = i40e_led_set_reg(hw, led_addr, led_reg); 5069 if (status) 5070 goto restore_config; 5071 if (mode & I40E_PHY_LED_MODE_ORIG) { 5072 led_ctl = (mode & I40E_PHY_LED_MODE_MASK); 5073 status = i40e_led_set_reg(hw, led_addr, led_ctl); 5074 } 5075 return status; 5076 5077 restore_config: 5078 status = i40e_led_set_reg(hw, led_addr, led_ctl); 5079 return status; 5080 } 5081
5082 /** 5083 * i40e_aq_rx_ctl_read_register - use FW to read from an Rx control register 5084 * @hw: pointer to the hw struct 5085 * @reg_addr: register address 5086 * @reg_val: ptr to register value 5087 * @cmd_details: pointer to command details structure or NULL 5088 * 5089 * Use the firmware to read the Rx control register, 5090 * especially useful if the Rx unit is under heavy pressure 5091 **/ 5092 i40e_status i40e_aq_rx_ctl_read_register(struct i40e_hw *hw, 5093 u32 reg_addr, u32 *reg_val, 5094 struct i40e_asq_cmd_details *cmd_details) 5095 { 5096 struct i40e_aq_desc desc; 5097 struct i40e_aqc_rx_ctl_reg_read_write *cmd_resp = 5098 (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw; 5099 i40e_status status; 5100
5101 if (!reg_val) 5102 return I40E_ERR_PARAM; 5103 5104 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_read); 5105 5106 cmd_resp->address = cpu_to_le32(reg_addr); 5107 5108 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 5109 5110 if (status == 0) 5111 *reg_val = le32_to_cpu(cmd_resp->value); 5112 5113 return status; 5114 } 5115
5116 /** 5117 * i40e_read_rx_ctl - read from an Rx control register 5118 * @hw: pointer to the hw struct 5119 * @reg_addr: register address 5120 **/ 5121 u32 i40e_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr) 5122 { 5123 i40e_status status = 0; 5124 bool use_register; 5125 int retry = 5; 5126 u32 val = 0; 5127
5128 use_register = (((hw->aq.api_maj_ver == 1) && 5129 (hw->aq.api_min_ver < 5)) || 5130 (hw->mac.type == I40E_MAC_X722)); 5131 if (!use_register) { 5132 do_retry: 5133 status = i40e_aq_rx_ctl_read_register(hw, reg_addr, &val, NULL); 5134 if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) { 5135 usleep_range(1000, 2000); 5136 retry--; 5137 goto do_retry; 5138 } 5139 } 5140 5141 /* if the
AQ access failed, try the old-fashioned way */ 5142 if (status || use_register) 5143 val = rd32(hw, reg_addr); 5144 5145 return val; 5146 } 5147 5148 /** 5149 * i40e_aq_rx_ctl_write_register 5150 * @hw: pointer to the hw struct 5151 * @reg_addr: register address 5152 * @reg_val: register value 5153 * @cmd_details: pointer to command details structure or NULL 5154 * 5155 * Use the firmware to write to an Rx control register, 5156 * especially useful if the Rx unit is under heavy pressure 5157 **/ 5158 i40e_status i40e_aq_rx_ctl_write_register(struct i40e_hw *hw, 5159 u32 reg_addr, u32 reg_val, 5160 struct i40e_asq_cmd_details *cmd_details) 5161 { 5162 struct i40e_aq_desc desc; 5163 struct i40e_aqc_rx_ctl_reg_read_write *cmd = 5164 (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw; 5165 i40e_status status; 5166 5167 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_write); 5168 5169 cmd->address = cpu_to_le32(reg_addr); 5170 cmd->value = cpu_to_le32(reg_val); 5171 5172 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 5173 5174 return status; 5175 } 5176 5177 /** 5178 * i40e_write_rx_ctl - write to an Rx control register 5179 * @hw: pointer to the hw struct 5180 * @reg_addr: register address 5181 * @reg_val: register value 5182 **/ 5183 void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val) 5184 { 5185 i40e_status status = 0; 5186 bool use_register; 5187 int retry = 5; 5188 5189 use_register = (((hw->aq.api_maj_ver == 1) && 5190 (hw->aq.api_min_ver < 5)) || 5191 (hw->mac.type == I40E_MAC_X722)); 5192 if (!use_register) { 5193 do_retry: 5194 status = i40e_aq_rx_ctl_write_register(hw, reg_addr, 5195 reg_val, NULL); 5196 if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) { 5197 usleep_range(1000, 2000); 5198 retry--; 5199 goto do_retry; 5200 } 5201 } 5202 5203 /* if the AQ access failed, try the old-fashioned way */ 5204 if (status || use_register) 5205 wr32(hw, reg_addr, reg_val); 5206 } 5207 5208 /** 5209 * i40e_aq_set_phy_register 5210 * @hw: pointer to the hw struct 5211 * @phy_select: select which phy should be accessed 5212 * @dev_addr: PHY device address 5213 * @reg_addr: PHY register address 5214 * @reg_val: new register value 5215 * @cmd_details: pointer to command details structure or NULL 5216 * 5217 * Write the external PHY register. 5218 **/ 5219 i40e_status i40e_aq_set_phy_register(struct i40e_hw *hw, 5220 u8 phy_select, u8 dev_addr, 5221 u32 reg_addr, u32 reg_val, 5222 struct i40e_asq_cmd_details *cmd_details) 5223 { 5224 struct i40e_aq_desc desc; 5225 struct i40e_aqc_phy_register_access *cmd = 5226 (struct i40e_aqc_phy_register_access *)&desc.params.raw; 5227 i40e_status status; 5228 5229 i40e_fill_default_direct_cmd_desc(&desc, 5230 i40e_aqc_opc_set_phy_register); 5231 5232 cmd->phy_interface = phy_select; 5233 cmd->dev_address = dev_addr; 5234 cmd->reg_address = cpu_to_le32(reg_addr); 5235 cmd->reg_value = cpu_to_le32(reg_val); 5236 5237 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 5238 5239 return status; 5240 } 5241 5242 /** 5243 * i40e_aq_get_phy_register 5244 * @hw: pointer to the hw struct 5245 * @phy_select: select which phy should be accessed 5246 * @dev_addr: PHY device address 5247 * @reg_addr: PHY register address 5248 * @reg_val: read register value 5249 * @cmd_details: pointer to command details structure or NULL 5250 * 5251 * Read the external PHY register. 
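 *
 * i40e_led_get_reg() earlier in this file uses this helper when the firmware
 * exposes PHY access; a direct call looks like this (editor's sketch, with
 * the page/register constants already used elsewhere in this file):
 *
 *	u32 val = 0;
 *	i40e_status ret;
 *
 *	ret = i40e_aq_get_phy_register(hw, I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
 *				       I40E_PHY_COM_REG_PAGE,
 *				       I40E_PHY_LED_PROV_REG_1, &val, NULL);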
/**
 * i40e_aq_write_ddp - Write dynamic device personalization (ddp)
 * @hw: pointer to the hw struct
 * @buff: command buffer (size in bytes = buff_size)
 * @buff_size: buffer size in bytes
 * @track_id: package tracking id
 * @error_offset: returns error offset
 * @error_info: returns error information
 * @cmd_details: pointer to command details structure or NULL
 **/
enum i40e_status_code
i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
                  u16 buff_size, u32 track_id,
                  u32 *error_offset, u32 *error_info,
                  struct i40e_asq_cmd_details *cmd_details)
{
        struct i40e_aq_desc desc;
        struct i40e_aqc_write_personalization_profile *cmd =
                (struct i40e_aqc_write_personalization_profile *)
                &desc.params.raw;
        struct i40e_aqc_write_ddp_resp *resp;
        i40e_status status;

        i40e_fill_default_direct_cmd_desc(&desc,
                                          i40e_aqc_opc_write_personalization_profile);

        desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD);
        if (buff_size > I40E_AQ_LARGE_BUF)
                desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);

        desc.datalen = cpu_to_le16(buff_size);

        cmd->profile_track_id = cpu_to_le32(track_id);

        status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
        if (!status) {
                resp = (struct i40e_aqc_write_ddp_resp *)&desc.params.raw;
                if (error_offset)
                        *error_offset = le32_to_cpu(resp->error_offset);
                if (error_info)
                        *error_info = le32_to_cpu(resp->error_info);
        }

        return status;
}

/**
 * i40e_aq_get_ddp_list - Read dynamic device personalization (ddp)
 * @hw: pointer to the hw struct
 * @buff: command buffer (size in bytes = buff_size)
 * @buff_size: buffer size in bytes
 * @flags: AdminQ command flags
 * @cmd_details: pointer to command details structure or NULL
 **/
enum i40e_status_code
i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
                     u16 buff_size, u8 flags,
                     struct i40e_asq_cmd_details *cmd_details)
{
        struct i40e_aq_desc desc;
        struct i40e_aqc_get_applied_profiles *cmd =
                (struct i40e_aqc_get_applied_profiles *)&desc.params.raw;
        i40e_status status;

        i40e_fill_default_direct_cmd_desc(&desc,
                                          i40e_aqc_opc_get_personalization_profile_list);

        desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
        if (buff_size > I40E_AQ_LARGE_BUF)
                desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
        desc.datalen = cpu_to_le16(buff_size);

        cmd->flags = flags;

        status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);

        return status;
}

/**
 * i40e_find_segment_in_package
 * @segment_type: the segment type to search for (i.e., SEGMENT_TYPE_I40E)
 * @pkg_hdr: pointer to the package header to be searched
 *
 * This function searches a package file for a particular segment type. On
 * success it returns a pointer to the segment header, otherwise it will
 * return NULL.
 **/
struct i40e_generic_seg_header *
i40e_find_segment_in_package(u32 segment_type,
                             struct i40e_package_header *pkg_hdr)
{
        struct i40e_generic_seg_header *segment;
        u32 i;

        /* Search all package segments for the requested segment type */
        for (i = 0; i < pkg_hdr->segment_count; i++) {
                segment =
                        (struct i40e_generic_seg_header *)((u8 *)pkg_hdr +
                         pkg_hdr->segment_offset[i]);

                if (segment->type == segment_type)
                        return segment;
        }

        return NULL;
}
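/* Illustrative sketch, not part of the driver: locating the device profile
 * segment of a DDP package before handing it to i40e_write_profile(),
 * defined below.  The cast from the generic segment header to
 * struct i40e_profile_segment is an assumption about how callers consume
 * the result; the helper name is hypothetical.
 */
static inline struct i40e_profile_segment *
i40e_example_find_profile_seg(struct i40e_package_header *pkg_hdr)
{
        struct i40e_generic_seg_header *seg;

        /* SEGMENT_TYPE_I40E identifies the profile segment for this device */
        seg = i40e_find_segment_in_package(SEGMENT_TYPE_I40E, pkg_hdr);

        return (struct i40e_profile_segment *)seg;
}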
/**
 * i40e_write_profile
 * @hw: pointer to the hardware structure
 * @profile: pointer to the profile segment of the package to be downloaded
 * @track_id: package tracking id
 *
 * Handles the download of a complete package.
 */
enum i40e_status_code
i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
                   u32 track_id)
{
        i40e_status status = 0;
        struct i40e_section_table *sec_tbl;
        struct i40e_profile_section_header *sec = NULL;
        u32 dev_cnt;
        u32 vendor_dev_id;
        u32 *nvm;
        u32 section_size = 0;
        u32 offset = 0, info = 0;
        u32 i;

        dev_cnt = profile->device_table_count;

        for (i = 0; i < dev_cnt; i++) {
                vendor_dev_id = profile->device_table[i].vendor_dev_id;
                if ((vendor_dev_id >> 16) == PCI_VENDOR_ID_INTEL)
                        if (hw->device_id == (vendor_dev_id & 0xFFFF))
                                break;
        }
        if (i == dev_cnt) {
                i40e_debug(hw, I40E_DEBUG_PACKAGE, "Device doesn't support DDP\n");
                return I40E_ERR_DEVICE_NOT_SUPPORTED;
        }

        nvm = (u32 *)&profile->device_table[dev_cnt];
        sec_tbl = (struct i40e_section_table *)&nvm[nvm[0] + 1];

        for (i = 0; i < sec_tbl->section_count; i++) {
                sec = (struct i40e_profile_section_header *)((u8 *)profile +
                        sec_tbl->section_offset[i]);

                /* Skip 'AQ', 'note' and 'name' sections */
                if (sec->section.type != SECTION_TYPE_MMIO)
                        continue;

                section_size = sec->section.size +
                        sizeof(struct i40e_profile_section_header);

                /* Write profile */
                status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size,
                                           track_id, &offset, &info, NULL);
                if (status) {
                        i40e_debug(hw, I40E_DEBUG_PACKAGE,
                                   "Failed to write profile: offset %d, info %d\n",
                                   offset, info);
                        break;
                }
        }
        return status;
}

/**
 * i40e_add_pinfo_to_list
 * @hw: pointer to the hardware structure
 * @profile: pointer to the profile segment of the package
 * @profile_info_sec: buffer for information section
 * @track_id: package tracking id
 *
 * Register a profile to the list of loaded profiles.
 */
enum i40e_status_code
i40e_add_pinfo_to_list(struct i40e_hw *hw,
                       struct i40e_profile_segment *profile,
                       u8 *profile_info_sec, u32 track_id)
{
        i40e_status status = 0;
        struct i40e_profile_section_header *sec = NULL;
        struct i40e_profile_info *pinfo;
        u32 offset = 0, info = 0;

        sec = (struct i40e_profile_section_header *)profile_info_sec;
        sec->tbl_size = 1;
        sec->data_end = sizeof(struct i40e_profile_section_header) +
                        sizeof(struct i40e_profile_info);
        sec->section.type = SECTION_TYPE_INFO;
        sec->section.offset = sizeof(struct i40e_profile_section_header);
        sec->section.size = sizeof(struct i40e_profile_info);
        pinfo = (struct i40e_profile_info *)(profile_info_sec +
                                             sec->section.offset);
        pinfo->track_id = track_id;
        pinfo->version = profile->version;
        pinfo->op = I40E_DDP_ADD_TRACKID;
        memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE);

        status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end,
                                   track_id, &offset, &info, NULL);

        return status;
}
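/* Illustrative sketch, not part of the driver: the two-step DDP download
 * flow built from the helpers above - write the profile to the device,
 * then record it in the applied-profile list.  The caller is assumed to
 * provide a profile_info_sec buffer of at least
 * sizeof(struct i40e_profile_section_header) +
 * sizeof(struct i40e_profile_info) bytes, matching the data_end
 * computation in i40e_add_pinfo_to_list(); the helper name is hypothetical.
 */
static inline enum i40e_status_code
i40e_example_load_profile(struct i40e_hw *hw,
                          struct i40e_profile_segment *profile,
                          u32 track_id, u8 *profile_info_sec)
{
        enum i40e_status_code status;

        status = i40e_write_profile(hw, profile, track_id);
        if (status)
                return status;

        /* record the loaded profile so it shows up in the applied list */
        return i40e_add_pinfo_to_list(hw, profile, profile_info_sec, track_id);
}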
/**
 * i40e_aq_add_cloud_filters
 * @hw: pointer to the hardware structure
 * @seid: VSI seid to add cloud filters to
 * @filters: Buffer which contains the filters to be added
 * @filter_count: number of filters contained in the buffer
 *
 * Set the cloud filters for a given VSI. The contents of the
 * i40e_aqc_cloud_filters_element_data are filled in by the caller
 * of the function.
 **/
enum i40e_status_code
i40e_aq_add_cloud_filters(struct i40e_hw *hw, u16 seid,
                          struct i40e_aqc_cloud_filters_element_data *filters,
                          u8 filter_count)
{
        struct i40e_aq_desc desc;
        struct i40e_aqc_add_remove_cloud_filters *cmd =
                (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
        enum i40e_status_code status;
        u16 buff_len;

        i40e_fill_default_direct_cmd_desc(&desc,
                                          i40e_aqc_opc_add_cloud_filters);

        buff_len = filter_count * sizeof(*filters);
        desc.datalen = cpu_to_le16(buff_len);
        desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
        cmd->num_filters = filter_count;
        cmd->seid = cpu_to_le16(seid);

        status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);

        return status;
}

/**
 * i40e_aq_add_cloud_filters_bb
 * @hw: pointer to the hardware structure
 * @seid: VSI seid to add cloud filters to
 * @filters: Buffer which contains the filters in big buffer to be added
 * @filter_count: number of filters contained in the buffer
 *
 * Set the big buffer cloud filters for a given VSI. The contents of the
 * i40e_aqc_cloud_filters_element_bb are filled in by the caller of the
 * function.
 **/
i40e_status
i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
                             struct i40e_aqc_cloud_filters_element_bb *filters,
                             u8 filter_count)
{
        struct i40e_aq_desc desc;
        struct i40e_aqc_add_remove_cloud_filters *cmd =
                (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
        i40e_status status;
        u16 buff_len;
        int i;

        i40e_fill_default_direct_cmd_desc(&desc,
                                          i40e_aqc_opc_add_cloud_filters);

        buff_len = filter_count * sizeof(*filters);
        desc.datalen = cpu_to_le16(buff_len);
        desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
        cmd->num_filters = filter_count;
        cmd->seid = cpu_to_le16(seid);
        cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB;

        for (i = 0; i < filter_count; i++) {
                u16 tnl_type;
                u32 ti;

                tnl_type = (le16_to_cpu(filters[i].element.flags) &
                            I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >>
                           I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT;

                /* Due to hardware eccentricities, the VNI for Geneve is
                 * shifted one byte further left than the Tenant ID used by
                 * other tunnel types.
                 */
                if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) {
                        ti = le32_to_cpu(filters[i].element.tenant_id);
                        filters[i].element.tenant_id = cpu_to_le32(ti << 8);
                }
        }

        status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);

        return status;
}
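/* Illustrative sketch, not part of the driver: submitting a single cloud
 * filter for a VSI with i40e_aq_add_cloud_filters().  The element is only
 * zero-initialized here; a real caller fills in the match fields of
 * i40e_aqc_cloud_filters_element_data (MACs, VLAN, tenant id, flags)
 * before issuing the command, as the kernel-doc above requires.  The
 * helper name is hypothetical.
 */
static inline enum i40e_status_code
i40e_example_add_one_cloud_filter(struct i40e_hw *hw, u16 seid)
{
        struct i40e_aqc_cloud_filters_element_data filter = {};

        /* ... populate the filter match criteria here ... */

        return i40e_aq_add_cloud_filters(hw, seid, &filter, 1);
}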
/**
 * i40e_aq_rem_cloud_filters
 * @hw: pointer to the hardware structure
 * @seid: VSI seid to remove cloud filters from
 * @filters: Buffer which contains the filters to be removed
 * @filter_count: number of filters contained in the buffer
 *
 * Remove the cloud filters for a given VSI. The contents of the
 * i40e_aqc_cloud_filters_element_data are filled in by the caller
 * of the function.
 **/
enum i40e_status_code
i40e_aq_rem_cloud_filters(struct i40e_hw *hw, u16 seid,
                          struct i40e_aqc_cloud_filters_element_data *filters,
                          u8 filter_count)
{
        struct i40e_aq_desc desc;
        struct i40e_aqc_add_remove_cloud_filters *cmd =
                (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
        enum i40e_status_code status;
        u16 buff_len;

        i40e_fill_default_direct_cmd_desc(&desc,
                                          i40e_aqc_opc_remove_cloud_filters);

        buff_len = filter_count * sizeof(*filters);
        desc.datalen = cpu_to_le16(buff_len);
        desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
        cmd->num_filters = filter_count;
        cmd->seid = cpu_to_le16(seid);

        status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);

        return status;
}
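/* Illustrative sketch, not part of the driver: tearing a cloud filter back
 * down with i40e_aq_rem_cloud_filters().  It is assumed the caller kept the
 * element (or the data needed to rebuild it) that was used when the filter
 * was added, so that the remove command describes the same filter.  The
 * helper name is hypothetical.
 */
static inline enum i40e_status_code
i40e_example_del_one_cloud_filter(struct i40e_hw *hw, u16 seid,
                                  struct i40e_aqc_cloud_filters_element_data *filter)
{
        /* the element contents identify which filter the firmware removes */
        return i40e_aq_rem_cloud_filters(hw, seid, filter, 1);
}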
/**
 * i40e_aq_rem_cloud_filters_bb
 * @hw: pointer to the hardware structure
 * @seid: VSI seid to remove cloud filters from
 * @filters: Buffer which contains the filters in big buffer to be removed
 * @filter_count: number of filters contained in the buffer
 *
 * Remove the big buffer cloud filters for a given VSI. The contents of the
 * i40e_aqc_cloud_filters_element_bb are filled in by the caller of the
 * function.
 **/
i40e_status
i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
                             struct i40e_aqc_cloud_filters_element_bb *filters,
                             u8 filter_count)
{
        struct i40e_aq_desc desc;
        struct i40e_aqc_add_remove_cloud_filters *cmd =
                (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
        i40e_status status;
        u16 buff_len;
        int i;

        i40e_fill_default_direct_cmd_desc(&desc,
                                          i40e_aqc_opc_remove_cloud_filters);

        buff_len = filter_count * sizeof(*filters);
        desc.datalen = cpu_to_le16(buff_len);
        desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
        cmd->num_filters = filter_count;
        cmd->seid = cpu_to_le16(seid);
        cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB;

        for (i = 0; i < filter_count; i++) {
                u16 tnl_type;
                u32 ti;

                tnl_type = (le16_to_cpu(filters[i].element.flags) &
                            I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >>
                           I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT;

                /* Due to hardware eccentricities, the VNI for Geneve is
                 * shifted one byte further left than the Tenant ID used by
                 * other tunnel types.
                 */
                if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) {
                        ti = le32_to_cpu(filters[i].element.tenant_id);
                        filters[i].element.tenant_id = cpu_to_le32(ti << 8);
                }
        }

        status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);

        return status;
}
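/* Illustrative sketch, not part of the driver: the Geneve tenant-id
 * adjustment applied by the big-buffer add/remove helpers above.  The
 * caller stores the 24-bit VNI as usual; for Geneve tunnels the helpers
 * shift it one byte to the left before it reaches the firmware, so a VNI
 * of 0x00ABCDEF is carried as 0xABCDEF00.  The helper name is hypothetical.
 */
static inline __le32 i40e_example_geneve_tenant_id(u32 vni)
{
        /* same transformation as the loops in the _bb helpers above */
        return cpu_to_le32(vni << 8);
}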