/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 - 2016 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

/* ethtool support for i40e */

#include "i40e.h"
#include "i40e_diag.h"

struct i40e_stats {
	char stat_string[ETH_GSTRING_LEN];
	int sizeof_stat;
	int stat_offset;
};

#define I40E_STAT(_type, _name, _stat) { \
	.stat_string = _name, \
	.sizeof_stat = FIELD_SIZEOF(_type, _stat), \
	.stat_offset = offsetof(_type, _stat) \
}

#define I40E_NETDEV_STAT(_net_stat) \
	I40E_STAT(struct rtnl_link_stats64, #_net_stat, _net_stat)
#define I40E_PF_STAT(_name, _stat) \
	I40E_STAT(struct i40e_pf, _name, _stat)
#define I40E_VSI_STAT(_name, _stat) \
	I40E_STAT(struct i40e_vsi, _name, _stat)
#define I40E_VEB_STAT(_name, _stat) \
	I40E_STAT(struct i40e_veb, _name, _stat)

static const struct i40e_stats i40e_gstrings_net_stats[] = {
	I40E_NETDEV_STAT(rx_packets),
	I40E_NETDEV_STAT(tx_packets),
	I40E_NETDEV_STAT(rx_bytes),
	I40E_NETDEV_STAT(tx_bytes),
	I40E_NETDEV_STAT(rx_errors),
	I40E_NETDEV_STAT(tx_errors),
	I40E_NETDEV_STAT(rx_dropped),
	I40E_NETDEV_STAT(tx_dropped),
	I40E_NETDEV_STAT(collisions),
	I40E_NETDEV_STAT(rx_length_errors),
	I40E_NETDEV_STAT(rx_crc_errors),
};

static const struct i40e_stats i40e_gstrings_veb_stats[] = {
	I40E_VEB_STAT("rx_bytes", stats.rx_bytes),
	I40E_VEB_STAT("tx_bytes", stats.tx_bytes),
	I40E_VEB_STAT("rx_unicast", stats.rx_unicast),
	I40E_VEB_STAT("tx_unicast", stats.tx_unicast),
	I40E_VEB_STAT("rx_multicast", stats.rx_multicast),
	I40E_VEB_STAT("tx_multicast", stats.tx_multicast),
	I40E_VEB_STAT("rx_broadcast", stats.rx_broadcast),
	I40E_VEB_STAT("tx_broadcast", stats.tx_broadcast),
	I40E_VEB_STAT("rx_discards", stats.rx_discards),
	I40E_VEB_STAT("tx_discards", stats.tx_discards),
	I40E_VEB_STAT("tx_errors", stats.tx_errors),
	I40E_VEB_STAT("rx_unknown_protocol", stats.rx_unknown_protocol),
};

static const struct i40e_stats i40e_gstrings_misc_stats[] = {
	I40E_VSI_STAT("rx_unicast", eth_stats.rx_unicast),
	I40E_VSI_STAT("tx_unicast", eth_stats.tx_unicast),
	I40E_VSI_STAT("rx_multicast", eth_stats.rx_multicast),
	I40E_VSI_STAT("tx_multicast", eth_stats.tx_multicast),
	I40E_VSI_STAT("rx_broadcast", eth_stats.rx_broadcast),
	I40E_VSI_STAT("tx_broadcast", eth_stats.tx_broadcast),
	I40E_VSI_STAT("rx_unknown_protocol", eth_stats.rx_unknown_protocol),
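	/* The counters below are maintained by the driver in software
	 * (Tx linearize/force-writeback events and Rx buffer/page
	 * allocation failures) rather than read from the VSI hardware
	 * eth_stats block.
	 */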
	I40E_VSI_STAT("tx_linearize", tx_linearize),
	I40E_VSI_STAT("tx_force_wb", tx_force_wb),
	I40E_VSI_STAT("rx_alloc_fail", rx_buf_failed),
	I40E_VSI_STAT("rx_pg_alloc_fail", rx_page_failed),
};

/* These PF_STATs might look like duplicates of some NETDEV_STATs,
 * but they are separate.  This device supports Virtualization, and
 * as such might have several netdevs supporting VMDq and FCoE going
 * through a single port.  The NETDEV_STATs are for individual netdevs
 * seen at the top of the stack, and the PF_STATs are for the physical
 * function at the bottom of the stack hosting those netdevs.
 *
 * The PF_STATs are appended to the netdev stats only when ethtool -S
 * is queried on the base PF netdev, not on the VMDq or FCoE netdev.
 */
static const struct i40e_stats i40e_gstrings_stats[] = {
	I40E_PF_STAT("rx_bytes", stats.eth.rx_bytes),
	I40E_PF_STAT("tx_bytes", stats.eth.tx_bytes),
	I40E_PF_STAT("rx_unicast", stats.eth.rx_unicast),
	I40E_PF_STAT("tx_unicast", stats.eth.tx_unicast),
	I40E_PF_STAT("rx_multicast", stats.eth.rx_multicast),
	I40E_PF_STAT("tx_multicast", stats.eth.tx_multicast),
	I40E_PF_STAT("rx_broadcast", stats.eth.rx_broadcast),
	I40E_PF_STAT("tx_broadcast", stats.eth.tx_broadcast),
	I40E_PF_STAT("tx_errors", stats.eth.tx_errors),
	I40E_PF_STAT("rx_dropped", stats.eth.rx_discards),
	I40E_PF_STAT("tx_dropped_link_down", stats.tx_dropped_link_down),
	I40E_PF_STAT("rx_crc_errors", stats.crc_errors),
	I40E_PF_STAT("illegal_bytes", stats.illegal_bytes),
	I40E_PF_STAT("mac_local_faults", stats.mac_local_faults),
	I40E_PF_STAT("mac_remote_faults", stats.mac_remote_faults),
	I40E_PF_STAT("tx_timeout", tx_timeout_count),
	I40E_PF_STAT("rx_csum_bad", hw_csum_rx_error),
	I40E_PF_STAT("rx_length_errors", stats.rx_length_errors),
	I40E_PF_STAT("link_xon_rx", stats.link_xon_rx),
	I40E_PF_STAT("link_xoff_rx", stats.link_xoff_rx),
	I40E_PF_STAT("link_xon_tx", stats.link_xon_tx),
	I40E_PF_STAT("link_xoff_tx", stats.link_xoff_tx),
	I40E_PF_STAT("rx_size_64", stats.rx_size_64),
	I40E_PF_STAT("rx_size_127", stats.rx_size_127),
	I40E_PF_STAT("rx_size_255", stats.rx_size_255),
	I40E_PF_STAT("rx_size_511", stats.rx_size_511),
	I40E_PF_STAT("rx_size_1023", stats.rx_size_1023),
	I40E_PF_STAT("rx_size_1522", stats.rx_size_1522),
	I40E_PF_STAT("rx_size_big", stats.rx_size_big),
	I40E_PF_STAT("tx_size_64", stats.tx_size_64),
	I40E_PF_STAT("tx_size_127", stats.tx_size_127),
	I40E_PF_STAT("tx_size_255", stats.tx_size_255),
	I40E_PF_STAT("tx_size_511", stats.tx_size_511),
	I40E_PF_STAT("tx_size_1023", stats.tx_size_1023),
	I40E_PF_STAT("tx_size_1522", stats.tx_size_1522),
	I40E_PF_STAT("tx_size_big", stats.tx_size_big),
	I40E_PF_STAT("rx_undersize", stats.rx_undersize),
	I40E_PF_STAT("rx_fragments", stats.rx_fragments),
	I40E_PF_STAT("rx_oversize", stats.rx_oversize),
	I40E_PF_STAT("rx_jabber", stats.rx_jabber),
	I40E_PF_STAT("VF_admin_queue_requests", vf_aq_requests),
	I40E_PF_STAT("arq_overflows", arq_overflows),
	I40E_PF_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
	I40E_PF_STAT("tx_hwtstamp_skipped", tx_hwtstamp_skipped),
	I40E_PF_STAT("fdir_flush_cnt", fd_flush_cnt),
	I40E_PF_STAT("fdir_atr_match", stats.fd_atr_match),
	I40E_PF_STAT("fdir_atr_tunnel_match", stats.fd_atr_tunnel_match),
	I40E_PF_STAT("fdir_atr_status", stats.fd_atr_status),
	I40E_PF_STAT("fdir_sb_match", stats.fd_sb_match),
	I40E_PF_STAT("fdir_sb_status", stats.fd_sb_status),

	/* LPI stats */
	I40E_PF_STAT("tx_lpi_status", stats.tx_lpi_status),
	I40E_PF_STAT("rx_lpi_status", stats.rx_lpi_status),
	I40E_PF_STAT("tx_lpi_count", stats.tx_lpi_count),
	I40E_PF_STAT("rx_lpi_count", stats.rx_lpi_count),
};

#define I40E_QUEUE_STATS_LEN(n) \
	(((struct i40e_netdev_priv *)netdev_priv((n)))->vsi->num_queue_pairs \
	 * 2 /* Tx and Rx together */ \
	 * (sizeof(struct i40e_queue_stats) / sizeof(u64)))
#define I40E_GLOBAL_STATS_LEN	ARRAY_SIZE(i40e_gstrings_stats)
#define I40E_NETDEV_STATS_LEN	ARRAY_SIZE(i40e_gstrings_net_stats)
#define I40E_MISC_STATS_LEN	ARRAY_SIZE(i40e_gstrings_misc_stats)
#define I40E_VSI_STATS_LEN(n)	(I40E_NETDEV_STATS_LEN + \
				 I40E_MISC_STATS_LEN + \
				 I40E_QUEUE_STATS_LEN((n)))
#define I40E_PFC_STATS_LEN ( \
		(FIELD_SIZEOF(struct i40e_pf, stats.priority_xoff_rx) + \
		 FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_rx) + \
		 FIELD_SIZEOF(struct i40e_pf, stats.priority_xoff_tx) + \
		 FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_tx) + \
		 FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_2_xoff)) \
		 / sizeof(u64))
#define I40E_VEB_TC_STATS_LEN ( \
		(FIELD_SIZEOF(struct i40e_veb, tc_stats.tc_rx_packets) + \
		 FIELD_SIZEOF(struct i40e_veb, tc_stats.tc_rx_bytes) + \
		 FIELD_SIZEOF(struct i40e_veb, tc_stats.tc_tx_packets) + \
		 FIELD_SIZEOF(struct i40e_veb, tc_stats.tc_tx_bytes)) \
		 / sizeof(u64))
#define I40E_VEB_STATS_LEN	ARRAY_SIZE(i40e_gstrings_veb_stats)
#define I40E_VEB_STATS_TOTAL	(I40E_VEB_STATS_LEN + I40E_VEB_TC_STATS_LEN)
#define I40E_PF_STATS_LEN(n)	(I40E_GLOBAL_STATS_LEN + \
				 I40E_PFC_STATS_LEN + \
				 I40E_VSI_STATS_LEN((n)))

enum i40e_ethtool_test_id {
	I40E_ETH_TEST_REG = 0,
	I40E_ETH_TEST_EEPROM,
	I40E_ETH_TEST_INTR,
	I40E_ETH_TEST_LINK,
};

static const char i40e_gstrings_test[][ETH_GSTRING_LEN] = {
	"Register test (offline)",
	"Eeprom test (offline)",
	"Interrupt test (offline)",
	"Link test (on/offline)"
};

#define I40E_TEST_LEN (sizeof(i40e_gstrings_test) / ETH_GSTRING_LEN)

struct i40e_priv_flags {
	char flag_string[ETH_GSTRING_LEN];
	u64 flag;
	bool read_only;
};

#define I40E_PRIV_FLAG(_name, _flag, _read_only) { \
	.flag_string = _name, \
	.flag = _flag, \
	.read_only = _read_only, \
}

static const struct i40e_priv_flags i40e_gstrings_priv_flags[] = {
	/* NOTE: MFP setting cannot be changed */
	I40E_PRIV_FLAG("MFP", I40E_FLAG_MFP_ENABLED, 1),
	I40E_PRIV_FLAG("LinkPolling", I40E_FLAG_LINK_POLLING_ENABLED, 0),
	I40E_PRIV_FLAG("flow-director-atr", I40E_FLAG_FD_ATR_ENABLED, 0),
	I40E_PRIV_FLAG("veb-stats", I40E_FLAG_VEB_STATS_ENABLED, 0),
	I40E_PRIV_FLAG("hw-atr-eviction", I40E_FLAG_HW_ATR_EVICT_ENABLED, 0),
	I40E_PRIV_FLAG("legacy-rx", I40E_FLAG_LEGACY_RX, 0),
	I40E_PRIV_FLAG("disable-source-pruning",
		       I40E_FLAG_SOURCE_PRUNING_DISABLED, 0),
};

#define I40E_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_gstrings_priv_flags)

/* Private flags with a global effect, restricted to PF 0 */
static const struct i40e_priv_flags i40e_gl_gstrings_priv_flags[] = {
	I40E_PRIV_FLAG("vf-true-promisc-support",
		       I40E_FLAG_TRUE_PROMISC_SUPPORT, 0),
};

#define I40E_GL_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_gl_gstrings_priv_flags)

/**
 * i40e_partition_setting_complaint - generic complaint for MFP restriction
 * @pf: the PF struct
 **/
static void i40e_partition_setting_complaint(struct i40e_pf *pf)
{
	dev_info(&pf->pdev->dev,
		 "The link settings are allowed to be changed only from the first partition of a given port. Please switch to the first partition in order to change the setting.\n");
}

/**
 * i40e_phy_type_to_ethtool - convert the phy_types to ethtool link modes
 * @pf: PF struct with phy_types
 * @ks: ethtool link ksettings struct to fill out
 *
 **/
static void i40e_phy_type_to_ethtool(struct i40e_pf *pf,
				     struct ethtool_link_ksettings *ks)
{
	struct i40e_link_status *hw_link_info = &pf->hw.phy.link_info;
	u64 phy_types = pf->hw.phy.phy_types;

	ethtool_link_ksettings_zero_link_mode(ks, supported);
	ethtool_link_ksettings_zero_link_mode(ks, advertising);

	if (phy_types & I40E_CAP_PHY_TYPE_SGMII) {
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     1000baseT_Full);
		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
			ethtool_link_ksettings_add_link_mode(ks, advertising,
							     1000baseT_Full);
		if (pf->hw_features & I40E_HW_100M_SGMII_CAPABLE) {
			ethtool_link_ksettings_add_link_mode(ks, supported,
							     100baseT_Full);
			ethtool_link_ksettings_add_link_mode(ks, advertising,
							     100baseT_Full);
		}
	}
	if (phy_types & I40E_CAP_PHY_TYPE_XAUI ||
	    phy_types & I40E_CAP_PHY_TYPE_XFI ||
	    phy_types & I40E_CAP_PHY_TYPE_SFI ||
	    phy_types & I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU ||
	    phy_types & I40E_CAP_PHY_TYPE_10GBASE_AOC) {
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     10000baseT_Full);
		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
			ethtool_link_ksettings_add_link_mode(ks, advertising,
							     10000baseT_Full);
	}
	if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_T) {
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     10000baseT_Full);
		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
			ethtool_link_ksettings_add_link_mode(ks, advertising,
							     10000baseT_Full);
	}
	if (phy_types & I40E_CAP_PHY_TYPE_XLAUI ||
	    phy_types & I40E_CAP_PHY_TYPE_XLPPI ||
	    phy_types & I40E_CAP_PHY_TYPE_40GBASE_AOC)
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     40000baseCR4_Full);
	if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_CR4_CU ||
	    phy_types & I40E_CAP_PHY_TYPE_40GBASE_CR4) {
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     40000baseCR4_Full);
		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_40GB)
			ethtool_link_ksettings_add_link_mode(ks, advertising,
							     40000baseCR4_Full);
	}
	if (phy_types & I40E_CAP_PHY_TYPE_100BASE_TX) {
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     100baseT_Full);
		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB)
			ethtool_link_ksettings_add_link_mode(ks, advertising,
							     100baseT_Full);
	}
	if (phy_types & I40E_CAP_PHY_TYPE_1000BASE_T) {
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     1000baseT_Full);
		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
			ethtool_link_ksettings_add_link_mode(ks, advertising,
							     1000baseT_Full);
	}
	if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_SR4)
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     40000baseSR4_Full);
	if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_LR4)
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     40000baseLR4_Full);
	if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_KR4) {
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     40000baseLR4_Full);
		ethtool_link_ksettings_add_link_mode(ks, advertising,
						     40000baseLR4_Full);
	}
	if (phy_types & I40E_CAP_PHY_TYPE_20GBASE_KR2) {
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     20000baseKR2_Full);
		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_20GB)
			ethtool_link_ksettings_add_link_mode(ks, advertising,
							     20000baseKR2_Full);
	}
	if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_KX4) {
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     10000baseKX4_Full);
		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
			ethtool_link_ksettings_add_link_mode(ks, advertising,
							     10000baseKX4_Full);
	}
	if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_KR &&
	    !(pf->hw_features & I40E_HW_HAVE_CRT_RETIMER)) {
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     10000baseKR_Full);
		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
			ethtool_link_ksettings_add_link_mode(ks, advertising,
							     10000baseKR_Full);
	}
	if (phy_types & I40E_CAP_PHY_TYPE_1000BASE_KX &&
	    !(pf->hw_features & I40E_HW_HAVE_CRT_RETIMER)) {
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     1000baseKX_Full);
		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
			ethtool_link_ksettings_add_link_mode(ks, advertising,
							     1000baseKX_Full);
	}
	/* need to add 25G PHY types */
	if (phy_types & I40E_CAP_PHY_TYPE_25GBASE_KR) {
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     25000baseKR_Full);
		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_25GB)
			ethtool_link_ksettings_add_link_mode(ks, advertising,
							     25000baseKR_Full);
	}
	if (phy_types & I40E_CAP_PHY_TYPE_25GBASE_CR) {
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     25000baseCR_Full);
		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_25GB)
			ethtool_link_ksettings_add_link_mode(ks, advertising,
							     25000baseCR_Full);
	}
	if (phy_types & I40E_CAP_PHY_TYPE_25GBASE_SR ||
	    phy_types & I40E_CAP_PHY_TYPE_25GBASE_LR) {
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     25000baseSR_Full);
		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_25GB)
			ethtool_link_ksettings_add_link_mode(ks, advertising,
							     25000baseSR_Full);
	}
	if (phy_types & I40E_CAP_PHY_TYPE_25GBASE_AOC ||
	    phy_types & I40E_CAP_PHY_TYPE_25GBASE_ACC) {
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     25000baseCR_Full);
		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_25GB)
			ethtool_link_ksettings_add_link_mode(ks, advertising,
							     25000baseCR_Full);
	}
	/* need to add new 10G PHY types */
	if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1 ||
	    phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1_CU) {
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     10000baseCR_Full);
		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
			ethtool_link_ksettings_add_link_mode(ks, advertising,
							     10000baseCR_Full);
	}
	if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_SR) {
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     10000baseSR_Full);
		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
			ethtool_link_ksettings_add_link_mode(ks, advertising,
							     10000baseSR_Full);
	}
	if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_LR) {
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     10000baseLR_Full);
		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
			ethtool_link_ksettings_add_link_mode(ks, advertising,
							     10000baseLR_Full);
	}
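	/* 1G optical PHY types (SX/LX and optical BASE-T modules) are
	 * reported to ethtool as the generic 1000baseX link mode below.
	 */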
	if (phy_types & I40E_CAP_PHY_TYPE_1000BASE_SX ||
	    phy_types & I40E_CAP_PHY_TYPE_1000BASE_LX ||
	    phy_types & I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL) {
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     1000baseX_Full);
		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
			ethtool_link_ksettings_add_link_mode(ks, advertising,
							     1000baseX_Full);
	}
	/* Autoneg PHY types */
	if (phy_types & I40E_CAP_PHY_TYPE_SGMII ||
	    phy_types & I40E_CAP_PHY_TYPE_40GBASE_KR4 ||
	    phy_types & I40E_CAP_PHY_TYPE_40GBASE_CR4_CU ||
	    phy_types & I40E_CAP_PHY_TYPE_40GBASE_CR4 ||
	    phy_types & I40E_CAP_PHY_TYPE_25GBASE_SR ||
	    phy_types & I40E_CAP_PHY_TYPE_25GBASE_LR ||
	    phy_types & I40E_CAP_PHY_TYPE_25GBASE_KR ||
	    phy_types & I40E_CAP_PHY_TYPE_25GBASE_CR ||
	    phy_types & I40E_CAP_PHY_TYPE_20GBASE_KR2 ||
	    phy_types & I40E_CAP_PHY_TYPE_10GBASE_T ||
	    phy_types & I40E_CAP_PHY_TYPE_10GBASE_SR ||
	    phy_types & I40E_CAP_PHY_TYPE_10GBASE_LR ||
	    phy_types & I40E_CAP_PHY_TYPE_10GBASE_KX4 ||
	    phy_types & I40E_CAP_PHY_TYPE_10GBASE_KR ||
	    phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1_CU ||
	    phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1 ||
	    phy_types & I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL ||
	    phy_types & I40E_CAP_PHY_TYPE_1000BASE_T ||
	    phy_types & I40E_CAP_PHY_TYPE_1000BASE_SX ||
	    phy_types & I40E_CAP_PHY_TYPE_1000BASE_LX ||
	    phy_types & I40E_CAP_PHY_TYPE_1000BASE_KX ||
	    phy_types & I40E_CAP_PHY_TYPE_100BASE_TX) {
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     Autoneg);
		ethtool_link_ksettings_add_link_mode(ks, advertising,
						     Autoneg);
	}
}

/**
 * i40e_get_settings_link_up - Get the Link settings for when link is up
 * @hw: hw structure
 * @ks: ethtool ksettings to fill in
 * @netdev: network interface device structure
 * @pf: pointer to physical function struct
 **/
static void i40e_get_settings_link_up(struct i40e_hw *hw,
				      struct ethtool_link_ksettings *ks,
				      struct net_device *netdev,
				      struct i40e_pf *pf)
{
	struct i40e_link_status *hw_link_info = &hw->phy.link_info;
	struct ethtool_link_ksettings cap_ksettings;
	u32 link_speed = hw_link_info->link_speed;

	/* Initialize supported and advertised settings based on phy settings */
	switch (hw_link_info->phy_type) {
	case I40E_PHY_TYPE_40GBASE_CR4:
	case I40E_PHY_TYPE_40GBASE_CR4_CU:
		ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     40000baseCR4_Full);
		ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
		ethtool_link_ksettings_add_link_mode(ks, advertising,
						     40000baseCR4_Full);
		break;
	case I40E_PHY_TYPE_XLAUI:
	case I40E_PHY_TYPE_XLPPI:
	case I40E_PHY_TYPE_40GBASE_AOC:
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     40000baseCR4_Full);
		break;
	case I40E_PHY_TYPE_40GBASE_SR4:
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     40000baseSR4_Full);
		break;
	case I40E_PHY_TYPE_40GBASE_LR4:
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     40000baseLR4_Full);
		break;
	case I40E_PHY_TYPE_25GBASE_SR:
	case I40E_PHY_TYPE_25GBASE_LR:
	case I40E_PHY_TYPE_10GBASE_SR:
	case I40E_PHY_TYPE_10GBASE_LR:
	case I40E_PHY_TYPE_1000BASE_SX:
	case I40E_PHY_TYPE_1000BASE_LX:
		ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
		ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     25000baseSR_Full);
		ethtool_link_ksettings_add_link_mode(ks, advertising,
						     25000baseSR_Full);
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     10000baseSR_Full);
		ethtool_link_ksettings_add_link_mode(ks, advertising,
						     10000baseSR_Full);
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     10000baseLR_Full);
		ethtool_link_ksettings_add_link_mode(ks, advertising,
						     10000baseLR_Full);
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     1000baseX_Full);
		ethtool_link_ksettings_add_link_mode(ks, advertising,
						     1000baseX_Full);
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     10000baseT_Full);
		if (hw_link_info->module_type[2] &
		    I40E_MODULE_TYPE_1000BASE_SX ||
		    hw_link_info->module_type[2] &
		    I40E_MODULE_TYPE_1000BASE_LX) {
			ethtool_link_ksettings_add_link_mode(ks, supported,
							     1000baseT_Full);
			if (hw_link_info->requested_speeds &
			    I40E_LINK_SPEED_1GB)
				ethtool_link_ksettings_add_link_mode(
					ks, advertising, 1000baseT_Full);
		}
		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
			ethtool_link_ksettings_add_link_mode(ks, advertising,
							     10000baseT_Full);
		break;
	case I40E_PHY_TYPE_10GBASE_T:
	case I40E_PHY_TYPE_1000BASE_T:
	case I40E_PHY_TYPE_100BASE_TX:
		ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     10000baseT_Full);
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     1000baseT_Full);
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     100baseT_Full);
		ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
			ethtool_link_ksettings_add_link_mode(ks, advertising,
							     10000baseT_Full);
		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
			ethtool_link_ksettings_add_link_mode(ks, advertising,
							     1000baseT_Full);
		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB)
			ethtool_link_ksettings_add_link_mode(ks, advertising,
							     100baseT_Full);
		break;
	case I40E_PHY_TYPE_1000BASE_T_OPTICAL:
		ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     1000baseT_Full);
		ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
		ethtool_link_ksettings_add_link_mode(ks, advertising,
						     1000baseT_Full);
		break;
	case I40E_PHY_TYPE_10GBASE_CR1_CU:
	case I40E_PHY_TYPE_10GBASE_CR1:
		ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     10000baseT_Full);
		ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
		ethtool_link_ksettings_add_link_mode(ks, advertising,
						     10000baseT_Full);
		break;
	case I40E_PHY_TYPE_XAUI:
	case I40E_PHY_TYPE_XFI:
	case I40E_PHY_TYPE_SFI:
	case I40E_PHY_TYPE_10GBASE_SFPP_CU:
	case I40E_PHY_TYPE_10GBASE_AOC:
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     10000baseT_Full);
		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
			ethtool_link_ksettings_add_link_mode(ks, advertising,
							     10000baseT_Full);
		break;
	case I40E_PHY_TYPE_SGMII:
		ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     1000baseT_Full);
		if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
			ethtool_link_ksettings_add_link_mode(ks, advertising,
							     1000baseT_Full);
		if (pf->hw_features & I40E_HW_100M_SGMII_CAPABLE) {
			ethtool_link_ksettings_add_link_mode(ks, supported,
							     100baseT_Full);
			if (hw_link_info->requested_speeds &
			    I40E_LINK_SPEED_100MB)
				ethtool_link_ksettings_add_link_mode(
					ks, advertising, 100baseT_Full);
		}
		break;
	case I40E_PHY_TYPE_40GBASE_KR4:
	case I40E_PHY_TYPE_25GBASE_KR:
	case I40E_PHY_TYPE_20GBASE_KR2:
	case I40E_PHY_TYPE_10GBASE_KR:
	case I40E_PHY_TYPE_10GBASE_KX4:
	case I40E_PHY_TYPE_1000BASE_KX:
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     40000baseKR4_Full);
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     25000baseKR_Full);
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     20000baseKR2_Full);
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     10000baseKR_Full);
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     10000baseKX4_Full);
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     1000baseKX_Full);
		ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
		ethtool_link_ksettings_add_link_mode(ks, advertising,
						     40000baseKR4_Full);
		ethtool_link_ksettings_add_link_mode(ks, advertising,
						     25000baseKR_Full);
		ethtool_link_ksettings_add_link_mode(ks, advertising,
						     20000baseKR2_Full);
		ethtool_link_ksettings_add_link_mode(ks, advertising,
						     10000baseKR_Full);
		ethtool_link_ksettings_add_link_mode(ks, advertising,
						     10000baseKX4_Full);
		ethtool_link_ksettings_add_link_mode(ks, advertising,
						     1000baseKX_Full);
		ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
		break;
	case I40E_PHY_TYPE_25GBASE_CR:
		ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
		ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     25000baseCR_Full);
		ethtool_link_ksettings_add_link_mode(ks, advertising,
						     25000baseCR_Full);
		break;
	case I40E_PHY_TYPE_25GBASE_AOC:
	case I40E_PHY_TYPE_25GBASE_ACC:
		ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
		ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     25000baseCR_Full);

		ethtool_link_ksettings_add_link_mode(ks, advertising,
						     25000baseCR_Full);
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     10000baseCR_Full);
		ethtool_link_ksettings_add_link_mode(ks, advertising,
						     10000baseCR_Full);
		break;
	default:
		/* if we got here and link is up something bad is afoot */
		netdev_info(netdev,
			    "WARNING: Link is up but PHY type 0x%x is not recognized.\n",
			    hw_link_info->phy_type);
	}

	/* Now that we've worked out everything that could be supported by the
	 * current PHY type, get what is supported by the NVM and intersect
	 * them to get what is truly supported
	 */
	memset(&cap_ksettings, 0, sizeof(struct ethtool_link_ksettings));
	i40e_phy_type_to_ethtool(pf, &cap_ksettings);
	ethtool_intersect_link_masks(ks, &cap_ksettings);

	/* Set speed and duplex */
	switch (link_speed) {
	case I40E_LINK_SPEED_40GB:
		ks->base.speed = SPEED_40000;
		break;
	case I40E_LINK_SPEED_25GB:
		ks->base.speed = SPEED_25000;
		break;
	case I40E_LINK_SPEED_20GB:
		ks->base.speed = SPEED_20000;
		break;
	case I40E_LINK_SPEED_10GB:
		ks->base.speed = SPEED_10000;
		break;
	case I40E_LINK_SPEED_1GB:
		ks->base.speed = SPEED_1000;
		break;
	case I40E_LINK_SPEED_100MB:
		ks->base.speed = SPEED_100;
		break;
	default:
		break;
	}
	ks->base.duplex = DUPLEX_FULL;
}

/**
 * i40e_get_settings_link_down - Get the Link settings for when link is down
 * @hw: hw structure
 * @ks: ethtool ksettings to fill in
 * @pf: pointer to physical function struct
 *
 * Reports link settings that can be determined when link is down
 **/
static void i40e_get_settings_link_down(struct i40e_hw *hw,
					struct ethtool_link_ksettings *ks,
					struct i40e_pf *pf)
{
	/* link is down and the driver needs to fall back on
	 * supported phy types to figure out what info to display
	 */
	i40e_phy_type_to_ethtool(pf, ks);

	/* With no link, speed and duplex are unknown */
	ks->base.speed = SPEED_UNKNOWN;
	ks->base.duplex = DUPLEX_UNKNOWN;
}

/**
 * i40e_get_link_ksettings - Get Link Speed and Duplex settings
 * @netdev: network interface device structure
 * @ks: ethtool ksettings
 *
 * Reports speed/duplex settings based on media_type
 **/
static int i40e_get_link_ksettings(struct net_device *netdev,
				   struct ethtool_link_ksettings *ks)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_link_status *hw_link_info = &hw->phy.link_info;
	bool link_up = hw_link_info->link_info & I40E_AQ_LINK_UP;

	ethtool_link_ksettings_zero_link_mode(ks, supported);
	ethtool_link_ksettings_zero_link_mode(ks, advertising);

	if (link_up)
		i40e_get_settings_link_up(hw, ks, netdev, pf);
	else
		i40e_get_settings_link_down(hw, ks, pf);

	/* Now set the settings that don't rely on link being up/down */
	/* Set autoneg settings */
	ks->base.autoneg = ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ?
			    AUTONEG_ENABLE : AUTONEG_DISABLE);

	/* Set media type settings */
	switch (hw->phy.media_type) {
	case I40E_MEDIA_TYPE_BACKPLANE:
		ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
		ethtool_link_ksettings_add_link_mode(ks, supported, Backplane);
		ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
		ethtool_link_ksettings_add_link_mode(ks, advertising,
						     Backplane);
		ks->base.port = PORT_NONE;
		break;
	case I40E_MEDIA_TYPE_BASET:
		ethtool_link_ksettings_add_link_mode(ks, supported, TP);
		ethtool_link_ksettings_add_link_mode(ks, advertising, TP);
		ks->base.port = PORT_TP;
		break;
	case I40E_MEDIA_TYPE_DA:
	case I40E_MEDIA_TYPE_CX4:
		ethtool_link_ksettings_add_link_mode(ks, supported, FIBRE);
		ethtool_link_ksettings_add_link_mode(ks, advertising, FIBRE);
		ks->base.port = PORT_DA;
		break;
	case I40E_MEDIA_TYPE_FIBER:
		ethtool_link_ksettings_add_link_mode(ks, supported, FIBRE);
		ks->base.port = PORT_FIBRE;
		break;
	case I40E_MEDIA_TYPE_UNKNOWN:
	default:
		ks->base.port = PORT_OTHER;
		break;
	}

	/* Set flow control settings */
	ethtool_link_ksettings_add_link_mode(ks, supported, Pause);

	switch (hw->fc.requested_mode) {
	case I40E_FC_FULL:
		ethtool_link_ksettings_add_link_mode(ks, advertising, Pause);
		break;
	case I40E_FC_TX_PAUSE:
		ethtool_link_ksettings_add_link_mode(ks, advertising,
						     Asym_Pause);
		break;
	case I40E_FC_RX_PAUSE:
		ethtool_link_ksettings_add_link_mode(ks, advertising, Pause);
		ethtool_link_ksettings_add_link_mode(ks, advertising,
						     Asym_Pause);
		break;
	default:
		ethtool_link_ksettings_del_link_mode(ks, advertising, Pause);
		ethtool_link_ksettings_del_link_mode(ks, advertising,
						     Asym_Pause);
		break;
	}

	return 0;
}

/**
 * i40e_set_link_ksettings - Set Speed and Duplex
 * @netdev: network interface device structure
 * @ks: ethtool ksettings
 *
 * Set speed/duplex per media_types advertised/forced
 **/
static int i40e_set_link_ksettings(struct net_device *netdev,
				   const struct ethtool_link_ksettings *ks)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_aq_get_phy_abilities_resp abilities;
	struct ethtool_link_ksettings safe_ks;
	struct ethtool_link_ksettings copy_ks;
	struct i40e_aq_set_phy_config config;
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_hw *hw = &pf->hw;
	bool autoneg_changed = false;
	i40e_status status = 0;
	int timeout = 50;
	int err = 0;
	u8 autoneg;

	/* Changing port settings is not supported if this isn't the
	 * port's controlling PF
	 */
	if (hw->partition_id != 1) {
		i40e_partition_setting_complaint(pf);
		return -EOPNOTSUPP;
	}
	if (vsi != pf->vsi[pf->lan_vsi])
		return -EOPNOTSUPP;
	if (hw->phy.media_type != I40E_MEDIA_TYPE_BASET &&
	    hw->phy.media_type != I40E_MEDIA_TYPE_FIBER &&
	    hw->phy.media_type != I40E_MEDIA_TYPE_BACKPLANE &&
	    hw->phy.media_type != I40E_MEDIA_TYPE_DA &&
	    hw->phy.link_info.link_info & I40E_AQ_LINK_UP)
		return -EOPNOTSUPP;
	if (hw->device_id == I40E_DEV_ID_KX_B ||
	    hw->device_id == I40E_DEV_ID_KX_C ||
	    hw->device_id == I40E_DEV_ID_20G_KR2 ||
	    hw->device_id == I40E_DEV_ID_20G_KR2_A) {
		netdev_info(netdev, "Changing settings is not supported on backplane.\n");
		return -EOPNOTSUPP;
	}

	/* copy the ksettings to copy_ks to avoid modifying the origin */
	memcpy(&copy_ks, ks, sizeof(struct ethtool_link_ksettings));

	/* save autoneg out of ksettings */
	autoneg = copy_ks.base.autoneg;

	memset(&safe_ks, 0, sizeof(safe_ks));
	/* Get link modes supported by hardware and check against modes
	 * requested by the user.  Return an error if unsupported mode was set.
	 */
	i40e_phy_type_to_ethtool(pf, &safe_ks);
	if (!bitmap_subset(copy_ks.link_modes.advertising,
			   safe_ks.link_modes.supported,
			   __ETHTOOL_LINK_MODE_MASK_NBITS))
		return -EINVAL;

	/* get our own copy of the bits to check against */
	memset(&safe_ks, 0, sizeof(struct ethtool_link_ksettings));
	safe_ks.base.cmd = copy_ks.base.cmd;
	safe_ks.base.link_mode_masks_nwords =
		copy_ks.base.link_mode_masks_nwords;
	i40e_get_link_ksettings(netdev, &safe_ks);

	/* set autoneg back to what it currently is */
	copy_ks.base.autoneg = safe_ks.base.autoneg;

	/* If copy_ks.base and safe_ks.base are not the same now, then they are
	 * trying to set something that we do not support.
	 */
	if (memcmp(&copy_ks.base, &safe_ks.base,
		   sizeof(struct ethtool_link_settings)))
		return -EOPNOTSUPP;

	while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state)) {
		timeout--;
		if (!timeout)
			return -EBUSY;
		usleep_range(1000, 2000);
	}

	/* Get the current phy config */
	status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
					      NULL);
	if (status) {
		err = -EAGAIN;
		goto done;
	}

	/* Copy abilities to config in case autoneg is not
	 * set below
	 */
	memset(&config, 0, sizeof(struct i40e_aq_set_phy_config));
	config.abilities = abilities.abilities;

	/* Check autoneg */
	if (autoneg == AUTONEG_ENABLE) {
		/* If autoneg was not already enabled */
		if (!(hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED)) {
			/* If autoneg is not supported, return error */
			if (!ethtool_link_ksettings_test_link_mode(&safe_ks,
								   supported,
								   Autoneg)) {
				netdev_info(netdev, "Autoneg not supported on this phy\n");
				err = -EINVAL;
				goto done;
			}
			/* Autoneg is allowed to change */
			config.abilities = abilities.abilities |
					   I40E_AQ_PHY_ENABLE_AN;
			autoneg_changed = true;
		}
	} else {
		/* If autoneg is currently enabled */
		if (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) {
			/* If autoneg is supported 10GBASE_T is the only PHY
			 * that can disable it, so otherwise return error
			 */
			if (ethtool_link_ksettings_test_link_mode(&safe_ks,
								  supported,
								  Autoneg) &&
			    hw->phy.link_info.phy_type !=
			    I40E_PHY_TYPE_10GBASE_T) {
				netdev_info(netdev, "Autoneg cannot be disabled on this phy\n");
				err = -EINVAL;
				goto done;
			}
			/* Autoneg is allowed to change */
			config.abilities = abilities.abilities &
					   ~I40E_AQ_PHY_ENABLE_AN;
			autoneg_changed = true;
		}
	}

	if (ethtool_link_ksettings_test_link_mode(ks, advertising,
						  100baseT_Full))
		config.link_speed |= I40E_LINK_SPEED_100MB;
	if (ethtool_link_ksettings_test_link_mode(ks, advertising,
						  1000baseT_Full) ||
	    ethtool_link_ksettings_test_link_mode(ks, advertising,
						  1000baseX_Full) ||
	    ethtool_link_ksettings_test_link_mode(ks, advertising,
						  1000baseKX_Full))
		config.link_speed |= I40E_LINK_SPEED_1GB;
	if (ethtool_link_ksettings_test_link_mode(ks, advertising,
						  10000baseT_Full) ||
	    ethtool_link_ksettings_test_link_mode(ks, advertising,
						  10000baseKX4_Full) ||
	    ethtool_link_ksettings_test_link_mode(ks, advertising,
						  10000baseKR_Full) ||
	    ethtool_link_ksettings_test_link_mode(ks, advertising,
						  10000baseCR_Full) ||
	    ethtool_link_ksettings_test_link_mode(ks, advertising,
						  10000baseSR_Full))
		config.link_speed |= I40E_LINK_SPEED_10GB;
	if (ethtool_link_ksettings_test_link_mode(ks, advertising,
						  20000baseKR2_Full))
		config.link_speed |= I40E_LINK_SPEED_20GB;
	if (ethtool_link_ksettings_test_link_mode(ks, advertising,
						  25000baseCR_Full) ||
	    ethtool_link_ksettings_test_link_mode(ks, advertising,
						  25000baseKR_Full) ||
	    ethtool_link_ksettings_test_link_mode(ks, advertising,
						  25000baseSR_Full))
		config.link_speed |= I40E_LINK_SPEED_25GB;
	if (ethtool_link_ksettings_test_link_mode(ks, advertising,
						  40000baseKR4_Full) ||
	    ethtool_link_ksettings_test_link_mode(ks, advertising,
						  40000baseCR4_Full) ||
	    ethtool_link_ksettings_test_link_mode(ks, advertising,
						  40000baseSR4_Full) ||
	    ethtool_link_ksettings_test_link_mode(ks, advertising,
						  40000baseLR4_Full))
		config.link_speed |= I40E_LINK_SPEED_40GB;

	/* If speed didn't get set, set it to what it currently is.
	 * This is needed because if advertise is 0 (as it is when autoneg
	 * is disabled) then speed won't get set.
	 */
	if (!config.link_speed)
		config.link_speed = abilities.link_speed;
	if (autoneg_changed || abilities.link_speed != config.link_speed) {
		/* copy over the rest of the abilities */
		config.phy_type = abilities.phy_type;
		config.phy_type_ext = abilities.phy_type_ext;
		config.eee_capability = abilities.eee_capability;
		config.eeer = abilities.eeer_val;
		config.low_power_ctrl = abilities.d3_lpan;
		config.fec_config = abilities.fec_cfg_curr_mod_ext_info &
				    I40E_AQ_PHY_FEC_CONFIG_MASK;

		/* save the requested speeds */
		hw->phy.link_info.requested_speeds = config.link_speed;
		/* set link and auto negotiation so changes take effect */
		config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
		/* If link is up put link down */
		if (hw->phy.link_info.link_info & I40E_AQ_LINK_UP) {
			/* Tell the OS link is going down, the link will go
			 * back up when fw says it is ready asynchronously
			 */
			i40e_print_link_message(vsi, false);
			netif_carrier_off(netdev);
			netif_tx_stop_all_queues(netdev);
		}

		/* make the aq call */
		status = i40e_aq_set_phy_config(hw, &config, NULL);
		if (status) {
			netdev_info(netdev,
				    "Set phy config failed, err %s aq_err %s\n",
				    i40e_stat_str(hw, status),
				    i40e_aq_str(hw, hw->aq.asq_last_status));
			err = -EAGAIN;
			goto done;
		}

		status = i40e_update_link_info(hw);
		if (status)
			netdev_dbg(netdev,
				   "Updating link info failed with err %s aq_err %s\n",
				   i40e_stat_str(hw, status),
				   i40e_aq_str(hw, hw->aq.asq_last_status));

	} else {
		netdev_info(netdev, "Nothing changed, exiting without setting anything.\n");
	}

done:
	clear_bit(__I40E_CONFIG_BUSY, pf->state);

	return err;
}

static int i40e_nway_reset(struct net_device *netdev)
{
	/* restart autonegotiation */
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_hw *hw = &pf->hw;
	bool link_up = hw->phy.link_info.link_info & I40E_AQ_LINK_UP;
	i40e_status ret = 0;

	ret = i40e_aq_set_link_restart_an(hw, link_up, NULL);
	if (ret) {
		netdev_info(netdev, "link restart failed, err %s aq_err %s\n",
			    i40e_stat_str(hw, ret),
			    i40e_aq_str(hw, hw->aq.asq_last_status));
		return -EIO;
	}

	return 0;
}

/**
 * i40e_get_pauseparam - Get Flow Control status
 * Return tx/rx-pause status
 **/
static void i40e_get_pauseparam(struct net_device *netdev,
				struct ethtool_pauseparam *pause)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_link_status *hw_link_info = &hw->phy.link_info;
	struct i40e_dcbx_config *dcbx_cfg = &hw->local_dcbx_config;

	pause->autoneg =
		((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ?
		  AUTONEG_ENABLE : AUTONEG_DISABLE);

	/* PFC enabled so report LFC as off */
	if (dcbx_cfg->pfc.pfcenable) {
		pause->rx_pause = 0;
		pause->tx_pause = 0;
		return;
	}

	if (hw->fc.current_mode == I40E_FC_RX_PAUSE) {
		pause->rx_pause = 1;
	} else if (hw->fc.current_mode == I40E_FC_TX_PAUSE) {
		pause->tx_pause = 1;
	} else if (hw->fc.current_mode == I40E_FC_FULL) {
		pause->rx_pause = 1;
		pause->tx_pause = 1;
	}
}

/**
 * i40e_set_pauseparam - Set Flow Control parameter
 * @netdev: network interface device structure
 * @pause: return tx/rx flow control status
 **/
static int i40e_set_pauseparam(struct net_device *netdev,
			       struct ethtool_pauseparam *pause)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_link_status *hw_link_info = &hw->phy.link_info;
	struct i40e_dcbx_config *dcbx_cfg = &hw->local_dcbx_config;
	bool link_up = hw_link_info->link_info & I40E_AQ_LINK_UP;
	i40e_status status;
	u8 aq_failures;
	int err = 0;

	/* Changing the port's flow control is not supported if this isn't the
	 * port's controlling PF
	 */
	if (hw->partition_id != 1) {
		i40e_partition_setting_complaint(pf);
		return -EOPNOTSUPP;
	}

	if (vsi != pf->vsi[pf->lan_vsi])
		return -EOPNOTSUPP;

	if (pause->autoneg != ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ?
	    AUTONEG_ENABLE : AUTONEG_DISABLE)) {
		netdev_info(netdev, "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
		return -EOPNOTSUPP;
	}

	/* If we have link and don't have autoneg */
	if (!test_bit(__I40E_DOWN, pf->state) &&
	    !(hw_link_info->an_info & I40E_AQ_AN_COMPLETED)) {
		/* Send message that it might not necessarily work */
		netdev_info(netdev, "Autoneg did not complete so changing settings may not result in an actual change.\n");
	}

	if (dcbx_cfg->pfc.pfcenable) {
		netdev_info(netdev,
			    "Priority flow control enabled. Cannot set link flow control.\n");
		return -EOPNOTSUPP;
	}

	if (pause->rx_pause && pause->tx_pause)
		hw->fc.requested_mode = I40E_FC_FULL;
	else if (pause->rx_pause && !pause->tx_pause)
		hw->fc.requested_mode = I40E_FC_RX_PAUSE;
	else if (!pause->rx_pause && pause->tx_pause)
		hw->fc.requested_mode = I40E_FC_TX_PAUSE;
	else if (!pause->rx_pause && !pause->tx_pause)
		hw->fc.requested_mode = I40E_FC_NONE;
	else
		return -EINVAL;

	/* Tell the OS link is going down, the link will go back up when fw
	 * says it is ready asynchronously
	 */
	i40e_print_link_message(vsi, false);
	netif_carrier_off(netdev);
	netif_tx_stop_all_queues(netdev);

	/* Set the fc mode and only restart an if link is up */
	status = i40e_set_fc(hw, &aq_failures, link_up);

	if (aq_failures & I40E_SET_FC_AQ_FAIL_GET) {
		netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %s aq_err %s\n",
			    i40e_stat_str(hw, status),
			    i40e_aq_str(hw, hw->aq.asq_last_status));
		err = -EAGAIN;
	}
	if (aq_failures & I40E_SET_FC_AQ_FAIL_SET) {
		netdev_info(netdev, "Set fc failed on the set_phy_config call with err %s aq_err %s\n",
			    i40e_stat_str(hw, status),
			    i40e_aq_str(hw, hw->aq.asq_last_status));
		err = -EAGAIN;
	}
	if (aq_failures & I40E_SET_FC_AQ_FAIL_UPDATE) {
		netdev_info(netdev, "Set fc failed on the get_link_info call with err %s aq_err %s\n",
			    i40e_stat_str(hw, status),
			    i40e_aq_str(hw, hw->aq.asq_last_status));
		err = -EAGAIN;
	}

	if (!test_bit(__I40E_DOWN, pf->state)) {
		/* Give it a little more time to try to come back */
		msleep(75);
		if (!test_bit(__I40E_DOWN, pf->state))
			return i40e_nway_reset(netdev);
	}

	return err;
}

static u32 i40e_get_msglevel(struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	u32 debug_mask = pf->hw.debug_mask;

	if (debug_mask)
		netdev_info(netdev, "i40e debug_mask: 0x%08X\n", debug_mask);

	return pf->msg_enable;
}

static void i40e_set_msglevel(struct net_device *netdev, u32 data)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;

	if (I40E_DEBUG_USER & data)
		pf->hw.debug_mask = data;
	else
		pf->msg_enable = data;
}

static int i40e_get_regs_len(struct net_device *netdev)
{
	int reg_count = 0;
	int i;

	for (i = 0; i40e_reg_list[i].offset != 0; i++)
		reg_count += i40e_reg_list[i].elements;

	return reg_count * sizeof(u32);
}

static void i40e_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
			  void *p)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_hw *hw = &pf->hw;
	u32 *reg_buf = p;
	unsigned int i, j, ri;
	u32 reg;

	/* Tell ethtool which driver-version-specific regs output we have.
	 *
	 * At some point, if we have ethtool doing special formatting of
	 * this data, it will rely on this version number to know how to
	 * interpret things.  Hence, this needs to be updated if/when the
	 * diags register table is changed.
	 */
	regs->version = 1;

	/* loop through the diags reg table for what to print */
	ri = 0;
	for (i = 0; i40e_reg_list[i].offset != 0; i++) {
		for (j = 0; j < i40e_reg_list[i].elements; j++) {
			reg = i40e_reg_list[i].offset
				+ (j * i40e_reg_list[i].stride);
			reg_buf[ri++] = rd32(hw, reg);
		}
	}

}

static int i40e_get_eeprom(struct net_device *netdev,
			   struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_hw *hw = &np->vsi->back->hw;
	struct i40e_pf *pf = np->vsi->back;
	int ret_val = 0, len, offset;
	u8 *eeprom_buff;
	u16 i, sectors;
	bool last;
	u32 magic;

#define I40E_NVM_SECTOR_SIZE	4096
	if (eeprom->len == 0)
		return -EINVAL;

	/* check for NVMUpdate access method */
	magic = hw->vendor_id | (hw->device_id << 16);
	if (eeprom->magic && eeprom->magic != magic) {
		struct i40e_nvm_access *cmd = (struct i40e_nvm_access *)eeprom;
		int errno = 0;

		/* make sure it is the right magic for NVMUpdate */
		if ((eeprom->magic >> 16) != hw->device_id)
			errno = -EINVAL;
		else if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
			 test_bit(__I40E_RESET_INTR_RECEIVED, pf->state))
			errno = -EBUSY;
		else
			ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno);

		if ((errno || ret_val) && (hw->debug_mask & I40E_DEBUG_NVM))
			dev_info(&pf->pdev->dev,
				 "NVMUpdate read failed err=%d status=0x%x errno=%d module=%d offset=0x%x size=%d\n",
				 ret_val, hw->aq.asq_last_status, errno,
				 (u8)(cmd->config & I40E_NVM_MOD_PNT_MASK),
				 cmd->offset, cmd->data_size);

		return errno;
	}

	/* normal ethtool get_eeprom support */
	eeprom->magic = hw->vendor_id | (hw->device_id << 16);

	eeprom_buff = kzalloc(eeprom->len, GFP_KERNEL);
	if (!eeprom_buff)
		return -ENOMEM;

	ret_val = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
	if (ret_val) {
		dev_info(&pf->pdev->dev,
			 "Failed Acquiring NVM resource for read err=%d status=0x%x\n",
			 ret_val, hw->aq.asq_last_status);
		goto free_buff;
	}

	sectors = eeprom->len / I40E_NVM_SECTOR_SIZE;
	sectors += (eeprom->len % I40E_NVM_SECTOR_SIZE) ? 1 : 0;
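	/* read the NVM one 4KB sector at a time; the final chunk may be
	 * shorter and is flagged as the last read in the sequence
	 */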
	len = I40E_NVM_SECTOR_SIZE;
	last = false;
	for (i = 0; i < sectors; i++) {
		if (i == (sectors - 1)) {
			len = eeprom->len - (I40E_NVM_SECTOR_SIZE * i);
			last = true;
		}
		offset = eeprom->offset + (I40E_NVM_SECTOR_SIZE * i),
		ret_val = i40e_aq_read_nvm(hw, 0x0, offset, len,
				(u8 *)eeprom_buff + (I40E_NVM_SECTOR_SIZE * i),
				last, NULL);
		if (ret_val && hw->aq.asq_last_status == I40E_AQ_RC_EPERM) {
			dev_info(&pf->pdev->dev,
				 "read NVM failed, invalid offset 0x%x\n",
				 offset);
			break;
		} else if (ret_val &&
			   hw->aq.asq_last_status == I40E_AQ_RC_EACCES) {
			dev_info(&pf->pdev->dev,
				 "read NVM failed, access, offset 0x%x\n",
				 offset);
			break;
		} else if (ret_val) {
			dev_info(&pf->pdev->dev,
				 "read NVM failed offset %d err=%d status=0x%x\n",
				 offset, ret_val, hw->aq.asq_last_status);
			break;
		}
	}

	i40e_release_nvm(hw);
	memcpy(bytes, (u8 *)eeprom_buff, eeprom->len);
free_buff:
	kfree(eeprom_buff);
	return ret_val;
}

static int i40e_get_eeprom_len(struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_hw *hw = &np->vsi->back->hw;
	u32 val;

#define X722_EEPROM_SCOPE_LIMIT 0x5B9FFF
	if (hw->mac.type == I40E_MAC_X722) {
		val = X722_EEPROM_SCOPE_LIMIT + 1;
		return val;
	}
	val = (rd32(hw, I40E_GLPCI_LBARCTRL)
		& I40E_GLPCI_LBARCTRL_FL_SIZE_MASK)
		>> I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT;
	/* register returns value in power of 2, 64Kbyte chunks. */
	val = (64 * 1024) * BIT(val);
	return val;
}

static int i40e_set_eeprom(struct net_device *netdev,
			   struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_hw *hw = &np->vsi->back->hw;
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_nvm_access *cmd = (struct i40e_nvm_access *)eeprom;
	int ret_val = 0;
	int errno = 0;
	u32 magic;

	/* normal ethtool set_eeprom is not supported */
	magic = hw->vendor_id | (hw->device_id << 16);
	if (eeprom->magic == magic)
		errno = -EOPNOTSUPP;
	/* check for NVMUpdate access method */
	else if (!eeprom->magic || (eeprom->magic >> 16) != hw->device_id)
		errno = -EINVAL;
	else if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
		 test_bit(__I40E_RESET_INTR_RECEIVED, pf->state))
		errno = -EBUSY;
	else
		ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno);

	if ((errno || ret_val) && (hw->debug_mask & I40E_DEBUG_NVM))
		dev_info(&pf->pdev->dev,
			 "NVMUpdate write failed err=%d status=0x%x errno=%d module=%d offset=0x%x size=%d\n",
			 ret_val, hw->aq.asq_last_status, errno,
			 (u8)(cmd->config & I40E_NVM_MOD_PNT_MASK),
			 cmd->offset, cmd->data_size);

	return errno;
}

static void i40e_get_drvinfo(struct net_device *netdev,
			     struct ethtool_drvinfo *drvinfo)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;

	strlcpy(drvinfo->driver, i40e_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, i40e_driver_version_str,
		sizeof(drvinfo->version));
	strlcpy(drvinfo->fw_version, i40e_nvm_version_str(&pf->hw),
		sizeof(drvinfo->fw_version));
	strlcpy(drvinfo->bus_info, pci_name(pf->pdev),
		sizeof(drvinfo->bus_info));
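	/* per-PF private flags are always exposed; PF 0 additionally
	 * reports the global private flags
	 */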
	drvinfo->n_priv_flags = I40E_PRIV_FLAGS_STR_LEN;
	if (pf->hw.pf_id == 0)
		drvinfo->n_priv_flags += I40E_GL_PRIV_FLAGS_STR_LEN;
}

static void i40e_get_ringparam(struct net_device *netdev,
			       struct ethtool_ringparam *ring)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];

	ring->rx_max_pending = I40E_MAX_NUM_DESCRIPTORS;
	ring->tx_max_pending = I40E_MAX_NUM_DESCRIPTORS;
	ring->rx_mini_max_pending = 0;
	ring->rx_jumbo_max_pending = 0;
	ring->rx_pending = vsi->rx_rings[0]->count;
	ring->tx_pending = vsi->tx_rings[0]->count;
	ring->rx_mini_pending = 0;
	ring->rx_jumbo_pending = 0;
}

static bool i40e_active_tx_ring_index(struct i40e_vsi *vsi, u16 index)
{
	if (i40e_enabled_xdp_vsi(vsi)) {
		return index < vsi->num_queue_pairs ||
			(index >= vsi->alloc_queue_pairs &&
			 index < vsi->alloc_queue_pairs + vsi->num_queue_pairs);
	}

	return index < vsi->num_queue_pairs;
}

static int i40e_set_ringparam(struct net_device *netdev,
			      struct ethtool_ringparam *ring)
{
	struct i40e_ring *tx_rings = NULL, *rx_rings = NULL;
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_hw *hw = &np->vsi->back->hw;
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	u32 new_rx_count, new_tx_count;
	u16 tx_alloc_queue_pairs;
	int timeout = 50;
	int i, err = 0;

	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
		return -EINVAL;

	if (ring->tx_pending > I40E_MAX_NUM_DESCRIPTORS ||
	    ring->tx_pending < I40E_MIN_NUM_DESCRIPTORS ||
	    ring->rx_pending > I40E_MAX_NUM_DESCRIPTORS ||
	    ring->rx_pending < I40E_MIN_NUM_DESCRIPTORS) {
		netdev_info(netdev,
			    "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d]\n",
			    ring->tx_pending, ring->rx_pending,
			    I40E_MIN_NUM_DESCRIPTORS, I40E_MAX_NUM_DESCRIPTORS);
		return -EINVAL;
	}

	new_tx_count = ALIGN(ring->tx_pending, I40E_REQ_DESCRIPTOR_MULTIPLE);
	new_rx_count = ALIGN(ring->rx_pending, I40E_REQ_DESCRIPTOR_MULTIPLE);

	/* if nothing to do return success */
	if ((new_tx_count == vsi->tx_rings[0]->count) &&
	    (new_rx_count == vsi->rx_rings[0]->count))
		return 0;

	while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state)) {
		timeout--;
		if (!timeout)
			return -EBUSY;
		usleep_range(1000, 2000);
	}

	if (!netif_running(vsi->netdev)) {
		/* simple case - set for the next time the netdev is started */
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			vsi->tx_rings[i]->count = new_tx_count;
			vsi->rx_rings[i]->count = new_rx_count;
			if (i40e_enabled_xdp_vsi(vsi))
				vsi->xdp_rings[i]->count = new_tx_count;
		}
		goto done;
	}

	/* We can't just free everything and then setup again,
	 * because the ISRs in MSI-X mode get passed pointers
	 * to the Tx and Rx ring structs.
	 */

	/* alloc updated Tx and XDP Tx resources */
	tx_alloc_queue_pairs = vsi->alloc_queue_pairs *
			       (i40e_enabled_xdp_vsi(vsi) ? 2 : 1);
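	/* an XDP-enabled VSI owns one extra XDP Tx ring per queue pair, so
	 * twice as many Tx rings have to be cloned and re-sized below
	 */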
	if (new_tx_count != vsi->tx_rings[0]->count) {
		netdev_info(netdev,
			    "Changing Tx descriptor count from %d to %d.\n",
			    vsi->tx_rings[0]->count, new_tx_count);
		tx_rings = kcalloc(tx_alloc_queue_pairs,
				   sizeof(struct i40e_ring), GFP_KERNEL);
		if (!tx_rings) {
			err = -ENOMEM;
			goto done;
		}

		for (i = 0; i < tx_alloc_queue_pairs; i++) {
			if (!i40e_active_tx_ring_index(vsi, i))
				continue;

			tx_rings[i] = *vsi->tx_rings[i];
			tx_rings[i].count = new_tx_count;
			/* the desc and bi pointers will be reallocated in the
			 * setup call
			 */
			tx_rings[i].desc = NULL;
			tx_rings[i].rx_bi = NULL;
			err = i40e_setup_tx_descriptors(&tx_rings[i]);
			if (err) {
				while (i) {
					i--;
					if (!i40e_active_tx_ring_index(vsi, i))
						continue;
					i40e_free_tx_resources(&tx_rings[i]);
				}
				kfree(tx_rings);
				tx_rings = NULL;

				goto done;
			}
		}
	}

	/* alloc updated Rx resources */
	if (new_rx_count != vsi->rx_rings[0]->count) {
		netdev_info(netdev,
			    "Changing Rx descriptor count from %d to %d\n",
			    vsi->rx_rings[0]->count, new_rx_count);
		rx_rings = kcalloc(vsi->alloc_queue_pairs,
				   sizeof(struct i40e_ring), GFP_KERNEL);
		if (!rx_rings) {
			err = -ENOMEM;
			goto free_tx;
		}

		for (i = 0; i < vsi->num_queue_pairs; i++) {
			struct i40e_ring *ring;
			u16 unused;

			/* clone ring and setup updated count */
			rx_rings[i] = *vsi->rx_rings[i];
			rx_rings[i].count = new_rx_count;
			/* the desc and bi pointers will be reallocated in the
			 * setup call
			 */
			rx_rings[i].desc = NULL;
			rx_rings[i].rx_bi = NULL;
			/* Clear cloned XDP RX-queue info before setup call */
			memset(&rx_rings[i].xdp_rxq, 0, sizeof(rx_rings[i].xdp_rxq));
			/* this is to allow wr32 to have something to write to
			 * during early allocation of Rx buffers
			 */
			rx_rings[i].tail = hw->hw_addr + I40E_PRTGEN_STATUS;
			err = i40e_setup_rx_descriptors(&rx_rings[i]);
			if (err)
				goto rx_unwind;

			/* now allocate the Rx buffers to make sure the OS
			 * has enough memory, any failure here means abort
			 */
			ring = &rx_rings[i];
			unused = I40E_DESC_UNUSED(ring);
			err = i40e_alloc_rx_buffers(ring, unused);
rx_unwind:
			if (err) {
				do {
					i40e_free_rx_resources(&rx_rings[i]);
				} while (i--);
				kfree(rx_rings);
				rx_rings = NULL;

				goto free_tx;
			}
		}
	}

	/* Bring interface down, copy in the new ring info,
	 * then restore the interface
	 */
	i40e_down(vsi);

	if (tx_rings) {
		for (i = 0; i < tx_alloc_queue_pairs; i++) {
			if (i40e_active_tx_ring_index(vsi, i)) {
				i40e_free_tx_resources(vsi->tx_rings[i]);
				*vsi->tx_rings[i] = tx_rings[i];
			}
		}
		kfree(tx_rings);
		tx_rings = NULL;
	}

	if (rx_rings) {
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			i40e_free_rx_resources(vsi->rx_rings[i]);
			/* get the real tail offset */
			rx_rings[i].tail = vsi->rx_rings[i]->tail;
			/* this is to fake out the allocation routine
			 * into thinking it has to realloc everything
			 * but the recycling logic will let us re-use
			 * the buffers allocated above
			 */
			rx_rings[i].next_to_use = 0;
			rx_rings[i].next_to_clean = 0;
			rx_rings[i].next_to_alloc = 0;
			/* do a struct copy */
			*vsi->rx_rings[i] = rx_rings[i];
		}
		kfree(rx_rings);
rx_rings = NULL; 1651 } 1652 1653 i40e_up(vsi); 1654 1655 free_tx: 1656 /* error cleanup if the Rx allocations failed after getting Tx */ 1657 if (tx_rings) { 1658 for (i = 0; i < tx_alloc_queue_pairs; i++) { 1659 if (i40e_active_tx_ring_index(vsi, i)) 1660 i40e_free_tx_resources(vsi->tx_rings[i]); 1661 } 1662 kfree(tx_rings); 1663 tx_rings = NULL; 1664 } 1665 1666 done: 1667 clear_bit(__I40E_CONFIG_BUSY, pf->state); 1668 1669 return err; 1670 } 1671 1672 static int i40e_get_sset_count(struct net_device *netdev, int sset) 1673 { 1674 struct i40e_netdev_priv *np = netdev_priv(netdev); 1675 struct i40e_vsi *vsi = np->vsi; 1676 struct i40e_pf *pf = vsi->back; 1677 1678 switch (sset) { 1679 case ETH_SS_TEST: 1680 return I40E_TEST_LEN; 1681 case ETH_SS_STATS: 1682 if (vsi == pf->vsi[pf->lan_vsi] && pf->hw.partition_id == 1) { 1683 int len = I40E_PF_STATS_LEN(netdev); 1684 1685 if ((pf->lan_veb != I40E_NO_VEB) && 1686 (pf->flags & I40E_FLAG_VEB_STATS_ENABLED)) 1687 len += I40E_VEB_STATS_TOTAL; 1688 return len; 1689 } else { 1690 return I40E_VSI_STATS_LEN(netdev); 1691 } 1692 case ETH_SS_PRIV_FLAGS: 1693 return I40E_PRIV_FLAGS_STR_LEN + 1694 (pf->hw.pf_id == 0 ? I40E_GL_PRIV_FLAGS_STR_LEN : 0); 1695 default: 1696 return -EOPNOTSUPP; 1697 } 1698 } 1699 1700 static void i40e_get_ethtool_stats(struct net_device *netdev, 1701 struct ethtool_stats *stats, u64 *data) 1702 { 1703 struct i40e_netdev_priv *np = netdev_priv(netdev); 1704 struct i40e_ring *tx_ring, *rx_ring; 1705 struct i40e_vsi *vsi = np->vsi; 1706 struct i40e_pf *pf = vsi->back; 1707 unsigned int j; 1708 int i = 0; 1709 char *p; 1710 struct rtnl_link_stats64 *net_stats = i40e_get_vsi_stats_struct(vsi); 1711 unsigned int start; 1712 1713 i40e_update_stats(vsi); 1714 1715 for (j = 0; j < I40E_NETDEV_STATS_LEN; j++) { 1716 p = (char *)net_stats + i40e_gstrings_net_stats[j].stat_offset; 1717 data[i++] = (i40e_gstrings_net_stats[j].sizeof_stat == 1718 sizeof(u64)) ? *(u64 *)p : *(u32 *)p; 1719 } 1720 for (j = 0; j < I40E_MISC_STATS_LEN; j++) { 1721 p = (char *)vsi + i40e_gstrings_misc_stats[j].stat_offset; 1722 data[i++] = (i40e_gstrings_misc_stats[j].sizeof_stat == 1723 sizeof(u64)) ? *(u64 *)p : *(u32 *)p; 1724 } 1725 rcu_read_lock(); 1726 for (j = 0; j < vsi->num_queue_pairs; j++) { 1727 tx_ring = READ_ONCE(vsi->tx_rings[j]); 1728 1729 if (!tx_ring) 1730 continue; 1731 1732 /* process Tx ring statistics */ 1733 do { 1734 start = u64_stats_fetch_begin_irq(&tx_ring->syncp); 1735 data[i] = tx_ring->stats.packets; 1736 data[i + 1] = tx_ring->stats.bytes; 1737 } while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start)); 1738 i += 2; 1739 1740 /* Rx ring is the 2nd half of the queue pair */ 1741 rx_ring = &tx_ring[1]; 1742 do { 1743 start = u64_stats_fetch_begin_irq(&rx_ring->syncp); 1744 data[i] = rx_ring->stats.packets; 1745 data[i + 1] = rx_ring->stats.bytes; 1746 } while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start)); 1747 i += 2; 1748 } 1749 rcu_read_unlock(); 1750 if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1) 1751 return; 1752 1753 if ((pf->lan_veb != I40E_NO_VEB) && 1754 (pf->flags & I40E_FLAG_VEB_STATS_ENABLED)) { 1755 struct i40e_veb *veb = pf->veb[pf->lan_veb]; 1756 1757 for (j = 0; j < I40E_VEB_STATS_LEN; j++) { 1758 p = (char *)veb; 1759 p += i40e_gstrings_veb_stats[j].stat_offset; 1760 data[i++] = (i40e_gstrings_veb_stats[j].sizeof_stat == 1761 sizeof(u64)) ? 
*(u64 *)p : *(u32 *)p; 1762 } 1763 for (j = 0; j < I40E_MAX_TRAFFIC_CLASS; j++) { 1764 data[i++] = veb->tc_stats.tc_tx_packets[j]; 1765 data[i++] = veb->tc_stats.tc_tx_bytes[j]; 1766 data[i++] = veb->tc_stats.tc_rx_packets[j]; 1767 data[i++] = veb->tc_stats.tc_rx_bytes[j]; 1768 } 1769 } 1770 for (j = 0; j < I40E_GLOBAL_STATS_LEN; j++) { 1771 p = (char *)pf + i40e_gstrings_stats[j].stat_offset; 1772 data[i++] = (i40e_gstrings_stats[j].sizeof_stat == 1773 sizeof(u64)) ? *(u64 *)p : *(u32 *)p; 1774 } 1775 for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) { 1776 data[i++] = pf->stats.priority_xon_tx[j]; 1777 data[i++] = pf->stats.priority_xoff_tx[j]; 1778 } 1779 for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) { 1780 data[i++] = pf->stats.priority_xon_rx[j]; 1781 data[i++] = pf->stats.priority_xoff_rx[j]; 1782 } 1783 for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) 1784 data[i++] = pf->stats.priority_xon_2_xoff[j]; 1785 } 1786 1787 static void i40e_get_strings(struct net_device *netdev, u32 stringset, 1788 u8 *data) 1789 { 1790 struct i40e_netdev_priv *np = netdev_priv(netdev); 1791 struct i40e_vsi *vsi = np->vsi; 1792 struct i40e_pf *pf = vsi->back; 1793 char *p = (char *)data; 1794 unsigned int i; 1795 1796 switch (stringset) { 1797 case ETH_SS_TEST: 1798 memcpy(data, i40e_gstrings_test, 1799 I40E_TEST_LEN * ETH_GSTRING_LEN); 1800 break; 1801 case ETH_SS_STATS: 1802 for (i = 0; i < I40E_NETDEV_STATS_LEN; i++) { 1803 snprintf(p, ETH_GSTRING_LEN, "%s", 1804 i40e_gstrings_net_stats[i].stat_string); 1805 p += ETH_GSTRING_LEN; 1806 } 1807 for (i = 0; i < I40E_MISC_STATS_LEN; i++) { 1808 snprintf(p, ETH_GSTRING_LEN, "%s", 1809 i40e_gstrings_misc_stats[i].stat_string); 1810 p += ETH_GSTRING_LEN; 1811 } 1812 for (i = 0; i < vsi->num_queue_pairs; i++) { 1813 snprintf(p, ETH_GSTRING_LEN, "tx-%d.tx_packets", i); 1814 p += ETH_GSTRING_LEN; 1815 snprintf(p, ETH_GSTRING_LEN, "tx-%d.tx_bytes", i); 1816 p += ETH_GSTRING_LEN; 1817 snprintf(p, ETH_GSTRING_LEN, "rx-%d.rx_packets", i); 1818 p += ETH_GSTRING_LEN; 1819 snprintf(p, ETH_GSTRING_LEN, "rx-%d.rx_bytes", i); 1820 p += ETH_GSTRING_LEN; 1821 } 1822 if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1) 1823 return; 1824 1825 if ((pf->lan_veb != I40E_NO_VEB) && 1826 (pf->flags & I40E_FLAG_VEB_STATS_ENABLED)) { 1827 for (i = 0; i < I40E_VEB_STATS_LEN; i++) { 1828 snprintf(p, ETH_GSTRING_LEN, "veb.%s", 1829 i40e_gstrings_veb_stats[i].stat_string); 1830 p += ETH_GSTRING_LEN; 1831 } 1832 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { 1833 snprintf(p, ETH_GSTRING_LEN, 1834 "veb.tc_%d_tx_packets", i); 1835 p += ETH_GSTRING_LEN; 1836 snprintf(p, ETH_GSTRING_LEN, 1837 "veb.tc_%d_tx_bytes", i); 1838 p += ETH_GSTRING_LEN; 1839 snprintf(p, ETH_GSTRING_LEN, 1840 "veb.tc_%d_rx_packets", i); 1841 p += ETH_GSTRING_LEN; 1842 snprintf(p, ETH_GSTRING_LEN, 1843 "veb.tc_%d_rx_bytes", i); 1844 p += ETH_GSTRING_LEN; 1845 } 1846 } 1847 for (i = 0; i < I40E_GLOBAL_STATS_LEN; i++) { 1848 snprintf(p, ETH_GSTRING_LEN, "port.%s", 1849 i40e_gstrings_stats[i].stat_string); 1850 p += ETH_GSTRING_LEN; 1851 } 1852 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) { 1853 snprintf(p, ETH_GSTRING_LEN, 1854 "port.tx_priority_%d_xon", i); 1855 p += ETH_GSTRING_LEN; 1856 snprintf(p, ETH_GSTRING_LEN, 1857 "port.tx_priority_%d_xoff", i); 1858 p += ETH_GSTRING_LEN; 1859 } 1860 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) { 1861 snprintf(p, ETH_GSTRING_LEN, 1862 "port.rx_priority_%d_xon", i); 1863 p += ETH_GSTRING_LEN; 1864 snprintf(p, ETH_GSTRING_LEN, 1865 "port.rx_priority_%d_xoff", i); 1866 p += 
ETH_GSTRING_LEN; 1867 } 1868 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) { 1869 snprintf(p, ETH_GSTRING_LEN, 1870 "port.rx_priority_%d_xon_2_xoff", i); 1871 p += ETH_GSTRING_LEN; 1872 } 1873 /* BUG_ON(p - data != I40E_STATS_LEN * ETH_GSTRING_LEN); */ 1874 break; 1875 case ETH_SS_PRIV_FLAGS: 1876 for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++) { 1877 snprintf(p, ETH_GSTRING_LEN, "%s", 1878 i40e_gstrings_priv_flags[i].flag_string); 1879 p += ETH_GSTRING_LEN; 1880 } 1881 if (pf->hw.pf_id != 0) 1882 break; 1883 for (i = 0; i < I40E_GL_PRIV_FLAGS_STR_LEN; i++) { 1884 snprintf(p, ETH_GSTRING_LEN, "%s", 1885 i40e_gl_gstrings_priv_flags[i].flag_string); 1886 p += ETH_GSTRING_LEN; 1887 } 1888 break; 1889 default: 1890 break; 1891 } 1892 } 1893 1894 static int i40e_get_ts_info(struct net_device *dev, 1895 struct ethtool_ts_info *info) 1896 { 1897 struct i40e_pf *pf = i40e_netdev_to_pf(dev); 1898 1899 /* only report HW timestamping if PTP is enabled */ 1900 if (!(pf->flags & I40E_FLAG_PTP)) 1901 return ethtool_op_get_ts_info(dev, info); 1902 1903 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | 1904 SOF_TIMESTAMPING_RX_SOFTWARE | 1905 SOF_TIMESTAMPING_SOFTWARE | 1906 SOF_TIMESTAMPING_TX_HARDWARE | 1907 SOF_TIMESTAMPING_RX_HARDWARE | 1908 SOF_TIMESTAMPING_RAW_HARDWARE; 1909 1910 if (pf->ptp_clock) 1911 info->phc_index = ptp_clock_index(pf->ptp_clock); 1912 else 1913 info->phc_index = -1; 1914 1915 info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON); 1916 1917 info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | 1918 BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | 1919 BIT(HWTSTAMP_FILTER_PTP_V2_L2_SYNC) | 1920 BIT(HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ); 1921 1922 if (pf->hw_features & I40E_HW_PTP_L4_CAPABLE) 1923 info->rx_filters |= BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | 1924 BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | 1925 BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) | 1926 BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) | 1927 BIT(HWTSTAMP_FILTER_PTP_V2_SYNC) | 1928 BIT(HWTSTAMP_FILTER_PTP_V2_L4_SYNC) | 1929 BIT(HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) | 1930 BIT(HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ); 1931 1932 return 0; 1933 } 1934 1935 static int i40e_link_test(struct net_device *netdev, u64 *data) 1936 { 1937 struct i40e_netdev_priv *np = netdev_priv(netdev); 1938 struct i40e_pf *pf = np->vsi->back; 1939 i40e_status status; 1940 bool link_up = false; 1941 1942 netif_info(pf, hw, netdev, "link test\n"); 1943 status = i40e_get_link_status(&pf->hw, &link_up); 1944 if (status) { 1945 netif_err(pf, drv, netdev, "link query timed out, please retry test\n"); 1946 *data = 1; 1947 return *data; 1948 } 1949 1950 if (link_up) 1951 *data = 0; 1952 else 1953 *data = 1; 1954 1955 return *data; 1956 } 1957 1958 static int i40e_reg_test(struct net_device *netdev, u64 *data) 1959 { 1960 struct i40e_netdev_priv *np = netdev_priv(netdev); 1961 struct i40e_pf *pf = np->vsi->back; 1962 1963 netif_info(pf, hw, netdev, "register test\n"); 1964 *data = i40e_diag_reg_test(&pf->hw); 1965 1966 return *data; 1967 } 1968 1969 static int i40e_eeprom_test(struct net_device *netdev, u64 *data) 1970 { 1971 struct i40e_netdev_priv *np = netdev_priv(netdev); 1972 struct i40e_pf *pf = np->vsi->back; 1973 1974 netif_info(pf, hw, netdev, "eeprom test\n"); 1975 *data = i40e_diag_eeprom_test(&pf->hw); 1976 1977 /* forcebly clear the NVM Update state machine */ 1978 pf->hw.nvmupd_state = I40E_NVMUPD_STATE_INIT; 1979 1980 return *data; 1981 } 1982 1983 static int i40e_intr_test(struct net_device *netdev, u64 *data) 1984 { 1985 struct i40e_netdev_priv *np = 
netdev_priv(netdev); 1986 struct i40e_pf *pf = np->vsi->back; 1987 u16 swc_old = pf->sw_int_count; 1988 1989 netif_info(pf, hw, netdev, "interrupt test\n"); 1990 wr32(&pf->hw, I40E_PFINT_DYN_CTL0, 1991 (I40E_PFINT_DYN_CTL0_INTENA_MASK | 1992 I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK | 1993 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK | 1994 I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK | 1995 I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK)); 1996 usleep_range(1000, 2000); 1997 *data = (swc_old == pf->sw_int_count); 1998 1999 return *data; 2000 } 2001 2002 static inline bool i40e_active_vfs(struct i40e_pf *pf) 2003 { 2004 struct i40e_vf *vfs = pf->vf; 2005 int i; 2006 2007 for (i = 0; i < pf->num_alloc_vfs; i++) 2008 if (test_bit(I40E_VF_STATE_ACTIVE, &vfs[i].vf_states)) 2009 return true; 2010 return false; 2011 } 2012 2013 static inline bool i40e_active_vmdqs(struct i40e_pf *pf) 2014 { 2015 return !!i40e_find_vsi_by_type(pf, I40E_VSI_VMDQ2); 2016 } 2017 2018 static void i40e_diag_test(struct net_device *netdev, 2019 struct ethtool_test *eth_test, u64 *data) 2020 { 2021 struct i40e_netdev_priv *np = netdev_priv(netdev); 2022 bool if_running = netif_running(netdev); 2023 struct i40e_pf *pf = np->vsi->back; 2024 2025 if (eth_test->flags == ETH_TEST_FL_OFFLINE) { 2026 /* Offline tests */ 2027 netif_info(pf, drv, netdev, "offline testing starting\n"); 2028 2029 set_bit(__I40E_TESTING, pf->state); 2030 2031 if (i40e_active_vfs(pf) || i40e_active_vmdqs(pf)) { 2032 dev_warn(&pf->pdev->dev, 2033 "Please take active VFs and Netqueues offline and restart the adapter before running NIC diagnostics\n"); 2034 data[I40E_ETH_TEST_REG] = 1; 2035 data[I40E_ETH_TEST_EEPROM] = 1; 2036 data[I40E_ETH_TEST_INTR] = 1; 2037 data[I40E_ETH_TEST_LINK] = 1; 2038 eth_test->flags |= ETH_TEST_FL_FAILED; 2039 clear_bit(__I40E_TESTING, pf->state); 2040 goto skip_ol_tests; 2041 } 2042 2043 /* If the device is online then take it offline */ 2044 if (if_running) 2045 /* indicate we're in test mode */ 2046 i40e_close(netdev); 2047 else 2048 /* This reset does not affect link - if it is 2049 * changed to a type of reset that does affect 2050 * link then the following link test would have 2051 * to be moved to before the reset 2052 */ 2053 i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED), true); 2054 2055 if (i40e_link_test(netdev, &data[I40E_ETH_TEST_LINK])) 2056 eth_test->flags |= ETH_TEST_FL_FAILED; 2057 2058 if (i40e_eeprom_test(netdev, &data[I40E_ETH_TEST_EEPROM])) 2059 eth_test->flags |= ETH_TEST_FL_FAILED; 2060 2061 if (i40e_intr_test(netdev, &data[I40E_ETH_TEST_INTR])) 2062 eth_test->flags |= ETH_TEST_FL_FAILED; 2063 2064 /* run reg test last, a reset is required after it */ 2065 if (i40e_reg_test(netdev, &data[I40E_ETH_TEST_REG])) 2066 eth_test->flags |= ETH_TEST_FL_FAILED; 2067 2068 clear_bit(__I40E_TESTING, pf->state); 2069 i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED), true); 2070 2071 if (if_running) 2072 i40e_open(netdev); 2073 } else { 2074 /* Online tests */ 2075 netif_info(pf, drv, netdev, "online testing starting\n"); 2076 2077 if (i40e_link_test(netdev, &data[I40E_ETH_TEST_LINK])) 2078 eth_test->flags |= ETH_TEST_FL_FAILED; 2079 2080 /* Offline only tests, not run in online; pass by default */ 2081 data[I40E_ETH_TEST_REG] = 0; 2082 data[I40E_ETH_TEST_EEPROM] = 0; 2083 data[I40E_ETH_TEST_INTR] = 0; 2084 } 2085 2086 skip_ol_tests: 2087 2088 netif_info(pf, drv, netdev, "testing finished\n"); 2089 } 2090 2091 static void i40e_get_wol(struct net_device *netdev, 2092 struct ethtool_wolinfo *wol) 2093 { 2094 struct i40e_netdev_priv *np = 
netdev_priv(netdev); 2095 struct i40e_pf *pf = np->vsi->back; 2096 struct i40e_hw *hw = &pf->hw; 2097 u16 wol_nvm_bits; 2098 2099 /* NVM bit on means WoL disabled for the port */ 2100 i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits); 2101 if ((BIT(hw->port) & wol_nvm_bits) || (hw->partition_id != 1)) { 2102 wol->supported = 0; 2103 wol->wolopts = 0; 2104 } else { 2105 wol->supported = WAKE_MAGIC; 2106 wol->wolopts = (pf->wol_en ? WAKE_MAGIC : 0); 2107 } 2108 } 2109 2110 /** 2111 * i40e_set_wol - set the WakeOnLAN configuration 2112 * @netdev: the netdev in question 2113 * @wol: the ethtool WoL setting data 2114 **/ 2115 static int i40e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) 2116 { 2117 struct i40e_netdev_priv *np = netdev_priv(netdev); 2118 struct i40e_pf *pf = np->vsi->back; 2119 struct i40e_vsi *vsi = np->vsi; 2120 struct i40e_hw *hw = &pf->hw; 2121 u16 wol_nvm_bits; 2122 2123 /* WoL not supported if this isn't the controlling PF on the port */ 2124 if (hw->partition_id != 1) { 2125 i40e_partition_setting_complaint(pf); 2126 return -EOPNOTSUPP; 2127 } 2128 2129 if (vsi != pf->vsi[pf->lan_vsi]) 2130 return -EOPNOTSUPP; 2131 2132 /* NVM bit on means WoL disabled for the port */ 2133 i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits); 2134 if (BIT(hw->port) & wol_nvm_bits) 2135 return -EOPNOTSUPP; 2136 2137 /* only magic packet is supported */ 2138 if (wol->wolopts && (wol->wolopts != WAKE_MAGIC)) 2139 return -EOPNOTSUPP; 2140 2141 /* is this a new value? */ 2142 if (pf->wol_en != !!wol->wolopts) { 2143 pf->wol_en = !!wol->wolopts; 2144 device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en); 2145 } 2146 2147 return 0; 2148 } 2149 2150 static int i40e_set_phys_id(struct net_device *netdev, 2151 enum ethtool_phys_id_state state) 2152 { 2153 struct i40e_netdev_priv *np = netdev_priv(netdev); 2154 i40e_status ret = 0; 2155 struct i40e_pf *pf = np->vsi->back; 2156 struct i40e_hw *hw = &pf->hw; 2157 int blink_freq = 2; 2158 u16 temp_status; 2159 2160 switch (state) { 2161 case ETHTOOL_ID_ACTIVE: 2162 if (!(pf->hw_features & I40E_HW_PHY_CONTROLS_LEDS)) { 2163 pf->led_status = i40e_led_get(hw); 2164 } else { 2165 if (!(hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE)) 2166 i40e_aq_set_phy_debug(hw, I40E_PHY_DEBUG_ALL, 2167 NULL); 2168 ret = i40e_led_get_phy(hw, &temp_status, 2169 &pf->phy_led_val); 2170 pf->led_status = temp_status; 2171 } 2172 return blink_freq; 2173 case ETHTOOL_ID_ON: 2174 if (!(pf->hw_features & I40E_HW_PHY_CONTROLS_LEDS)) 2175 i40e_led_set(hw, 0xf, false); 2176 else 2177 ret = i40e_led_set_phy(hw, true, pf->led_status, 0); 2178 break; 2179 case ETHTOOL_ID_OFF: 2180 if (!(pf->hw_features & I40E_HW_PHY_CONTROLS_LEDS)) 2181 i40e_led_set(hw, 0x0, false); 2182 else 2183 ret = i40e_led_set_phy(hw, false, pf->led_status, 0); 2184 break; 2185 case ETHTOOL_ID_INACTIVE: 2186 if (!(pf->hw_features & I40E_HW_PHY_CONTROLS_LEDS)) { 2187 i40e_led_set(hw, pf->led_status, false); 2188 } else { 2189 ret = i40e_led_set_phy(hw, false, pf->led_status, 2190 (pf->phy_led_val | 2191 I40E_PHY_LED_MODE_ORIG)); 2192 if (!(hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE)) 2193 i40e_aq_set_phy_debug(hw, 0, NULL); 2194 } 2195 break; 2196 default: 2197 break; 2198 } 2199 if (ret) 2200 return -ENOENT; 2201 else 2202 return 0; 2203 } 2204 2205 /* NOTE: i40e hardware uses a conversion factor of 2 for Interrupt 2206 * Throttle Rate (ITR) ie. 
ITR(1) = 2us ITR(10) = 20 us, and also 2207 * 125us (8000 interrupts per second) == ITR(62) 2208 */ 2209 2210 /** 2211 * __i40e_get_coalesce - get per-queue coalesce settings 2212 * @netdev: the netdev to check 2213 * @ec: ethtool coalesce data structure 2214 * @queue: which queue to pick 2215 * 2216 * Gets the per-queue settings for coalescence. Specifically Rx and Tx usecs 2217 * are per queue. If queue is <0 then we default to queue 0 as the 2218 * representative value. 2219 **/ 2220 static int __i40e_get_coalesce(struct net_device *netdev, 2221 struct ethtool_coalesce *ec, 2222 int queue) 2223 { 2224 struct i40e_netdev_priv *np = netdev_priv(netdev); 2225 struct i40e_ring *rx_ring, *tx_ring; 2226 struct i40e_vsi *vsi = np->vsi; 2227 2228 ec->tx_max_coalesced_frames_irq = vsi->work_limit; 2229 ec->rx_max_coalesced_frames_irq = vsi->work_limit; 2230 2231 /* rx and tx usecs has per queue value. If user doesn't specify the 2232 * queue, return queue 0's value to represent. 2233 */ 2234 if (queue < 0) 2235 queue = 0; 2236 else if (queue >= vsi->num_queue_pairs) 2237 return -EINVAL; 2238 2239 rx_ring = vsi->rx_rings[queue]; 2240 tx_ring = vsi->tx_rings[queue]; 2241 2242 if (ITR_IS_DYNAMIC(rx_ring->rx_itr_setting)) 2243 ec->use_adaptive_rx_coalesce = 1; 2244 2245 if (ITR_IS_DYNAMIC(tx_ring->tx_itr_setting)) 2246 ec->use_adaptive_tx_coalesce = 1; 2247 2248 ec->rx_coalesce_usecs = rx_ring->rx_itr_setting & ~I40E_ITR_DYNAMIC; 2249 ec->tx_coalesce_usecs = tx_ring->tx_itr_setting & ~I40E_ITR_DYNAMIC; 2250 2251 /* we use the _usecs_high to store/set the interrupt rate limit 2252 * that the hardware supports, that almost but not quite 2253 * fits the original intent of the ethtool variable, 2254 * the rx_coalesce_usecs_high limits total interrupts 2255 * per second from both tx/rx sources. 2256 */ 2257 ec->rx_coalesce_usecs_high = vsi->int_rate_limit; 2258 ec->tx_coalesce_usecs_high = vsi->int_rate_limit; 2259 2260 return 0; 2261 } 2262 2263 /** 2264 * i40e_get_coalesce - get a netdev's coalesce settings 2265 * @netdev: the netdev to check 2266 * @ec: ethtool coalesce data structure 2267 * 2268 * Gets the coalesce settings for a particular netdev. Note that if user has 2269 * modified per-queue settings, this only guarantees to represent queue 0. See 2270 * __i40e_get_coalesce for more details. 2271 **/ 2272 static int i40e_get_coalesce(struct net_device *netdev, 2273 struct ethtool_coalesce *ec) 2274 { 2275 return __i40e_get_coalesce(netdev, ec, -1); 2276 } 2277 2278 /** 2279 * i40e_get_per_queue_coalesce - gets coalesce settings for particular queue 2280 * @netdev: netdev structure 2281 * @ec: ethtool's coalesce settings 2282 * @queue: the particular queue to read 2283 * 2284 * Will read a specific queue's coalesce settings 2285 **/ 2286 static int i40e_get_per_queue_coalesce(struct net_device *netdev, u32 queue, 2287 struct ethtool_coalesce *ec) 2288 { 2289 return __i40e_get_coalesce(netdev, ec, queue); 2290 } 2291 2292 /** 2293 * i40e_set_itr_per_queue - set ITR values for specific queue 2294 * @vsi: the VSI to set values for 2295 * @ec: coalesce settings from ethtool 2296 * @queue: the queue to modify 2297 * 2298 * Change the ITR settings for a specific queue. 
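 *
 * Illustrative note (not part of the original source): with the
 * factor-of-2 ITR encoding described above, a request such as
 * "ethtool -C <dev> rx-usecs 50" stores 50 in rx_itr_setting and is
 * expected to land in the ITR register as roughly 50 / 2 = 25, while
 * the 125 us (~8000 interrupts/second) case corresponds to ITR(62).
 * Per-queue requests reach this helper through
 * i40e_set_per_queue_coalesce(), e.g. something along the lines of
 * "ethtool --per-queue <dev> queue_mask 0x1 --coalesce rx-usecs 50";
 * the exact command syntax depends on the installed ethtool version.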
2299 **/ 2300 static void i40e_set_itr_per_queue(struct i40e_vsi *vsi, 2301 struct ethtool_coalesce *ec, 2302 int queue) 2303 { 2304 struct i40e_pf *pf = vsi->back; 2305 struct i40e_hw *hw = &pf->hw; 2306 struct i40e_q_vector *q_vector; 2307 u16 vector, intrl; 2308 2309 intrl = i40e_intrl_usec_to_reg(vsi->int_rate_limit); 2310 2311 vsi->rx_rings[queue]->rx_itr_setting = ec->rx_coalesce_usecs; 2312 vsi->tx_rings[queue]->tx_itr_setting = ec->tx_coalesce_usecs; 2313 2314 if (ec->use_adaptive_rx_coalesce) 2315 vsi->rx_rings[queue]->rx_itr_setting |= I40E_ITR_DYNAMIC; 2316 else 2317 vsi->rx_rings[queue]->rx_itr_setting &= ~I40E_ITR_DYNAMIC; 2318 2319 if (ec->use_adaptive_tx_coalesce) 2320 vsi->tx_rings[queue]->tx_itr_setting |= I40E_ITR_DYNAMIC; 2321 else 2322 vsi->tx_rings[queue]->tx_itr_setting &= ~I40E_ITR_DYNAMIC; 2323 2324 q_vector = vsi->rx_rings[queue]->q_vector; 2325 q_vector->rx.itr = ITR_TO_REG(vsi->rx_rings[queue]->rx_itr_setting); 2326 vector = vsi->base_vector + q_vector->v_idx; 2327 wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1), q_vector->rx.itr); 2328 2329 q_vector = vsi->tx_rings[queue]->q_vector; 2330 q_vector->tx.itr = ITR_TO_REG(vsi->tx_rings[queue]->tx_itr_setting); 2331 vector = vsi->base_vector + q_vector->v_idx; 2332 wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1), q_vector->tx.itr); 2333 2334 wr32(hw, I40E_PFINT_RATEN(vector - 1), intrl); 2335 i40e_flush(hw); 2336 } 2337 2338 /** 2339 * __i40e_set_coalesce - set coalesce settings for particular queue 2340 * @netdev: the netdev to change 2341 * @ec: ethtool coalesce settings 2342 * @queue: the queue to change 2343 * 2344 * Sets the coalesce settings for a particular queue. 2345 **/ 2346 static int __i40e_set_coalesce(struct net_device *netdev, 2347 struct ethtool_coalesce *ec, 2348 int queue) 2349 { 2350 struct i40e_netdev_priv *np = netdev_priv(netdev); 2351 u16 intrl_reg, cur_rx_itr, cur_tx_itr; 2352 struct i40e_vsi *vsi = np->vsi; 2353 struct i40e_pf *pf = vsi->back; 2354 int i; 2355 2356 if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq) 2357 vsi->work_limit = ec->tx_max_coalesced_frames_irq; 2358 2359 if (queue < 0) { 2360 cur_rx_itr = vsi->rx_rings[0]->rx_itr_setting; 2361 cur_tx_itr = vsi->tx_rings[0]->tx_itr_setting; 2362 } else if (queue < vsi->num_queue_pairs) { 2363 cur_rx_itr = vsi->rx_rings[queue]->rx_itr_setting; 2364 cur_tx_itr = vsi->tx_rings[queue]->tx_itr_setting; 2365 } else { 2366 netif_info(pf, drv, netdev, "Invalid queue value, queue range is 0 - %d\n", 2367 vsi->num_queue_pairs - 1); 2368 return -EINVAL; 2369 } 2370 2371 cur_tx_itr &= ~I40E_ITR_DYNAMIC; 2372 cur_rx_itr &= ~I40E_ITR_DYNAMIC; 2373 2374 /* tx_coalesce_usecs_high is ignored, use rx-usecs-high instead */ 2375 if (ec->tx_coalesce_usecs_high != vsi->int_rate_limit) { 2376 netif_info(pf, drv, netdev, "tx-usecs-high is not used, please program rx-usecs-high\n"); 2377 return -EINVAL; 2378 } 2379 2380 if (ec->rx_coalesce_usecs_high > INTRL_REG_TO_USEC(I40E_MAX_INTRL)) { 2381 netif_info(pf, drv, netdev, "Invalid value, rx-usecs-high range is 0-%lu\n", 2382 INTRL_REG_TO_USEC(I40E_MAX_INTRL)); 2383 return -EINVAL; 2384 } 2385 2386 if (ec->rx_coalesce_usecs != cur_rx_itr && 2387 ec->use_adaptive_rx_coalesce) { 2388 netif_info(pf, drv, netdev, "RX interrupt moderation cannot be changed if adaptive-rx is enabled.\n"); 2389 return -EINVAL; 2390 } 2391 2392 if (ec->rx_coalesce_usecs > (I40E_MAX_ITR << 1)) { 2393 netif_info(pf, drv, netdev, "Invalid value, rx-usecs range is 0-8160\n"); 2394 return -EINVAL; 2395 } 2396 2397 if 
(ec->tx_coalesce_usecs != cur_tx_itr && 2398 ec->use_adaptive_tx_coalesce) { 2399 netif_info(pf, drv, netdev, "TX interrupt moderation cannot be changed if adaptive-tx is enabled.\n"); 2400 return -EINVAL; 2401 } 2402 2403 if (ec->tx_coalesce_usecs > (I40E_MAX_ITR << 1)) { 2404 netif_info(pf, drv, netdev, "Invalid value, tx-usecs range is 0-8160\n"); 2405 return -EINVAL; 2406 } 2407 2408 if (ec->use_adaptive_rx_coalesce && !cur_rx_itr) 2409 ec->rx_coalesce_usecs = I40E_MIN_ITR << 1; 2410 2411 if (ec->use_adaptive_tx_coalesce && !cur_tx_itr) 2412 ec->tx_coalesce_usecs = I40E_MIN_ITR << 1; 2413 2414 intrl_reg = i40e_intrl_usec_to_reg(ec->rx_coalesce_usecs_high); 2415 vsi->int_rate_limit = INTRL_REG_TO_USEC(intrl_reg); 2416 if (vsi->int_rate_limit != ec->rx_coalesce_usecs_high) { 2417 netif_info(pf, drv, netdev, "Interrupt rate limit rounded down to %d\n", 2418 vsi->int_rate_limit); 2419 } 2420 2421 /* rx and tx usecs has per queue value. If user doesn't specify the 2422 * queue, apply to all queues. 2423 */ 2424 if (queue < 0) { 2425 for (i = 0; i < vsi->num_queue_pairs; i++) 2426 i40e_set_itr_per_queue(vsi, ec, i); 2427 } else { 2428 i40e_set_itr_per_queue(vsi, ec, queue); 2429 } 2430 2431 return 0; 2432 } 2433 2434 /** 2435 * i40e_set_coalesce - set coalesce settings for every queue on the netdev 2436 * @netdev: the netdev to change 2437 * @ec: ethtool coalesce settings 2438 * 2439 * This will set each queue to the same coalesce settings. 2440 **/ 2441 static int i40e_set_coalesce(struct net_device *netdev, 2442 struct ethtool_coalesce *ec) 2443 { 2444 return __i40e_set_coalesce(netdev, ec, -1); 2445 } 2446 2447 /** 2448 * i40e_set_per_queue_coalesce - set specific queue's coalesce settings 2449 * @netdev: the netdev to change 2450 * @ec: ethtool's coalesce settings 2451 * @queue: the queue to change 2452 * 2453 * Sets the specified queue's coalesce settings. 2454 **/ 2455 static int i40e_set_per_queue_coalesce(struct net_device *netdev, u32 queue, 2456 struct ethtool_coalesce *ec) 2457 { 2458 return __i40e_set_coalesce(netdev, ec, queue); 2459 } 2460 2461 /** 2462 * i40e_get_rss_hash_opts - Get RSS hash Input Set for each flow type 2463 * @pf: pointer to the physical function struct 2464 * @cmd: ethtool rxnfc command 2465 * 2466 * Returns Success if the flow is supported, else Invalid Input. 
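 *
 * Example (illustrative, added for clarity): if the tcp4 hash input
 * set registers have the L3 source/destination and L4
 * source/destination bits enabled, this reports
 * RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3 in cmd->data,
 * which is what a query along the lines of
 * "ethtool -n <dev> rx-flow-hash tcp4" would display as hashing on
 * IP SA/DA and the TCP source/destination ports.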
2467 **/ 2468 static int i40e_get_rss_hash_opts(struct i40e_pf *pf, struct ethtool_rxnfc *cmd) 2469 { 2470 struct i40e_hw *hw = &pf->hw; 2471 u8 flow_pctype = 0; 2472 u64 i_set = 0; 2473 2474 cmd->data = 0; 2475 2476 switch (cmd->flow_type) { 2477 case TCP_V4_FLOW: 2478 flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP; 2479 break; 2480 case UDP_V4_FLOW: 2481 flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP; 2482 break; 2483 case TCP_V6_FLOW: 2484 flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV6_TCP; 2485 break; 2486 case UDP_V6_FLOW: 2487 flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV6_UDP; 2488 break; 2489 case SCTP_V4_FLOW: 2490 case AH_ESP_V4_FLOW: 2491 case AH_V4_FLOW: 2492 case ESP_V4_FLOW: 2493 case IPV4_FLOW: 2494 case SCTP_V6_FLOW: 2495 case AH_ESP_V6_FLOW: 2496 case AH_V6_FLOW: 2497 case ESP_V6_FLOW: 2498 case IPV6_FLOW: 2499 /* Default is src/dest for IP, no matter the L4 hashing */ 2500 cmd->data |= RXH_IP_SRC | RXH_IP_DST; 2501 break; 2502 default: 2503 return -EINVAL; 2504 } 2505 2506 /* Read flow based hash input set register */ 2507 if (flow_pctype) { 2508 i_set = (u64)i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, 2509 flow_pctype)) | 2510 ((u64)i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, 2511 flow_pctype)) << 32); 2512 } 2513 2514 /* Process bits of hash input set */ 2515 if (i_set) { 2516 if (i_set & I40E_L4_SRC_MASK) 2517 cmd->data |= RXH_L4_B_0_1; 2518 if (i_set & I40E_L4_DST_MASK) 2519 cmd->data |= RXH_L4_B_2_3; 2520 2521 if (cmd->flow_type == TCP_V4_FLOW || 2522 cmd->flow_type == UDP_V4_FLOW) { 2523 if (i_set & I40E_L3_SRC_MASK) 2524 cmd->data |= RXH_IP_SRC; 2525 if (i_set & I40E_L3_DST_MASK) 2526 cmd->data |= RXH_IP_DST; 2527 } else if (cmd->flow_type == TCP_V6_FLOW || 2528 cmd->flow_type == UDP_V6_FLOW) { 2529 if (i_set & I40E_L3_V6_SRC_MASK) 2530 cmd->data |= RXH_IP_SRC; 2531 if (i_set & I40E_L3_V6_DST_MASK) 2532 cmd->data |= RXH_IP_DST; 2533 } 2534 } 2535 2536 return 0; 2537 } 2538 2539 /** 2540 * i40e_check_mask - Check whether a mask field is set 2541 * @mask: the full mask value 2542 * @field; mask of the field to check 2543 * 2544 * If the given mask is fully set, return positive value. If the mask for the 2545 * field is fully unset, return zero. Otherwise return a negative error code. 2546 **/ 2547 static int i40e_check_mask(u64 mask, u64 field) 2548 { 2549 u64 value = mask & field; 2550 2551 if (value == field) 2552 return 1; 2553 else if (!value) 2554 return 0; 2555 else 2556 return -1; 2557 } 2558 2559 /** 2560 * i40e_parse_rx_flow_user_data - Deconstruct user-defined data 2561 * @fsp: pointer to rx flow specification 2562 * @data: pointer to userdef data structure for storage 2563 * 2564 * Read the user-defined data and deconstruct the value into a structure. No 2565 * other code should read the user-defined data, so as to ensure that every 2566 * place consistently reads the value correctly. 2567 * 2568 * The user-defined field is a 64bit Big Endian format value, which we 2569 * deconstruct by reading bits or bit fields from it. Single bit flags shall 2570 * be defined starting from the highest bits, while small bit field values 2571 * shall be defined starting from the lowest bits. 2572 * 2573 * Returns 0 if the data is valid, and non-zero if the userdef data is invalid 2574 * and the filter should be rejected. The data structure will always be 2575 * modified even if FLOW_EXT is not set. 
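 *
 * Illustrative encoding example (values chosen arbitrarily): to match
 * the flex word 0xABCD at an (even) offset of 6 bytes, the encoded
 * user-def value would be (6 << 16) | 0xABCD = 0x6ABCD, with the low
 * 32 mask bits set; this function would then report
 * flex_word = 0xABCD, flex_offset = 6 and flex_filter = true.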
2576 * 2577 **/ 2578 static int i40e_parse_rx_flow_user_data(struct ethtool_rx_flow_spec *fsp, 2579 struct i40e_rx_flow_userdef *data) 2580 { 2581 u64 value, mask; 2582 int valid; 2583 2584 /* Zero memory first so it's always consistent. */ 2585 memset(data, 0, sizeof(*data)); 2586 2587 if (!(fsp->flow_type & FLOW_EXT)) 2588 return 0; 2589 2590 value = be64_to_cpu(*((__be64 *)fsp->h_ext.data)); 2591 mask = be64_to_cpu(*((__be64 *)fsp->m_ext.data)); 2592 2593 #define I40E_USERDEF_FLEX_WORD GENMASK_ULL(15, 0) 2594 #define I40E_USERDEF_FLEX_OFFSET GENMASK_ULL(31, 16) 2595 #define I40E_USERDEF_FLEX_FILTER GENMASK_ULL(31, 0) 2596 2597 valid = i40e_check_mask(mask, I40E_USERDEF_FLEX_FILTER); 2598 if (valid < 0) { 2599 return -EINVAL; 2600 } else if (valid) { 2601 data->flex_word = value & I40E_USERDEF_FLEX_WORD; 2602 data->flex_offset = 2603 (value & I40E_USERDEF_FLEX_OFFSET) >> 16; 2604 data->flex_filter = true; 2605 } 2606 2607 return 0; 2608 } 2609 2610 /** 2611 * i40e_fill_rx_flow_user_data - Fill in user-defined data field 2612 * @fsp: pointer to rx_flow specification 2613 * 2614 * Reads the userdef data structure and properly fills in the user defined 2615 * fields of the rx_flow_spec. 2616 **/ 2617 static void i40e_fill_rx_flow_user_data(struct ethtool_rx_flow_spec *fsp, 2618 struct i40e_rx_flow_userdef *data) 2619 { 2620 u64 value = 0, mask = 0; 2621 2622 if (data->flex_filter) { 2623 value |= data->flex_word; 2624 value |= (u64)data->flex_offset << 16; 2625 mask |= I40E_USERDEF_FLEX_FILTER; 2626 } 2627 2628 if (value || mask) 2629 fsp->flow_type |= FLOW_EXT; 2630 2631 *((__be64 *)fsp->h_ext.data) = cpu_to_be64(value); 2632 *((__be64 *)fsp->m_ext.data) = cpu_to_be64(mask); 2633 } 2634 2635 /** 2636 * i40e_get_ethtool_fdir_all - Populates the rule count of a command 2637 * @pf: Pointer to the physical function struct 2638 * @cmd: The command to get or set Rx flow classification rules 2639 * @rule_locs: Array of used rule locations 2640 * 2641 * This function populates both the total and actual rule count of 2642 * the ethtool flow classification command 2643 * 2644 * Returns 0 on success or -EMSGSIZE if entry not found 2645 **/ 2646 static int i40e_get_ethtool_fdir_all(struct i40e_pf *pf, 2647 struct ethtool_rxnfc *cmd, 2648 u32 *rule_locs) 2649 { 2650 struct i40e_fdir_filter *rule; 2651 struct hlist_node *node2; 2652 int cnt = 0; 2653 2654 /* report total rule count */ 2655 cmd->data = i40e_get_fd_cnt_all(pf); 2656 2657 hlist_for_each_entry_safe(rule, node2, 2658 &pf->fdir_filter_list, fdir_node) { 2659 if (cnt == cmd->rule_cnt) 2660 return -EMSGSIZE; 2661 2662 rule_locs[cnt] = rule->fd_id; 2663 cnt++; 2664 } 2665 2666 cmd->rule_cnt = cnt; 2667 2668 return 0; 2669 } 2670 2671 /** 2672 * i40e_get_ethtool_fdir_entry - Look up a filter based on Rx flow 2673 * @pf: Pointer to the physical function struct 2674 * @cmd: The command to get or set Rx flow classification rules 2675 * 2676 * This function looks up a filter based on the Rx flow classification 2677 * command and fills the flow spec info for it if found 2678 * 2679 * Returns 0 on success or -EINVAL if filter not found 2680 **/ 2681 static int i40e_get_ethtool_fdir_entry(struct i40e_pf *pf, 2682 struct ethtool_rxnfc *cmd) 2683 { 2684 struct ethtool_rx_flow_spec *fsp = 2685 (struct ethtool_rx_flow_spec *)&cmd->fs; 2686 struct i40e_rx_flow_userdef userdef = {0}; 2687 struct i40e_fdir_filter *rule = NULL; 2688 struct hlist_node *node2; 2689 u64 input_set; 2690 u16 index; 2691 2692 hlist_for_each_entry_safe(rule, node2, 2693 
&pf->fdir_filter_list, fdir_node) { 2694 if (fsp->location <= rule->fd_id) 2695 break; 2696 } 2697 2698 if (!rule || fsp->location != rule->fd_id) 2699 return -EINVAL; 2700 2701 fsp->flow_type = rule->flow_type; 2702 if (fsp->flow_type == IP_USER_FLOW) { 2703 fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4; 2704 fsp->h_u.usr_ip4_spec.proto = 0; 2705 fsp->m_u.usr_ip4_spec.proto = 0; 2706 } 2707 2708 /* Reverse the src and dest notion, since the HW views them from 2709 * Tx perspective where as the user expects it from Rx filter view. 2710 */ 2711 fsp->h_u.tcp_ip4_spec.psrc = rule->dst_port; 2712 fsp->h_u.tcp_ip4_spec.pdst = rule->src_port; 2713 fsp->h_u.tcp_ip4_spec.ip4src = rule->dst_ip; 2714 fsp->h_u.tcp_ip4_spec.ip4dst = rule->src_ip; 2715 2716 switch (rule->flow_type) { 2717 case SCTP_V4_FLOW: 2718 index = I40E_FILTER_PCTYPE_NONF_IPV4_SCTP; 2719 break; 2720 case TCP_V4_FLOW: 2721 index = I40E_FILTER_PCTYPE_NONF_IPV4_TCP; 2722 break; 2723 case UDP_V4_FLOW: 2724 index = I40E_FILTER_PCTYPE_NONF_IPV4_UDP; 2725 break; 2726 case IP_USER_FLOW: 2727 index = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER; 2728 break; 2729 default: 2730 /* If we have stored a filter with a flow type not listed here 2731 * it is almost certainly a driver bug. WARN(), and then 2732 * assign the input_set as if all fields are enabled to avoid 2733 * reading unassigned memory. 2734 */ 2735 WARN(1, "Missing input set index for flow_type %d\n", 2736 rule->flow_type); 2737 input_set = 0xFFFFFFFFFFFFFFFFULL; 2738 goto no_input_set; 2739 } 2740 2741 input_set = i40e_read_fd_input_set(pf, index); 2742 2743 no_input_set: 2744 if (input_set & I40E_L3_SRC_MASK) 2745 fsp->m_u.tcp_ip4_spec.ip4src = htonl(0xFFFF); 2746 2747 if (input_set & I40E_L3_DST_MASK) 2748 fsp->m_u.tcp_ip4_spec.ip4dst = htonl(0xFFFF); 2749 2750 if (input_set & I40E_L4_SRC_MASK) 2751 fsp->m_u.tcp_ip4_spec.psrc = htons(0xFFFFFFFF); 2752 2753 if (input_set & I40E_L4_DST_MASK) 2754 fsp->m_u.tcp_ip4_spec.pdst = htons(0xFFFFFFFF); 2755 2756 if (rule->dest_ctl == I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET) 2757 fsp->ring_cookie = RX_CLS_FLOW_DISC; 2758 else 2759 fsp->ring_cookie = rule->q_index; 2760 2761 if (rule->dest_vsi != pf->vsi[pf->lan_vsi]->id) { 2762 struct i40e_vsi *vsi; 2763 2764 vsi = i40e_find_vsi_from_id(pf, rule->dest_vsi); 2765 if (vsi && vsi->type == I40E_VSI_SRIOV) { 2766 /* VFs are zero-indexed by the driver, but ethtool 2767 * expects them to be one-indexed, so add one here 2768 */ 2769 u64 ring_vf = vsi->vf_id + 1; 2770 2771 ring_vf <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF; 2772 fsp->ring_cookie |= ring_vf; 2773 } 2774 } 2775 2776 if (rule->flex_filter) { 2777 userdef.flex_filter = true; 2778 userdef.flex_word = be16_to_cpu(rule->flex_word); 2779 userdef.flex_offset = rule->flex_offset; 2780 } 2781 2782 i40e_fill_rx_flow_user_data(fsp, &userdef); 2783 2784 return 0; 2785 } 2786 2787 /** 2788 * i40e_get_rxnfc - command to get RX flow classification rules 2789 * @netdev: network interface device structure 2790 * @cmd: ethtool rxnfc command 2791 * 2792 * Returns Success if the command is supported. 
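 *
 * For orientation (added note, not from the original source): the
 * sub-commands handled below typically correspond to user-space
 * queries such as "ethtool -n <dev> rx-flow-hash tcp4"
 * (ETHTOOL_GRXFH), "ethtool -n <dev>" listing Flow Director rules
 * (ETHTOOL_GRXCLSRLCNT/GRXCLSRLALL/GRXCLSRULE), and the RX ring count
 * reported alongside them (ETHTOOL_GRXRINGS); the exact mapping is up
 * to the ethtool binary in use.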
2793 **/ 2794 static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, 2795 u32 *rule_locs) 2796 { 2797 struct i40e_netdev_priv *np = netdev_priv(netdev); 2798 struct i40e_vsi *vsi = np->vsi; 2799 struct i40e_pf *pf = vsi->back; 2800 int ret = -EOPNOTSUPP; 2801 2802 switch (cmd->cmd) { 2803 case ETHTOOL_GRXRINGS: 2804 cmd->data = vsi->rss_size; 2805 ret = 0; 2806 break; 2807 case ETHTOOL_GRXFH: 2808 ret = i40e_get_rss_hash_opts(pf, cmd); 2809 break; 2810 case ETHTOOL_GRXCLSRLCNT: 2811 cmd->rule_cnt = pf->fdir_pf_active_filters; 2812 /* report total rule count */ 2813 cmd->data = i40e_get_fd_cnt_all(pf); 2814 ret = 0; 2815 break; 2816 case ETHTOOL_GRXCLSRULE: 2817 ret = i40e_get_ethtool_fdir_entry(pf, cmd); 2818 break; 2819 case ETHTOOL_GRXCLSRLALL: 2820 ret = i40e_get_ethtool_fdir_all(pf, cmd, rule_locs); 2821 break; 2822 default: 2823 break; 2824 } 2825 2826 return ret; 2827 } 2828 2829 /** 2830 * i40e_get_rss_hash_bits - Read RSS Hash bits from register 2831 * @nfc: pointer to user request 2832 * @i_setc bits currently set 2833 * 2834 * Returns value of bits to be set per user request 2835 **/ 2836 static u64 i40e_get_rss_hash_bits(struct ethtool_rxnfc *nfc, u64 i_setc) 2837 { 2838 u64 i_set = i_setc; 2839 u64 src_l3 = 0, dst_l3 = 0; 2840 2841 if (nfc->data & RXH_L4_B_0_1) 2842 i_set |= I40E_L4_SRC_MASK; 2843 else 2844 i_set &= ~I40E_L4_SRC_MASK; 2845 if (nfc->data & RXH_L4_B_2_3) 2846 i_set |= I40E_L4_DST_MASK; 2847 else 2848 i_set &= ~I40E_L4_DST_MASK; 2849 2850 if (nfc->flow_type == TCP_V6_FLOW || nfc->flow_type == UDP_V6_FLOW) { 2851 src_l3 = I40E_L3_V6_SRC_MASK; 2852 dst_l3 = I40E_L3_V6_DST_MASK; 2853 } else if (nfc->flow_type == TCP_V4_FLOW || 2854 nfc->flow_type == UDP_V4_FLOW) { 2855 src_l3 = I40E_L3_SRC_MASK; 2856 dst_l3 = I40E_L3_DST_MASK; 2857 } else { 2858 /* Any other flow type are not supported here */ 2859 return i_set; 2860 } 2861 2862 if (nfc->data & RXH_IP_SRC) 2863 i_set |= src_l3; 2864 else 2865 i_set &= ~src_l3; 2866 if (nfc->data & RXH_IP_DST) 2867 i_set |= dst_l3; 2868 else 2869 i_set &= ~dst_l3; 2870 2871 return i_set; 2872 } 2873 2874 /** 2875 * i40e_set_rss_hash_opt - Enable/Disable flow types for RSS hash 2876 * @pf: pointer to the physical function struct 2877 * @cmd: ethtool rxnfc command 2878 * 2879 * Returns Success if the flow input set is supported. 
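 *
 * Example (illustrative): a request like
 * "ethtool -N <dev> rx-flow-hash tcp4 sdfn" asks for
 * RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3 on
 * TCP_V4_FLOW; the code below then sets the corresponding L3/L4
 * source and destination bits in the GLQF_HASH_INSET registers for
 * that pctype and enables the pctype in PFQF_HENA. (The 's', 'd',
 * 'f', 'n' letters are the usual ethtool shorthand for IP src,
 * IP dst, and L4 bytes 0-1 / 2-3.)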
2880 **/ 2881 static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc) 2882 { 2883 struct i40e_hw *hw = &pf->hw; 2884 u64 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) | 2885 ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32); 2886 u8 flow_pctype = 0; 2887 u64 i_set, i_setc; 2888 2889 if (pf->flags & I40E_FLAG_MFP_ENABLED) { 2890 dev_err(&pf->pdev->dev, 2891 "Change of RSS hash input set is not supported when MFP mode is enabled\n"); 2892 return -EOPNOTSUPP; 2893 } 2894 2895 /* RSS does not support anything other than hashing 2896 * to queues on src and dst IPs and ports 2897 */ 2898 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST | 2899 RXH_L4_B_0_1 | RXH_L4_B_2_3)) 2900 return -EINVAL; 2901 2902 switch (nfc->flow_type) { 2903 case TCP_V4_FLOW: 2904 flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP; 2905 if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) 2906 hena |= 2907 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK); 2908 break; 2909 case TCP_V6_FLOW: 2910 flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV6_TCP; 2911 if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) 2912 hena |= 2913 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK); 2914 if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) 2915 hena |= 2916 BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK); 2917 break; 2918 case UDP_V4_FLOW: 2919 flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP; 2920 if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) 2921 hena |= 2922 BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | 2923 BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP); 2924 2925 hena |= BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4); 2926 break; 2927 case UDP_V6_FLOW: 2928 flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV6_UDP; 2929 if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) 2930 hena |= 2931 BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | 2932 BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP); 2933 2934 hena |= BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6); 2935 break; 2936 case AH_ESP_V4_FLOW: 2937 case AH_V4_FLOW: 2938 case ESP_V4_FLOW: 2939 case SCTP_V4_FLOW: 2940 if ((nfc->data & RXH_L4_B_0_1) || 2941 (nfc->data & RXH_L4_B_2_3)) 2942 return -EINVAL; 2943 hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER); 2944 break; 2945 case AH_ESP_V6_FLOW: 2946 case AH_V6_FLOW: 2947 case ESP_V6_FLOW: 2948 case SCTP_V6_FLOW: 2949 if ((nfc->data & RXH_L4_B_0_1) || 2950 (nfc->data & RXH_L4_B_2_3)) 2951 return -EINVAL; 2952 hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER); 2953 break; 2954 case IPV4_FLOW: 2955 hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | 2956 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4); 2957 break; 2958 case IPV6_FLOW: 2959 hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | 2960 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6); 2961 break; 2962 default: 2963 return -EINVAL; 2964 } 2965 2966 if (flow_pctype) { 2967 i_setc = (u64)i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, 2968 flow_pctype)) | 2969 ((u64)i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, 2970 flow_pctype)) << 32); 2971 i_set = i40e_get_rss_hash_bits(nfc, i_setc); 2972 i40e_write_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, flow_pctype), 2973 (u32)i_set); 2974 i40e_write_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, flow_pctype), 2975 (u32)(i_set >> 32)); 2976 hena |= BIT_ULL(flow_pctype); 2977 } 2978 2979 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena); 2980 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32)); 2981 i40e_flush(hw); 2982 2983 return 0; 2984 } 2985 2986 /** 2987 * i40e_update_ethtool_fdir_entry - Updates the 
fdir filter entry 2988 * @vsi: Pointer to the targeted VSI 2989 * @input: The filter to update or NULL to indicate deletion 2990 * @sw_idx: Software index to the filter 2991 * @cmd: The command to get or set Rx flow classification rules 2992 * 2993 * This function updates (or deletes) a Flow Director entry from 2994 * the hlist of the corresponding PF 2995 * 2996 * Returns 0 on success 2997 **/ 2998 static int i40e_update_ethtool_fdir_entry(struct i40e_vsi *vsi, 2999 struct i40e_fdir_filter *input, 3000 u16 sw_idx, 3001 struct ethtool_rxnfc *cmd) 3002 { 3003 struct i40e_fdir_filter *rule, *parent; 3004 struct i40e_pf *pf = vsi->back; 3005 struct hlist_node *node2; 3006 int err = -EINVAL; 3007 3008 parent = NULL; 3009 rule = NULL; 3010 3011 hlist_for_each_entry_safe(rule, node2, 3012 &pf->fdir_filter_list, fdir_node) { 3013 /* hash found, or no matching entry */ 3014 if (rule->fd_id >= sw_idx) 3015 break; 3016 parent = rule; 3017 } 3018 3019 /* if there is an old rule occupying our place remove it */ 3020 if (rule && (rule->fd_id == sw_idx)) { 3021 /* Remove this rule, since we're either deleting it, or 3022 * replacing it. 3023 */ 3024 err = i40e_add_del_fdir(vsi, rule, false); 3025 hlist_del(&rule->fdir_node); 3026 kfree(rule); 3027 pf->fdir_pf_active_filters--; 3028 } 3029 3030 /* If we weren't given an input, this is a delete, so just return the 3031 * error code indicating if there was an entry at the requested slot 3032 */ 3033 if (!input) 3034 return err; 3035 3036 /* Otherwise, install the new rule as requested */ 3037 INIT_HLIST_NODE(&input->fdir_node); 3038 3039 /* add filter to the list */ 3040 if (parent) 3041 hlist_add_behind(&input->fdir_node, &parent->fdir_node); 3042 else 3043 hlist_add_head(&input->fdir_node, 3044 &pf->fdir_filter_list); 3045 3046 /* update counts */ 3047 pf->fdir_pf_active_filters++; 3048 3049 return 0; 3050 } 3051 3052 /** 3053 * i40e_prune_flex_pit_list - Cleanup unused entries in FLX_PIT table 3054 * @pf: pointer to PF structure 3055 * 3056 * This function searches the list of filters and determines which FLX_PIT 3057 * entries are still required. It will prune any entries which are no longer 3058 * in use after the deletion. 3059 **/ 3060 static void i40e_prune_flex_pit_list(struct i40e_pf *pf) 3061 { 3062 struct i40e_flex_pit *entry, *tmp; 3063 struct i40e_fdir_filter *rule; 3064 3065 /* First, we'll check the l3 table */ 3066 list_for_each_entry_safe(entry, tmp, &pf->l3_flex_pit_list, list) { 3067 bool found = false; 3068 3069 hlist_for_each_entry(rule, &pf->fdir_filter_list, fdir_node) { 3070 if (rule->flow_type != IP_USER_FLOW) 3071 continue; 3072 if (rule->flex_filter && 3073 rule->flex_offset == entry->src_offset) { 3074 found = true; 3075 break; 3076 } 3077 } 3078 3079 /* If we didn't find the filter, then we can prune this entry 3080 * from the list. 3081 */ 3082 if (!found) { 3083 list_del(&entry->list); 3084 kfree(entry); 3085 } 3086 } 3087 3088 /* Followed by the L4 table */ 3089 list_for_each_entry_safe(entry, tmp, &pf->l4_flex_pit_list, list) { 3090 bool found = false; 3091 3092 hlist_for_each_entry(rule, &pf->fdir_filter_list, fdir_node) { 3093 /* Skip this filter if it's L3, since we already 3094 * checked those in the above loop 3095 */ 3096 if (rule->flow_type == IP_USER_FLOW) 3097 continue; 3098 if (rule->flex_filter && 3099 rule->flex_offset == entry->src_offset) { 3100 found = true; 3101 break; 3102 } 3103 } 3104 3105 /* If we didn't find the filter, then we can prune this entry 3106 * from the list. 
3107 */ 3108 if (!found) { 3109 list_del(&entry->list); 3110 kfree(entry); 3111 } 3112 } 3113 } 3114 3115 /** 3116 * i40e_del_fdir_entry - Deletes a Flow Director filter entry 3117 * @vsi: Pointer to the targeted VSI 3118 * @cmd: The command to get or set Rx flow classification rules 3119 * 3120 * The function removes a Flow Director filter entry from the 3121 * hlist of the corresponding PF 3122 * 3123 * Returns 0 on success 3124 */ 3125 static int i40e_del_fdir_entry(struct i40e_vsi *vsi, 3126 struct ethtool_rxnfc *cmd) 3127 { 3128 struct ethtool_rx_flow_spec *fsp = 3129 (struct ethtool_rx_flow_spec *)&cmd->fs; 3130 struct i40e_pf *pf = vsi->back; 3131 int ret = 0; 3132 3133 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) || 3134 test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) 3135 return -EBUSY; 3136 3137 if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state)) 3138 return -EBUSY; 3139 3140 ret = i40e_update_ethtool_fdir_entry(vsi, NULL, fsp->location, cmd); 3141 3142 i40e_prune_flex_pit_list(pf); 3143 3144 i40e_fdir_check_and_reenable(pf); 3145 return ret; 3146 } 3147 3148 /** 3149 * i40e_unused_pit_index - Find an unused PIT index for given list 3150 * @pf: the PF data structure 3151 * 3152 * Find the first unused flexible PIT index entry. We search both the L3 and 3153 * L4 flexible PIT lists so that the returned index is unique and unused by 3154 * either currently programmed L3 or L4 filters. We use a bit field as storage 3155 * to track which indexes are already used. 3156 **/ 3157 static u8 i40e_unused_pit_index(struct i40e_pf *pf) 3158 { 3159 unsigned long available_index = 0xFF; 3160 struct i40e_flex_pit *entry; 3161 3162 /* We need to make sure that the new index isn't in use by either L3 3163 * or L4 filters so that IP_USER_FLOW filters can program both L3 and 3164 * L4 to use the same index. 3165 */ 3166 3167 list_for_each_entry(entry, &pf->l4_flex_pit_list, list) 3168 clear_bit(entry->pit_index, &available_index); 3169 3170 list_for_each_entry(entry, &pf->l3_flex_pit_list, list) 3171 clear_bit(entry->pit_index, &available_index); 3172 3173 return find_first_bit(&available_index, 8); 3174 } 3175 3176 /** 3177 * i40e_find_flex_offset - Find an existing flex src_offset 3178 * @flex_pit_list: L3 or L4 flex PIT list 3179 * @src_offset: new src_offset to find 3180 * 3181 * Searches the flex_pit_list for an existing offset. If no offset is 3182 * currently programmed, then this will return an ERR_PTR if there is no space 3183 * to add a new offset, otherwise it returns NULL. 3184 **/ 3185 static 3186 struct i40e_flex_pit *i40e_find_flex_offset(struct list_head *flex_pit_list, 3187 u16 src_offset) 3188 { 3189 struct i40e_flex_pit *entry; 3190 int size = 0; 3191 3192 /* Search for the src_offset first. If we find a matching entry 3193 * already programmed, we can simply re-use it. 3194 */ 3195 list_for_each_entry(entry, flex_pit_list, list) { 3196 size++; 3197 if (entry->src_offset == src_offset) 3198 return entry; 3199 } 3200 3201 /* If we haven't found an entry yet, then the provided src offset has 3202 * not yet been programmed. We will program the src offset later on, 3203 * but we need to indicate whether there is enough space to do so 3204 * here. We'll make use of ERR_PTR for this purpose. 
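 *
 * A minimal caller sketch (illustrative only, not lifted from the
 * driver) showing how the three possible return values are meant to
 * be consumed together with i40e_add_flex_offset():
 *
 *	entry = i40e_find_flex_offset(&pf->l4_flex_pit_list, src_offset);
 *	if (IS_ERR(entry))
 *		return PTR_ERR(entry);	(table is full: -ENOSPC)
 *	if (!entry)
 *		err = i40e_add_flex_offset(&pf->l4_flex_pit_list,
 *					   src_offset, pit_index);
 *	(a non-NULL, non-error entry can simply be re-used)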
3205 */ 3206 if (size >= I40E_FLEX_PIT_TABLE_SIZE) 3207 return ERR_PTR(-ENOSPC); 3208 3209 return NULL; 3210 } 3211 3212 /** 3213 * i40e_add_flex_offset - Add src_offset to flex PIT table list 3214 * @flex_pit_list: L3 or L4 flex PIT list 3215 * @src_offset: new src_offset to add 3216 * @pit_index: the PIT index to program 3217 * 3218 * This function programs the new src_offset to the list. It is expected that 3219 * i40e_find_flex_offset has already been tried and returned NULL, indicating 3220 * that this offset is not programmed, and that the list has enough space to 3221 * store another offset. 3222 * 3223 * Returns 0 on success, and negative value on error. 3224 **/ 3225 static int i40e_add_flex_offset(struct list_head *flex_pit_list, 3226 u16 src_offset, 3227 u8 pit_index) 3228 { 3229 struct i40e_flex_pit *new_pit, *entry; 3230 3231 new_pit = kzalloc(sizeof(*entry), GFP_KERNEL); 3232 if (!new_pit) 3233 return -ENOMEM; 3234 3235 new_pit->src_offset = src_offset; 3236 new_pit->pit_index = pit_index; 3237 3238 /* We need to insert this item such that the list is sorted by 3239 * src_offset in ascending order. 3240 */ 3241 list_for_each_entry(entry, flex_pit_list, list) { 3242 if (new_pit->src_offset < entry->src_offset) { 3243 list_add_tail(&new_pit->list, &entry->list); 3244 return 0; 3245 } 3246 3247 /* If we found an entry with our offset already programmed we 3248 * can simply return here, after freeing the memory. However, 3249 * if the pit_index does not match we need to report an error. 3250 */ 3251 if (new_pit->src_offset == entry->src_offset) { 3252 int err = 0; 3253 3254 /* If the PIT index is not the same we can't re-use 3255 * the entry, so we must report an error. 3256 */ 3257 if (new_pit->pit_index != entry->pit_index) 3258 err = -EINVAL; 3259 3260 kfree(new_pit); 3261 return err; 3262 } 3263 } 3264 3265 /* If we reached here, then we haven't yet added the item. This means 3266 * that we should add the item at the end of the list. 3267 */ 3268 list_add_tail(&new_pit->list, flex_pit_list); 3269 return 0; 3270 } 3271 3272 /** 3273 * __i40e_reprogram_flex_pit - Re-program specific FLX_PIT table 3274 * @pf: Pointer to the PF structure 3275 * @flex_pit_list: list of flexible src offsets in use 3276 * #flex_pit_start: index to first entry for this section of the table 3277 * 3278 * In order to handle flexible data, the hardware uses a table of values 3279 * called the FLX_PIT table. This table is used to indicate which sections of 3280 * the input correspond to what PIT index values. Unfortunately, hardware is 3281 * very restrictive about programming this table. Entries must be ordered by 3282 * src_offset in ascending order, without duplicates. Additionally, unused 3283 * entries must be set to the unused index value, and must have valid size and 3284 * length according to the src_offset ordering. 3285 * 3286 * This function will reprogram the FLX_PIT register from a book-keeping 3287 * structure that we guarantee is already ordered correctly, and has no more 3288 * than 3 entries. 3289 * 3290 * To make things easier, we only support flexible values of one word length, 3291 * rather than allowing variable length flexible values. 3292 **/ 3293 static void __i40e_reprogram_flex_pit(struct i40e_pf *pf, 3294 struct list_head *flex_pit_list, 3295 int flex_pit_start) 3296 { 3297 struct i40e_flex_pit *entry = NULL; 3298 u16 last_offset = 0; 3299 int i = 0, j = 0; 3300 3301 /* First, loop over the list of flex PIT entries, and reprogram the 3302 * registers. 
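 *
 * Rough example (illustrative): with a single programmed entry of
 * src_offset 4 using pit_index 0, the three-slot section would end up
 * as one real entry (destination 50, size 1, offset 4) followed by
 * two I40E_FLEX_DEST_UNUSED entries with size 1 and offsets 5 and 6,
 * so that the src_offset values stay strictly increasing as the
 * hardware expects.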
3303 */ 3304 list_for_each_entry(entry, flex_pit_list, list) { 3305 /* We have to be careful when programming values for the 3306 * largest SRC_OFFSET value. It is possible that adding 3307 * additional empty values at the end would overflow the space 3308 * for the SRC_OFFSET in the FLX_PIT register. To avoid this, 3309 * we check here and add the empty values prior to adding the 3310 * largest value. 3311 * 3312 * To determine this, we will use a loop from i+1 to 3, which 3313 * will determine whether the unused entries would have valid 3314 * SRC_OFFSET. Note that there cannot be extra entries past 3315 * this value, because the only valid values would have been 3316 * larger than I40E_MAX_FLEX_SRC_OFFSET, and thus would not 3317 * have been added to the list in the first place. 3318 */ 3319 for (j = i + 1; j < 3; j++) { 3320 u16 offset = entry->src_offset + j; 3321 int index = flex_pit_start + i; 3322 u32 value = I40E_FLEX_PREP_VAL(I40E_FLEX_DEST_UNUSED, 3323 1, 3324 offset - 3); 3325 3326 if (offset > I40E_MAX_FLEX_SRC_OFFSET) { 3327 i40e_write_rx_ctl(&pf->hw, 3328 I40E_PRTQF_FLX_PIT(index), 3329 value); 3330 i++; 3331 } 3332 } 3333 3334 /* Now, we can program the actual value into the table */ 3335 i40e_write_rx_ctl(&pf->hw, 3336 I40E_PRTQF_FLX_PIT(flex_pit_start + i), 3337 I40E_FLEX_PREP_VAL(entry->pit_index + 50, 3338 1, 3339 entry->src_offset)); 3340 i++; 3341 } 3342 3343 /* In order to program the last entries in the table, we need to 3344 * determine the valid offset. If the list is empty, we'll just start 3345 * with 0. Otherwise, we'll start with the last item offset and add 1. 3346 * This ensures that all entries have valid sizes. If we don't do this 3347 * correctly, the hardware will disable flexible field parsing. 3348 */ 3349 if (!list_empty(flex_pit_list)) 3350 last_offset = list_prev_entry(entry, list)->src_offset + 1; 3351 3352 for (; i < 3; i++, last_offset++) { 3353 i40e_write_rx_ctl(&pf->hw, 3354 I40E_PRTQF_FLX_PIT(flex_pit_start + i), 3355 I40E_FLEX_PREP_VAL(I40E_FLEX_DEST_UNUSED, 3356 1, 3357 last_offset)); 3358 } 3359 } 3360 3361 /** 3362 * i40e_reprogram_flex_pit - Reprogram all FLX_PIT tables after input set change 3363 * @pf: pointer to the PF structure 3364 * 3365 * This function reprograms both the L3 and L4 FLX_PIT tables. See the 3366 * internal helper function for implementation details. 3367 **/ 3368 static void i40e_reprogram_flex_pit(struct i40e_pf *pf) 3369 { 3370 __i40e_reprogram_flex_pit(pf, &pf->l3_flex_pit_list, 3371 I40E_FLEX_PIT_IDX_START_L3); 3372 3373 __i40e_reprogram_flex_pit(pf, &pf->l4_flex_pit_list, 3374 I40E_FLEX_PIT_IDX_START_L4); 3375 3376 /* We also need to program the L3 and L4 GLQF ORT register */ 3377 i40e_write_rx_ctl(&pf->hw, 3378 I40E_GLQF_ORT(I40E_L3_GLQF_ORT_IDX), 3379 I40E_ORT_PREP_VAL(I40E_FLEX_PIT_IDX_START_L3, 3380 3, 1)); 3381 3382 i40e_write_rx_ctl(&pf->hw, 3383 I40E_GLQF_ORT(I40E_L4_GLQF_ORT_IDX), 3384 I40E_ORT_PREP_VAL(I40E_FLEX_PIT_IDX_START_L4, 3385 3, 1)); 3386 } 3387 3388 /** 3389 * i40e_flow_str - Converts a flow_type into a human readable string 3390 * @flow_type: the flow type from a flow specification 3391 * 3392 * Currently only flow types we support are included here, and the string 3393 * value attempts to match what ethtool would use to configure this flow type. 
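 *
 * For example, TCP_V4_FLOW is reported as "tcp4", matching the
 * flow-type keyword a user would pass to something like
 * "ethtool -N <dev> flow-type tcp4 ...".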
3394 **/ 3395 static const char *i40e_flow_str(struct ethtool_rx_flow_spec *fsp) 3396 { 3397 switch (fsp->flow_type & ~FLOW_EXT) { 3398 case TCP_V4_FLOW: 3399 return "tcp4"; 3400 case UDP_V4_FLOW: 3401 return "udp4"; 3402 case SCTP_V4_FLOW: 3403 return "sctp4"; 3404 case IP_USER_FLOW: 3405 return "ip4"; 3406 default: 3407 return "unknown"; 3408 } 3409 } 3410 3411 /** 3412 * i40e_pit_index_to_mask - Return the FLEX mask for a given PIT index 3413 * @pit_index: PIT index to convert 3414 * 3415 * Returns the mask for a given PIT index. Will return 0 if the pit_index is 3416 * of range. 3417 **/ 3418 static u64 i40e_pit_index_to_mask(int pit_index) 3419 { 3420 switch (pit_index) { 3421 case 0: 3422 return I40E_FLEX_50_MASK; 3423 case 1: 3424 return I40E_FLEX_51_MASK; 3425 case 2: 3426 return I40E_FLEX_52_MASK; 3427 case 3: 3428 return I40E_FLEX_53_MASK; 3429 case 4: 3430 return I40E_FLEX_54_MASK; 3431 case 5: 3432 return I40E_FLEX_55_MASK; 3433 case 6: 3434 return I40E_FLEX_56_MASK; 3435 case 7: 3436 return I40E_FLEX_57_MASK; 3437 default: 3438 return 0; 3439 } 3440 } 3441 3442 /** 3443 * i40e_print_input_set - Show changes between two input sets 3444 * @vsi: the vsi being configured 3445 * @old: the old input set 3446 * @new: the new input set 3447 * 3448 * Print the difference between old and new input sets by showing which series 3449 * of words are toggled on or off. Only displays the bits we actually support 3450 * changing. 3451 **/ 3452 static void i40e_print_input_set(struct i40e_vsi *vsi, u64 old, u64 new) 3453 { 3454 struct i40e_pf *pf = vsi->back; 3455 bool old_value, new_value; 3456 int i; 3457 3458 old_value = !!(old & I40E_L3_SRC_MASK); 3459 new_value = !!(new & I40E_L3_SRC_MASK); 3460 if (old_value != new_value) 3461 netif_info(pf, drv, vsi->netdev, "L3 source address: %s -> %s\n", 3462 old_value ? "ON" : "OFF", 3463 new_value ? "ON" : "OFF"); 3464 3465 old_value = !!(old & I40E_L3_DST_MASK); 3466 new_value = !!(new & I40E_L3_DST_MASK); 3467 if (old_value != new_value) 3468 netif_info(pf, drv, vsi->netdev, "L3 destination address: %s -> %s\n", 3469 old_value ? "ON" : "OFF", 3470 new_value ? "ON" : "OFF"); 3471 3472 old_value = !!(old & I40E_L4_SRC_MASK); 3473 new_value = !!(new & I40E_L4_SRC_MASK); 3474 if (old_value != new_value) 3475 netif_info(pf, drv, vsi->netdev, "L4 source port: %s -> %s\n", 3476 old_value ? "ON" : "OFF", 3477 new_value ? "ON" : "OFF"); 3478 3479 old_value = !!(old & I40E_L4_DST_MASK); 3480 new_value = !!(new & I40E_L4_DST_MASK); 3481 if (old_value != new_value) 3482 netif_info(pf, drv, vsi->netdev, "L4 destination port: %s -> %s\n", 3483 old_value ? "ON" : "OFF", 3484 new_value ? "ON" : "OFF"); 3485 3486 old_value = !!(old & I40E_VERIFY_TAG_MASK); 3487 new_value = !!(new & I40E_VERIFY_TAG_MASK); 3488 if (old_value != new_value) 3489 netif_info(pf, drv, vsi->netdev, "SCTP verification tag: %s -> %s\n", 3490 old_value ? "ON" : "OFF", 3491 new_value ? "ON" : "OFF"); 3492 3493 /* Show change of flexible filter entries */ 3494 for (i = 0; i < I40E_FLEX_INDEX_ENTRIES; i++) { 3495 u64 flex_mask = i40e_pit_index_to_mask(i); 3496 3497 old_value = !!(old & flex_mask); 3498 new_value = !!(new & flex_mask); 3499 if (old_value != new_value) 3500 netif_info(pf, drv, vsi->netdev, "FLEX index %d: %s -> %s\n", 3501 i, 3502 old_value ? "ON" : "OFF", 3503 new_value ? 
"ON" : "OFF"); 3504 } 3505 3506 netif_info(pf, drv, vsi->netdev, " Current input set: %0llx\n", 3507 old); 3508 netif_info(pf, drv, vsi->netdev, "Requested input set: %0llx\n", 3509 new); 3510 } 3511 3512 /** 3513 * i40e_check_fdir_input_set - Check that a given rx_flow_spec mask is valid 3514 * @vsi: pointer to the targeted VSI 3515 * @fsp: pointer to Rx flow specification 3516 * @userdef: userdefined data from flow specification 3517 * 3518 * Ensures that a given ethtool_rx_flow_spec has a valid mask. Some support 3519 * for partial matches exists with a few limitations. First, hardware only 3520 * supports masking by word boundary (2 bytes) and not per individual bit. 3521 * Second, hardware is limited to using one mask for a flow type and cannot 3522 * use a separate mask for each filter. 3523 * 3524 * To support these limitations, if we already have a configured filter for 3525 * the specified type, this function enforces that new filters of the type 3526 * match the configured input set. Otherwise, if we do not have a filter of 3527 * the specified type, we allow the input set to be updated to match the 3528 * desired filter. 3529 * 3530 * To help ensure that administrators understand why filters weren't displayed 3531 * as supported, we print a diagnostic message displaying how the input set 3532 * would change and warning to delete the preexisting filters if required. 3533 * 3534 * Returns 0 on successful input set match, and a negative return code on 3535 * failure. 3536 **/ 3537 static int i40e_check_fdir_input_set(struct i40e_vsi *vsi, 3538 struct ethtool_rx_flow_spec *fsp, 3539 struct i40e_rx_flow_userdef *userdef) 3540 { 3541 struct i40e_pf *pf = vsi->back; 3542 struct ethtool_tcpip4_spec *tcp_ip4_spec; 3543 struct ethtool_usrip4_spec *usr_ip4_spec; 3544 u64 current_mask, new_mask; 3545 bool new_flex_offset = false; 3546 bool flex_l3 = false; 3547 u16 *fdir_filter_count; 3548 u16 index, src_offset = 0; 3549 u8 pit_index = 0; 3550 int err; 3551 3552 switch (fsp->flow_type & ~FLOW_EXT) { 3553 case SCTP_V4_FLOW: 3554 index = I40E_FILTER_PCTYPE_NONF_IPV4_SCTP; 3555 fdir_filter_count = &pf->fd_sctp4_filter_cnt; 3556 break; 3557 case TCP_V4_FLOW: 3558 index = I40E_FILTER_PCTYPE_NONF_IPV4_TCP; 3559 fdir_filter_count = &pf->fd_tcp4_filter_cnt; 3560 break; 3561 case UDP_V4_FLOW: 3562 index = I40E_FILTER_PCTYPE_NONF_IPV4_UDP; 3563 fdir_filter_count = &pf->fd_udp4_filter_cnt; 3564 break; 3565 case IP_USER_FLOW: 3566 index = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER; 3567 fdir_filter_count = &pf->fd_ip4_filter_cnt; 3568 flex_l3 = true; 3569 break; 3570 default: 3571 return -EOPNOTSUPP; 3572 } 3573 3574 /* Read the current input set from register memory. */ 3575 current_mask = i40e_read_fd_input_set(pf, index); 3576 new_mask = current_mask; 3577 3578 /* Determine, if any, the required changes to the input set in order 3579 * to support the provided mask. 3580 * 3581 * Hardware only supports masking at word (2 byte) granularity and does 3582 * not support full bitwise masking. This implementation simplifies 3583 * even further and only supports fully enabled or fully disabled 3584 * masks for each field, even though we could split the ip4src and 3585 * ip4dst fields. 
3586 */ 3587 switch (fsp->flow_type & ~FLOW_EXT) { 3588 case SCTP_V4_FLOW: 3589 new_mask &= ~I40E_VERIFY_TAG_MASK; 3590 /* Fall through */ 3591 case TCP_V4_FLOW: 3592 case UDP_V4_FLOW: 3593 tcp_ip4_spec = &fsp->m_u.tcp_ip4_spec; 3594 3595 /* IPv4 source address */ 3596 if (tcp_ip4_spec->ip4src == htonl(0xFFFFFFFF)) 3597 new_mask |= I40E_L3_SRC_MASK; 3598 else if (!tcp_ip4_spec->ip4src) 3599 new_mask &= ~I40E_L3_SRC_MASK; 3600 else 3601 return -EOPNOTSUPP; 3602 3603 /* IPv4 destination address */ 3604 if (tcp_ip4_spec->ip4dst == htonl(0xFFFFFFFF)) 3605 new_mask |= I40E_L3_DST_MASK; 3606 else if (!tcp_ip4_spec->ip4dst) 3607 new_mask &= ~I40E_L3_DST_MASK; 3608 else 3609 return -EOPNOTSUPP; 3610 3611 /* L4 source port */ 3612 if (tcp_ip4_spec->psrc == htons(0xFFFF)) 3613 new_mask |= I40E_L4_SRC_MASK; 3614 else if (!tcp_ip4_spec->psrc) 3615 new_mask &= ~I40E_L4_SRC_MASK; 3616 else 3617 return -EOPNOTSUPP; 3618 3619 /* L4 destination port */ 3620 if (tcp_ip4_spec->pdst == htons(0xFFFF)) 3621 new_mask |= I40E_L4_DST_MASK; 3622 else if (!tcp_ip4_spec->pdst) 3623 new_mask &= ~I40E_L4_DST_MASK; 3624 else 3625 return -EOPNOTSUPP; 3626 3627 /* Filtering on Type of Service is not supported. */ 3628 if (tcp_ip4_spec->tos) 3629 return -EOPNOTSUPP; 3630 3631 break; 3632 case IP_USER_FLOW: 3633 usr_ip4_spec = &fsp->m_u.usr_ip4_spec; 3634 3635 /* IPv4 source address */ 3636 if (usr_ip4_spec->ip4src == htonl(0xFFFFFFFF)) 3637 new_mask |= I40E_L3_SRC_MASK; 3638 else if (!usr_ip4_spec->ip4src) 3639 new_mask &= ~I40E_L3_SRC_MASK; 3640 else 3641 return -EOPNOTSUPP; 3642 3643 /* IPv4 destination address */ 3644 if (usr_ip4_spec->ip4dst == htonl(0xFFFFFFFF)) 3645 new_mask |= I40E_L3_DST_MASK; 3646 else if (!usr_ip4_spec->ip4dst) 3647 new_mask &= ~I40E_L3_DST_MASK; 3648 else 3649 return -EOPNOTSUPP; 3650 3651 /* First 4 bytes of L4 header */ 3652 if (usr_ip4_spec->l4_4_bytes == htonl(0xFFFFFFFF)) 3653 new_mask |= I40E_L4_SRC_MASK | I40E_L4_DST_MASK; 3654 else if (!usr_ip4_spec->l4_4_bytes) 3655 new_mask &= ~(I40E_L4_SRC_MASK | I40E_L4_DST_MASK); 3656 else 3657 return -EOPNOTSUPP; 3658 3659 /* Filtering on Type of Service is not supported. */ 3660 if (usr_ip4_spec->tos) 3661 return -EOPNOTSUPP; 3662 3663 /* Filtering on IP version is not supported */ 3664 if (usr_ip4_spec->ip_ver) 3665 return -EINVAL; 3666 3667 /* Filtering on L4 protocol is not supported */ 3668 if (usr_ip4_spec->proto) 3669 return -EINVAL; 3670 3671 break; 3672 default: 3673 return -EOPNOTSUPP; 3674 } 3675 3676 /* First, clear all flexible filter entries */ 3677 new_mask &= ~I40E_FLEX_INPUT_MASK; 3678 3679 /* If we have a flexible filter, try to add this offset to the correct 3680 * flexible filter PIT list. Once finished, we can update the mask. 3681 * If the src_offset changed, we will get a new mask value which will 3682 * trigger an input set change. 3683 */ 3684 if (userdef->flex_filter) { 3685 struct i40e_flex_pit *l3_flex_pit = NULL, *flex_pit = NULL; 3686 3687 /* Flexible offset must be even, since the flexible payload 3688 * must be aligned on 2-byte boundary. 
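 *
 * For example, a user-defined flexible offset of 6 bytes passes this check
 * and becomes word offset 3 below, while an odd offset such as 7 is
 * rejected.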
3689 */ 3690 if (userdef->flex_offset & 0x1) { 3691 dev_warn(&pf->pdev->dev, 3692 "Flexible data offset must be 2-byte aligned\n"); 3693 return -EINVAL; 3694 } 3695 3696 src_offset = userdef->flex_offset >> 1; 3697 3698 /* FLX_PIT source offset value is only so large */ 3699 if (src_offset > I40E_MAX_FLEX_SRC_OFFSET) { 3700 dev_warn(&pf->pdev->dev, 3701 "Flexible data must reside within first 64 bytes of the packet payload\n"); 3702 return -EINVAL; 3703 } 3704 3705 /* See if this offset has already been programmed. If we get 3706 * an ERR_PTR, then the filter is not safe to add. Otherwise, 3707 * if we get a NULL pointer, this means we will need to add 3708 * the offset. 3709 */ 3710 flex_pit = i40e_find_flex_offset(&pf->l4_flex_pit_list, 3711 src_offset); 3712 if (IS_ERR(flex_pit)) 3713 return PTR_ERR(flex_pit); 3714 3715 /* IP_USER_FLOW filters match both L4 (ICMP) and L3 (unknown) 3716 * packet types, and thus we need to program both L3 and L4 3717 * flexible values. These must have identical flexible index, 3718 * as otherwise we can't correctly program the input set. So 3719 * we'll find both an L3 and L4 index and make sure they are 3720 * the same. 3721 */ 3722 if (flex_l3) { 3723 l3_flex_pit = 3724 i40e_find_flex_offset(&pf->l3_flex_pit_list, 3725 src_offset); 3726 if (IS_ERR(l3_flex_pit)) 3727 return PTR_ERR(l3_flex_pit); 3728 3729 if (flex_pit) { 3730 /* If we already had a matching L4 entry, we 3731 * need to make sure that the L3 entry we 3732 * obtained uses the same index. 3733 */ 3734 if (l3_flex_pit) { 3735 if (l3_flex_pit->pit_index != 3736 flex_pit->pit_index) { 3737 return -EINVAL; 3738 } 3739 } else { 3740 new_flex_offset = true; 3741 } 3742 } else { 3743 flex_pit = l3_flex_pit; 3744 } 3745 } 3746 3747 /* If we didn't find an existing flex offset, we need to 3748 * program a new one. However, we don't immediately program it 3749 * here because we will wait to program until after we check 3750 * that it is safe to change the input set. 3751 */ 3752 if (!flex_pit) { 3753 new_flex_offset = true; 3754 pit_index = i40e_unused_pit_index(pf); 3755 } else { 3756 pit_index = flex_pit->pit_index; 3757 } 3758 3759 /* Update the mask with the new offset */ 3760 new_mask |= i40e_pit_index_to_mask(pit_index); 3761 } 3762 3763 /* If the mask and flexible filter offsets for this filter match the 3764 * currently programmed values we don't need any input set change, so 3765 * this filter is safe to install. 3766 */ 3767 if (new_mask == current_mask && !new_flex_offset) 3768 return 0; 3769 3770 netif_info(pf, drv, vsi->netdev, "Input set change requested for %s flows:\n", 3771 i40e_flow_str(fsp)); 3772 i40e_print_input_set(vsi, current_mask, new_mask); 3773 if (new_flex_offset) { 3774 netif_info(pf, drv, vsi->netdev, "FLEX index %d: Offset -> %d", 3775 pit_index, src_offset); 3776 } 3777 3778 /* Hardware input sets are global across multiple ports, so even the 3779 * main port cannot change them when in MFP mode as this would impact 3780 * any filters on the other ports. 3781 */ 3782 if (pf->flags & I40E_FLAG_MFP_ENABLED) { 3783 netif_err(pf, drv, vsi->netdev, "Cannot change Flow Director input sets while MFP is enabled\n"); 3784 return -EOPNOTSUPP; 3785 } 3786 3787 /* This filter requires us to update the input set. However, hardware 3788 * only supports one input set per flow type, and does not support 3789 * separate masks for each filter. This means that we can only support 3790 * a single mask for all filters of a specific type. 
3791 * 3792 * If we have preexisting filters, they obviously depend on the 3793 * current programmed input set. Display a diagnostic message in this 3794 * case explaining why the filter could not be accepted. 3795 */ 3796 if (*fdir_filter_count) { 3797 netif_err(pf, drv, vsi->netdev, "Cannot change input set for %s flows until %d preexisting filters are removed\n", 3798 i40e_flow_str(fsp), 3799 *fdir_filter_count); 3800 return -EOPNOTSUPP; 3801 } 3802 3803 i40e_write_fd_input_set(pf, index, new_mask); 3804 3805 /* Add the new offset and update table, if necessary */ 3806 if (new_flex_offset) { 3807 err = i40e_add_flex_offset(&pf->l4_flex_pit_list, src_offset, 3808 pit_index); 3809 if (err) 3810 return err; 3811 3812 if (flex_l3) { 3813 err = i40e_add_flex_offset(&pf->l3_flex_pit_list, 3814 src_offset, 3815 pit_index); 3816 if (err) 3817 return err; 3818 } 3819 3820 i40e_reprogram_flex_pit(pf); 3821 } 3822 3823 return 0; 3824 } 3825 3826 /** 3827 * i40e_add_fdir_ethtool - Add/Remove Flow Director filters 3828 * @vsi: pointer to the targeted VSI 3829 * @cmd: command to get or set RX flow classification rules 3830 * 3831 * Add Flow Director filters for a specific flow spec based on their 3832 * protocol. Returns 0 if the filters were successfully added. 3833 **/ 3834 static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi, 3835 struct ethtool_rxnfc *cmd) 3836 { 3837 struct i40e_rx_flow_userdef userdef; 3838 struct ethtool_rx_flow_spec *fsp; 3839 struct i40e_fdir_filter *input; 3840 u16 dest_vsi = 0, q_index = 0; 3841 struct i40e_pf *pf; 3842 int ret = -EINVAL; 3843 u8 dest_ctl; 3844 3845 if (!vsi) 3846 return -EINVAL; 3847 pf = vsi->back; 3848 3849 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) 3850 return -EOPNOTSUPP; 3851 3852 if (pf->flags & I40E_FLAG_FD_SB_AUTO_DISABLED) 3853 return -ENOSPC; 3854 3855 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) || 3856 test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) 3857 return -EBUSY; 3858 3859 if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state)) 3860 return -EBUSY; 3861 3862 fsp = (struct ethtool_rx_flow_spec *)&cmd->fs; 3863 3864 /* Parse the user-defined field */ 3865 if (i40e_parse_rx_flow_user_data(fsp, &userdef)) 3866 return -EINVAL; 3867 3868 /* Extended MAC field is not supported */ 3869 if (fsp->flow_type & FLOW_MAC_EXT) 3870 return -EINVAL; 3871 3872 ret = i40e_check_fdir_input_set(vsi, fsp, &userdef); 3873 if (ret) 3874 return ret; 3875 3876 if (fsp->location >= (pf->hw.func_caps.fd_filters_best_effort + 3877 pf->hw.func_caps.fd_filters_guaranteed)) { 3878 return -EINVAL; 3879 } 3880 3881 /* ring_cookie is either the drop index, or is a mask of the queue 3882 * index and VF id we wish to target. 
3883 */
3884 if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
3885 dest_ctl = I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET;
3886 } else {
3887 u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
3888 u8 vf = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie);
3889
3890 if (!vf) {
3891 if (ring >= vsi->num_queue_pairs)
3892 return -EINVAL;
3893 dest_vsi = vsi->id;
3894 } else {
3895 /* VFs are zero-indexed, so we subtract one here */
3896 vf--;
3897
3898 if (vf >= pf->num_alloc_vfs)
3899 return -EINVAL;
3900 if (ring >= pf->vf[vf].num_queue_pairs)
3901 return -EINVAL;
3902 dest_vsi = pf->vf[vf].lan_vsi_id;
3903 }
3904 dest_ctl = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX;
3905 q_index = ring;
3906 }
3907
3908 input = kzalloc(sizeof(*input), GFP_KERNEL);
3909
3910 if (!input)
3911 return -ENOMEM;
3912
3913 input->fd_id = fsp->location;
3914 input->q_index = q_index;
3915 input->dest_vsi = dest_vsi;
3916 input->dest_ctl = dest_ctl;
3917 input->fd_status = I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID;
3918 input->cnt_index = I40E_FD_SB_STAT_IDX(pf->hw.pf_id);
3921 input->flow_type = fsp->flow_type & ~FLOW_EXT;
3922 input->ip4_proto = fsp->h_u.usr_ip4_spec.proto;
3923
3924 /* Reverse the src and dest notion, since the HW expects them to be from
3925 * the Tx perspective whereas the user input is from the Rx filter view.
3926 */
3927 input->dst_port = fsp->h_u.tcp_ip4_spec.psrc;
3928 input->src_port = fsp->h_u.tcp_ip4_spec.pdst;
3929 input->dst_ip = fsp->h_u.tcp_ip4_spec.ip4src;
3930 input->src_ip = fsp->h_u.tcp_ip4_spec.ip4dst;
3931
3932 if (userdef.flex_filter) {
3933 input->flex_filter = true;
3934 input->flex_word = cpu_to_be16(userdef.flex_word);
3935 input->flex_offset = userdef.flex_offset;
3936 }
3937
3938 ret = i40e_add_del_fdir(vsi, input, true);
3939 if (ret)
3940 goto free_input;
3941
3942 /* Add the input filter to the fdir_input_list, possibly replacing
3943 * a previous filter. Do not free the input structure after adding it
3944 * to the list as this would cause a use-after-free bug.
3945 */
3946 i40e_update_ethtool_fdir_entry(vsi, input, fsp->location, NULL);
3947
3948 return 0;
3949
3950 free_input:
3951 kfree(input);
3952 return ret;
3953 }
3954
3955 /**
3956 * i40e_set_rxnfc - command to set RX flow classification rules
3957 * @netdev: network interface device structure
3958 * @cmd: ethtool rxnfc command
3959 *
3960 * Returns 0 if the command is supported.
3961 **/
3962 static int i40e_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
3963 {
3964 struct i40e_netdev_priv *np = netdev_priv(netdev);
3965 struct i40e_vsi *vsi = np->vsi;
3966 struct i40e_pf *pf = vsi->back;
3967 int ret = -EOPNOTSUPP;
3968
3969 switch (cmd->cmd) {
3970 case ETHTOOL_SRXFH:
3971 ret = i40e_set_rss_hash_opt(pf, cmd);
3972 break;
3973 case ETHTOOL_SRXCLSRLINS:
3974 ret = i40e_add_fdir_ethtool(vsi, cmd);
3975 break;
3976 case ETHTOOL_SRXCLSRLDEL:
3977 ret = i40e_del_fdir_entry(vsi, cmd);
3978 break;
3979 default:
3980 break;
3981 }
3982
3983 return ret;
3984 }
3985
3986 /**
3987 * i40e_max_channels - get the maximum number of combined channels supported
3988 * @vsi: vsi pointer
3989 **/
3990 static unsigned int i40e_max_channels(struct i40e_vsi *vsi)
3991 {
3992 /* TODO: This code assumes DCB and FD are disabled for now. */
3993 return vsi->alloc_queue_pairs;
3994 }
3995
3996 /**
3997 * i40e_get_channels - Get the current channels enabled and max supported etc.
3998 * @dev: network interface device structure
3999 * @ch: ethtool channels structure
4000 *
4001 * We don't support separate tx and rx queues as channels. The other count
4002 * represents how many queues are being used for control. max_combined counts
4003 * how many queue pairs we can support. They may not be mapped 1 to 1 with
4004 * q_vectors since we support a lot more queue pairs than q_vectors.
4005 **/
4006 static void i40e_get_channels(struct net_device *dev,
4007 struct ethtool_channels *ch)
4008 {
4009 struct i40e_netdev_priv *np = netdev_priv(dev);
4010 struct i40e_vsi *vsi = np->vsi;
4011 struct i40e_pf *pf = vsi->back;
4012
4013 /* report maximum channels */
4014 ch->max_combined = i40e_max_channels(vsi);
4015
4016 /* report info for other vector */
4017 ch->other_count = (pf->flags & I40E_FLAG_FD_SB_ENABLED) ? 1 : 0;
4018 ch->max_other = ch->other_count;
4019
4020 /* Note: This code assumes DCB is disabled for now. */
4021 ch->combined_count = vsi->num_queue_pairs;
4022 }
4023
4024 /**
4025 * i40e_set_channels - Set the new channels count.
4026 * @dev: network interface device structure
4027 * @ch: ethtool channels structure
4028 *
4029 * The new channels count may not be the same as requested by the user
4030 * since it gets rounded down to a power of 2 value.
4031 **/
4032 static int i40e_set_channels(struct net_device *dev,
4033 struct ethtool_channels *ch)
4034 {
4035 const u8 drop = I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET;
4036 struct i40e_netdev_priv *np = netdev_priv(dev);
4037 unsigned int count = ch->combined_count;
4038 struct i40e_vsi *vsi = np->vsi;
4039 struct i40e_pf *pf = vsi->back;
4040 struct i40e_fdir_filter *rule;
4041 struct hlist_node *node2;
4042 int new_count;
4043 int err = 0;
4044
4045 /* We do not support setting channels for any other VSI at present */
4046 if (vsi->type != I40E_VSI_MAIN)
4047 return -EINVAL;
4048
4049 /* We do not support setting channels via ethtool when TCs are
4050 * configured through mqprio
4051 */
4052 if (pf->flags & I40E_FLAG_TC_MQPRIO)
4053 return -EINVAL;
4054
4055 /* verify they are not requesting separate vectors */
4056 if (!count || ch->rx_count || ch->tx_count)
4057 return -EINVAL;
4058
4059 /* verify other_count has not changed */
4060 if (ch->other_count != ((pf->flags & I40E_FLAG_FD_SB_ENABLED) ?
1 : 0))
4061 return -EINVAL;
4062
4063 /* verify the number of channels does not exceed hardware limits */
4064 if (count > i40e_max_channels(vsi))
4065 return -EINVAL;
4066
4067 /* verify that the number of channels does not invalidate any current
4068 * flow director rules
4069 */
4070 hlist_for_each_entry_safe(rule, node2,
4071 &pf->fdir_filter_list, fdir_node) {
4072 if (rule->dest_ctl != drop && count <= rule->q_index) {
4073 dev_warn(&pf->pdev->dev,
4074 "Existing user defined filter %d assigns flow to queue %d\n",
4075 rule->fd_id, rule->q_index);
4076 err = -EINVAL;
4077 }
4078 }
4079
4080 if (err) {
4081 dev_err(&pf->pdev->dev,
4082 "Existing filter rules must be deleted to reduce combined channel count to %d\n",
4083 count);
4084 return err;
4085 }
4086
4087 /* update feature limits from largest to smallest supported values */
4088 /* TODO: Flow director limit, DCB etc */
4089
4090 /* use rss_reconfig to rebuild with new queue count and update traffic
4091 * class queue mapping
4092 */
4093 new_count = i40e_reconfig_rss_queues(pf, count);
4094 if (new_count > 0)
4095 return 0;
4096 else
4097 return -EINVAL;
4098 }
4099
4100 /**
4101 * i40e_get_rxfh_key_size - get the RSS hash key size
4102 * @netdev: network interface device structure
4103 *
4104 * Returns the size of the RSS hash key.
4105 **/
4106 static u32 i40e_get_rxfh_key_size(struct net_device *netdev)
4107 {
4108 return I40E_HKEY_ARRAY_SIZE;
4109 }
4110
4111 /**
4112 * i40e_get_rxfh_indir_size - get the rx flow hash indirection table size
4113 * @netdev: network interface device structure
4114 *
4115 * Returns the table size.
4116 **/
4117 static u32 i40e_get_rxfh_indir_size(struct net_device *netdev)
4118 {
4119 return I40E_HLUT_ARRAY_SIZE;
4120 }
4121
4122 /**
4123 * i40e_get_rxfh - get the rx flow hash indirection table
4124 * @netdev: network interface device structure
4125 * @indir: indirection table
4126 * @key: hash key
4127 * @hfunc: hash function
4128 *
4129 * Reads the indirection table directly from the hardware. Returns 0 on
4130 * success.
4131 **/
4132 static int i40e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
4133 u8 *hfunc)
4134 {
4135 struct i40e_netdev_priv *np = netdev_priv(netdev);
4136 struct i40e_vsi *vsi = np->vsi;
4137 u8 *lut, *seed = NULL;
4138 int ret;
4139 u16 i;
4140
4141 if (hfunc)
4142 *hfunc = ETH_RSS_HASH_TOP;
4143
4144 if (!indir)
4145 return 0;
4146
4147 seed = key;
4148 lut = kzalloc(I40E_HLUT_ARRAY_SIZE, GFP_KERNEL);
4149 if (!lut)
4150 return -ENOMEM;
4151 ret = i40e_get_rss(vsi, seed, lut, I40E_HLUT_ARRAY_SIZE);
4152 if (ret)
4153 goto out;
4154 for (i = 0; i < I40E_HLUT_ARRAY_SIZE; i++)
4155 indir[i] = (u32)(lut[i]);
4156
4157 out:
4158 kfree(lut);
4159
4160 return ret;
4161 }
4162
4163 /**
4164 * i40e_set_rxfh - set the rx flow hash indirection table
4165 * @netdev: network interface device structure
4166 * @indir: indirection table
4167 * @key: hash key
* @hfunc: hash function to use
4168 *
4169 * Returns -EINVAL if the table specifies an invalid queue id, otherwise
4170 * returns 0 after programming the table.
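 * The @hfunc argument must be ETH_RSS_HASH_NO_CHANGE or ETH_RSS_HASH_TOP;
 * any other hash function is rejected with -EOPNOTSUPP.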
4171 **/ 4172 static int i40e_set_rxfh(struct net_device *netdev, const u32 *indir, 4173 const u8 *key, const u8 hfunc) 4174 { 4175 struct i40e_netdev_priv *np = netdev_priv(netdev); 4176 struct i40e_vsi *vsi = np->vsi; 4177 struct i40e_pf *pf = vsi->back; 4178 u8 *seed = NULL; 4179 u16 i; 4180 4181 if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) 4182 return -EOPNOTSUPP; 4183 4184 if (key) { 4185 if (!vsi->rss_hkey_user) { 4186 vsi->rss_hkey_user = kzalloc(I40E_HKEY_ARRAY_SIZE, 4187 GFP_KERNEL); 4188 if (!vsi->rss_hkey_user) 4189 return -ENOMEM; 4190 } 4191 memcpy(vsi->rss_hkey_user, key, I40E_HKEY_ARRAY_SIZE); 4192 seed = vsi->rss_hkey_user; 4193 } 4194 if (!vsi->rss_lut_user) { 4195 vsi->rss_lut_user = kzalloc(I40E_HLUT_ARRAY_SIZE, GFP_KERNEL); 4196 if (!vsi->rss_lut_user) 4197 return -ENOMEM; 4198 } 4199 4200 /* Each 32 bits pointed by 'indir' is stored with a lut entry */ 4201 if (indir) 4202 for (i = 0; i < I40E_HLUT_ARRAY_SIZE; i++) 4203 vsi->rss_lut_user[i] = (u8)(indir[i]); 4204 else 4205 i40e_fill_rss_lut(pf, vsi->rss_lut_user, I40E_HLUT_ARRAY_SIZE, 4206 vsi->rss_size); 4207 4208 return i40e_config_rss(vsi, seed, vsi->rss_lut_user, 4209 I40E_HLUT_ARRAY_SIZE); 4210 } 4211 4212 /** 4213 * i40e_get_priv_flags - report device private flags 4214 * @dev: network interface device structure 4215 * 4216 * The get string set count and the string set should be matched for each 4217 * flag returned. Add new strings for each flag to the i40e_gstrings_priv_flags 4218 * array. 4219 * 4220 * Returns a u32 bitmap of flags. 4221 **/ 4222 static u32 i40e_get_priv_flags(struct net_device *dev) 4223 { 4224 struct i40e_netdev_priv *np = netdev_priv(dev); 4225 struct i40e_vsi *vsi = np->vsi; 4226 struct i40e_pf *pf = vsi->back; 4227 u32 i, j, ret_flags = 0; 4228 4229 for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++) { 4230 const struct i40e_priv_flags *priv_flags; 4231 4232 priv_flags = &i40e_gstrings_priv_flags[i]; 4233 4234 if (priv_flags->flag & pf->flags) 4235 ret_flags |= BIT(i); 4236 } 4237 4238 if (pf->hw.pf_id != 0) 4239 return ret_flags; 4240 4241 for (j = 0; j < I40E_GL_PRIV_FLAGS_STR_LEN; j++) { 4242 const struct i40e_priv_flags *priv_flags; 4243 4244 priv_flags = &i40e_gl_gstrings_priv_flags[j]; 4245 4246 if (priv_flags->flag & pf->flags) 4247 ret_flags |= BIT(i + j); 4248 } 4249 4250 return ret_flags; 4251 } 4252 4253 /** 4254 * i40e_set_priv_flags - set private flags 4255 * @dev: network interface device structure 4256 * @flags: bit flags to be set 4257 **/ 4258 static int i40e_set_priv_flags(struct net_device *dev, u32 flags) 4259 { 4260 struct i40e_netdev_priv *np = netdev_priv(dev); 4261 struct i40e_vsi *vsi = np->vsi; 4262 struct i40e_pf *pf = vsi->back; 4263 u32 orig_flags, new_flags, changed_flags; 4264 u32 i, j; 4265 4266 orig_flags = READ_ONCE(pf->flags); 4267 new_flags = orig_flags; 4268 4269 for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++) { 4270 const struct i40e_priv_flags *priv_flags; 4271 4272 priv_flags = &i40e_gstrings_priv_flags[i]; 4273 4274 if (flags & BIT(i)) 4275 new_flags |= priv_flags->flag; 4276 else 4277 new_flags &= ~(priv_flags->flag); 4278 4279 /* If this is a read-only flag, it can't be changed */ 4280 if (priv_flags->read_only && 4281 ((orig_flags ^ new_flags) & ~BIT(i))) 4282 return -EOPNOTSUPP; 4283 } 4284 4285 if (pf->hw.pf_id != 0) 4286 goto flags_complete; 4287 4288 for (j = 0; j < I40E_GL_PRIV_FLAGS_STR_LEN; j++) { 4289 const struct i40e_priv_flags *priv_flags; 4290 4291 priv_flags = &i40e_gl_gstrings_priv_flags[j]; 4292 4293 if (flags & BIT(i 
+ j)) 4294 new_flags |= priv_flags->flag; 4295 else 4296 new_flags &= ~(priv_flags->flag); 4297 4298 /* If this is a read-only flag, it can't be changed */ 4299 if (priv_flags->read_only && 4300 ((orig_flags ^ new_flags) & ~BIT(i))) 4301 return -EOPNOTSUPP; 4302 } 4303 4304 flags_complete: 4305 /* Before we finalize any flag changes, we need to perform some 4306 * checks to ensure that the changes are supported and safe. 4307 */ 4308 4309 /* ATR eviction is not supported on all devices */ 4310 if ((new_flags & I40E_FLAG_HW_ATR_EVICT_ENABLED) && 4311 !(pf->hw_features & I40E_HW_ATR_EVICT_CAPABLE)) 4312 return -EOPNOTSUPP; 4313 4314 /* Compare and exchange the new flags into place. If we failed, that 4315 * is if cmpxchg returns anything but the old value, this means that 4316 * something else has modified the flags variable since we copied it 4317 * originally. We'll just punt with an error and log something in the 4318 * message buffer. 4319 */ 4320 if (cmpxchg(&pf->flags, orig_flags, new_flags) != orig_flags) { 4321 dev_warn(&pf->pdev->dev, 4322 "Unable to update pf->flags as it was modified by another thread...\n"); 4323 return -EAGAIN; 4324 } 4325 4326 changed_flags = orig_flags ^ new_flags; 4327 4328 /* Process any additional changes needed as a result of flag changes. 4329 * The changed_flags value reflects the list of bits that were 4330 * changed in the code above. 4331 */ 4332 4333 /* Flush current ATR settings if ATR was disabled */ 4334 if ((changed_flags & I40E_FLAG_FD_ATR_ENABLED) && 4335 !(pf->flags & I40E_FLAG_FD_ATR_ENABLED)) { 4336 pf->flags |= I40E_FLAG_FD_ATR_AUTO_DISABLED; 4337 set_bit(__I40E_FD_FLUSH_REQUESTED, pf->state); 4338 } 4339 4340 if (changed_flags & I40E_FLAG_TRUE_PROMISC_SUPPORT) { 4341 u16 sw_flags = 0, valid_flags = 0; 4342 int ret; 4343 4344 if (!(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT)) 4345 sw_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC; 4346 valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC; 4347 ret = i40e_aq_set_switch_config(&pf->hw, sw_flags, valid_flags, 4348 0, NULL); 4349 if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) { 4350 dev_info(&pf->pdev->dev, 4351 "couldn't set switch config bits, err %s aq_err %s\n", 4352 i40e_stat_str(&pf->hw, ret), 4353 i40e_aq_str(&pf->hw, 4354 pf->hw.aq.asq_last_status)); 4355 /* not a fatal problem, just keep going */ 4356 } 4357 } 4358 4359 /* Issue reset to cause things to take effect, as additional bits 4360 * are added we will need to create a mask of bits requiring reset 4361 */ 4362 if (changed_flags & (I40E_FLAG_VEB_STATS_ENABLED | 4363 I40E_FLAG_LEGACY_RX | 4364 I40E_FLAG_SOURCE_PRUNING_DISABLED)) 4365 i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED), true); 4366 4367 return 0; 4368 } 4369 4370 /** 4371 * i40e_get_module_info - get (Q)SFP+ module type info 4372 * @netdev: network interface device structure 4373 * @modinfo: module EEPROM size and layout information structure 4374 **/ 4375 static int i40e_get_module_info(struct net_device *netdev, 4376 struct ethtool_modinfo *modinfo) 4377 { 4378 struct i40e_netdev_priv *np = netdev_priv(netdev); 4379 struct i40e_vsi *vsi = np->vsi; 4380 struct i40e_pf *pf = vsi->back; 4381 struct i40e_hw *hw = &pf->hw; 4382 u32 sff8472_comp = 0; 4383 u32 sff8472_swap = 0; 4384 u32 sff8636_rev = 0; 4385 i40e_status status; 4386 u32 type = 0; 4387 4388 /* Check if firmware supports reading module EEPROM. */ 4389 if (!(hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE)) { 4390 netdev_err(vsi->netdev, "Module EEPROM memory read not supported. 
Please update the NVM image.\n"); 4391 return -EINVAL; 4392 } 4393 4394 status = i40e_update_link_info(hw); 4395 if (status) 4396 return -EIO; 4397 4398 if (hw->phy.link_info.phy_type == I40E_PHY_TYPE_EMPTY) { 4399 netdev_err(vsi->netdev, "Cannot read module EEPROM memory. No module connected.\n"); 4400 return -EINVAL; 4401 } 4402 4403 type = hw->phy.link_info.module_type[0]; 4404 4405 switch (type) { 4406 case I40E_MODULE_TYPE_SFP: 4407 status = i40e_aq_get_phy_register(hw, 4408 I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE, 4409 I40E_I2C_EEPROM_DEV_ADDR, 4410 I40E_MODULE_SFF_8472_COMP, 4411 &sff8472_comp, NULL); 4412 if (status) 4413 return -EIO; 4414 4415 status = i40e_aq_get_phy_register(hw, 4416 I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE, 4417 I40E_I2C_EEPROM_DEV_ADDR, 4418 I40E_MODULE_SFF_8472_SWAP, 4419 &sff8472_swap, NULL); 4420 if (status) 4421 return -EIO; 4422 4423 /* Check if the module requires address swap to access 4424 * the other EEPROM memory page. 4425 */ 4426 if (sff8472_swap & I40E_MODULE_SFF_ADDR_MODE) { 4427 netdev_warn(vsi->netdev, "Module address swap to access page 0xA2 is not supported.\n"); 4428 modinfo->type = ETH_MODULE_SFF_8079; 4429 modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN; 4430 } else if (sff8472_comp == 0x00) { 4431 /* Module is not SFF-8472 compliant */ 4432 modinfo->type = ETH_MODULE_SFF_8079; 4433 modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN; 4434 } else { 4435 modinfo->type = ETH_MODULE_SFF_8472; 4436 modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; 4437 } 4438 break; 4439 case I40E_MODULE_TYPE_QSFP_PLUS: 4440 /* Read from memory page 0. */ 4441 status = i40e_aq_get_phy_register(hw, 4442 I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE, 4443 0, 4444 I40E_MODULE_REVISION_ADDR, 4445 &sff8636_rev, NULL); 4446 if (status) 4447 return -EIO; 4448 /* Determine revision compliance byte */ 4449 if (sff8636_rev > 0x02) { 4450 /* Module is SFF-8636 compliant */ 4451 modinfo->type = ETH_MODULE_SFF_8636; 4452 modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN; 4453 } else { 4454 modinfo->type = ETH_MODULE_SFF_8436; 4455 modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN; 4456 } 4457 break; 4458 case I40E_MODULE_TYPE_QSFP28: 4459 modinfo->type = ETH_MODULE_SFF_8636; 4460 modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN; 4461 break; 4462 default: 4463 netdev_err(vsi->netdev, "Module type unrecognized\n"); 4464 return -EINVAL; 4465 } 4466 return 0; 4467 } 4468 4469 /** 4470 * i40e_get_module_eeprom - fills buffer with (Q)SFP+ module memory contents 4471 * @netdev: network interface device structure 4472 * @ee: EEPROM dump request structure 4473 * @data: buffer to be filled with EEPROM contents 4474 **/ 4475 static int i40e_get_module_eeprom(struct net_device *netdev, 4476 struct ethtool_eeprom *ee, 4477 u8 *data) 4478 { 4479 struct i40e_netdev_priv *np = netdev_priv(netdev); 4480 struct i40e_vsi *vsi = np->vsi; 4481 struct i40e_pf *pf = vsi->back; 4482 struct i40e_hw *hw = &pf->hw; 4483 bool is_sfp = false; 4484 i40e_status status; 4485 u32 value = 0; 4486 int i; 4487 4488 if (!ee || !ee->len || !data) 4489 return -EINVAL; 4490 4491 if (hw->phy.link_info.module_type[0] == I40E_MODULE_TYPE_SFP) 4492 is_sfp = true; 4493 4494 for (i = 0; i < ee->len; i++) { 4495 u32 offset = i + ee->offset; 4496 u32 addr = is_sfp ? 
I40E_I2C_EEPROM_DEV_ADDR : 0; 4497 4498 /* Check if we need to access the other memory page */ 4499 if (is_sfp) { 4500 if (offset >= ETH_MODULE_SFF_8079_LEN) { 4501 offset -= ETH_MODULE_SFF_8079_LEN; 4502 addr = I40E_I2C_EEPROM_DEV_ADDR2; 4503 } 4504 } else { 4505 while (offset >= ETH_MODULE_SFF_8436_LEN) { 4506 /* Compute memory page number and offset. */ 4507 offset -= ETH_MODULE_SFF_8436_LEN / 2; 4508 addr++; 4509 } 4510 } 4511 4512 status = i40e_aq_get_phy_register(hw, 4513 I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE, 4514 addr, offset, &value, NULL); 4515 if (status) 4516 return -EIO; 4517 data[i] = value; 4518 } 4519 return 0; 4520 } 4521 4522 static const struct ethtool_ops i40e_ethtool_ops = { 4523 .get_drvinfo = i40e_get_drvinfo, 4524 .get_regs_len = i40e_get_regs_len, 4525 .get_regs = i40e_get_regs, 4526 .nway_reset = i40e_nway_reset, 4527 .get_link = ethtool_op_get_link, 4528 .get_wol = i40e_get_wol, 4529 .set_wol = i40e_set_wol, 4530 .set_eeprom = i40e_set_eeprom, 4531 .get_eeprom_len = i40e_get_eeprom_len, 4532 .get_eeprom = i40e_get_eeprom, 4533 .get_ringparam = i40e_get_ringparam, 4534 .set_ringparam = i40e_set_ringparam, 4535 .get_pauseparam = i40e_get_pauseparam, 4536 .set_pauseparam = i40e_set_pauseparam, 4537 .get_msglevel = i40e_get_msglevel, 4538 .set_msglevel = i40e_set_msglevel, 4539 .get_rxnfc = i40e_get_rxnfc, 4540 .set_rxnfc = i40e_set_rxnfc, 4541 .self_test = i40e_diag_test, 4542 .get_strings = i40e_get_strings, 4543 .set_phys_id = i40e_set_phys_id, 4544 .get_sset_count = i40e_get_sset_count, 4545 .get_ethtool_stats = i40e_get_ethtool_stats, 4546 .get_coalesce = i40e_get_coalesce, 4547 .set_coalesce = i40e_set_coalesce, 4548 .get_rxfh_key_size = i40e_get_rxfh_key_size, 4549 .get_rxfh_indir_size = i40e_get_rxfh_indir_size, 4550 .get_rxfh = i40e_get_rxfh, 4551 .set_rxfh = i40e_set_rxfh, 4552 .get_channels = i40e_get_channels, 4553 .set_channels = i40e_set_channels, 4554 .get_module_info = i40e_get_module_info, 4555 .get_module_eeprom = i40e_get_module_eeprom, 4556 .get_ts_info = i40e_get_ts_info, 4557 .get_priv_flags = i40e_get_priv_flags, 4558 .set_priv_flags = i40e_set_priv_flags, 4559 .get_per_queue_coalesce = i40e_get_per_queue_coalesce, 4560 .set_per_queue_coalesce = i40e_set_per_queue_coalesce, 4561 .get_link_ksettings = i40e_get_link_ksettings, 4562 .set_link_ksettings = i40e_set_link_ksettings, 4563 }; 4564 4565 void i40e_set_ethtool_ops(struct net_device *netdev) 4566 { 4567 netdev->ethtool_ops = &i40e_ethtool_ops; 4568 } 4569