/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *****************************************************************************/
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/firmware.h>

#include "iwl-drv.h"
#include "iwl-modparams.h"
#include "iwl-nvm-parse.h"
#include "iwl-prph.h"
#include "iwl-io.h"
#include "iwl-csr.h"
#include "fw/acpi.h"
#include "fw/api/nvm-reg.h"
#include "fw/api/commands.h"
#include "fw/api/cmdhdr.h"
#include "fw/img.h"

/* NVM offsets (in words) definitions */
enum nvm_offsets {
	/* NVM HW-Section offset (in words) definitions */
	SUBSYSTEM_ID = 0x0A,
	HW_ADDR = 0x15,

	/* NVM SW-Section offset (in words) definitions */
	NVM_SW_SECTION = 0x1C0,
	NVM_VERSION = 0,
	RADIO_CFG = 1,
	SKU = 2,
	N_HW_ADDRS = 3,
	NVM_CHANNELS = 0x1E0 - NVM_SW_SECTION,

	/* NVM calibration section offset (in words) definitions */
	NVM_CALIB_SECTION = 0x2B8,
	XTAL_CALIB = 0x316 - NVM_CALIB_SECTION,

	/* NVM REGULATORY -Section offset (in words) definitions */
	NVM_CHANNELS_SDP = 0,
};

enum ext_nvm_offsets {
	/* NVM HW-Section offset (in words) definitions */
	MAC_ADDRESS_OVERRIDE_EXT_NVM = 1,

	/* NVM SW-Section offset (in words) definitions */
	NVM_VERSION_EXT_NVM = 0,
	RADIO_CFG_FAMILY_EXT_NVM = 0,
	SKU_FAMILY_8000 = 2,
	N_HW_ADDRS_FAMILY_8000 = 3,

	/* NVM REGULATORY -Section offset (in words) definitions */
	NVM_CHANNELS_EXTENDED = 0,
	NVM_LAR_OFFSET_OLD = 0x4C7,
	NVM_LAR_OFFSET = 0x507,
	NVM_LAR_ENABLED = 0x7,
};

/* SKU Capabilities (actual values from NVM definition) */
enum nvm_sku_bits {
	NVM_SKU_CAP_BAND_24GHZ	= BIT(0),
	NVM_SKU_CAP_BAND_52GHZ	= BIT(1),
	NVM_SKU_CAP_11N_ENABLE	= BIT(2),
	NVM_SKU_CAP_11AC_ENABLE	= BIT(3),
	NVM_SKU_CAP_MIMO_DISABLE	= BIT(5),
};
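/*
 * Worked example (illustration only, not taken from a real OTP): a SKU
 * word of 0x0F has BIT(0)..BIT(3) set, i.e. both the 2.4 GHz and 5.2 GHz
 * bands plus 802.11n and 802.11ac are enabled, while BIT(5) being clear
 * means MIMO is not disabled.
 */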
/*
 * These are the channel numbers in the order that they are stored in the NVM
 */
static const u8 iwl_nvm_channels[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64,
	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165
};

static const u8 iwl_ext_nvm_channels[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181
};

#define IWL_NVM_NUM_CHANNELS		ARRAY_SIZE(iwl_nvm_channels)
#define IWL_NVM_NUM_CHANNELS_EXT	ARRAY_SIZE(iwl_ext_nvm_channels)
#define NUM_2GHZ_CHANNELS		14
#define NUM_2GHZ_CHANNELS_EXT		14
#define FIRST_2GHZ_HT_MINUS		5
#define LAST_2GHZ_HT_PLUS		9
#define LAST_5GHZ_HT			165
#define LAST_5GHZ_HT_FAMILY_8000	181
#define N_HW_ADDR_MASK			0xF

/* rate data (static) */
static struct ieee80211_rate iwl_cfg80211_rates[] = {
	{ .bitrate = 1 * 10, .hw_value = 0, .hw_value_short = 0, },
	{ .bitrate = 2 * 10, .hw_value = 1, .hw_value_short = 1,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE, },
	{ .bitrate = 5.5 * 10, .hw_value = 2, .hw_value_short = 2,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE, },
	{ .bitrate = 11 * 10, .hw_value = 3, .hw_value_short = 3,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE, },
	{ .bitrate = 6 * 10, .hw_value = 4, .hw_value_short = 4, },
	{ .bitrate = 9 * 10, .hw_value = 5, .hw_value_short = 5, },
	{ .bitrate = 12 * 10, .hw_value = 6, .hw_value_short = 6, },
	{ .bitrate = 18 * 10, .hw_value = 7, .hw_value_short = 7, },
	{ .bitrate = 24 * 10, .hw_value = 8, .hw_value_short = 8, },
	{ .bitrate = 36 * 10, .hw_value = 9, .hw_value_short = 9, },
	{ .bitrate = 48 * 10, .hw_value = 10, .hw_value_short = 10, },
	{ .bitrate = 54 * 10, .hw_value = 11, .hw_value_short = 11, },
};
#define RATES_24_OFFS	0
#define N_RATES_24	ARRAY_SIZE(iwl_cfg80211_rates)
#define RATES_52_OFFS	4
#define N_RATES_52	(N_RATES_24 - RATES_52_OFFS)

/**
 * enum iwl_nvm_channel_flags - channel flags in NVM
 * @NVM_CHANNEL_VALID: channel is usable for this SKU/geo
 * @NVM_CHANNEL_IBSS: usable as an IBSS channel
 * @NVM_CHANNEL_ACTIVE: active scanning allowed
 * @NVM_CHANNEL_RADAR: radar detection required
 * @NVM_CHANNEL_INDOOR_ONLY: only indoor use is allowed
 * @NVM_CHANNEL_GO_CONCURRENT: GO operation is allowed when connected to BSS
 *	on same channel on 2.4 or same UNII band on 5.2
 * @NVM_CHANNEL_UNIFORM: uniform spreading required
 * @NVM_CHANNEL_20MHZ: 20 MHz channel okay
 * @NVM_CHANNEL_40MHZ: 40 MHz channel okay
 * @NVM_CHANNEL_80MHZ: 80 MHz channel okay
 * @NVM_CHANNEL_160MHZ: 160 MHz channel okay
 * @NVM_CHANNEL_DC_HIGH: DC HIGH required/allowed (?)
 */
enum iwl_nvm_channel_flags {
	NVM_CHANNEL_VALID		= BIT(0),
	NVM_CHANNEL_IBSS		= BIT(1),
	NVM_CHANNEL_ACTIVE		= BIT(3),
	NVM_CHANNEL_RADAR		= BIT(4),
	NVM_CHANNEL_INDOOR_ONLY		= BIT(5),
	NVM_CHANNEL_GO_CONCURRENT	= BIT(6),
	NVM_CHANNEL_UNIFORM		= BIT(7),
	NVM_CHANNEL_20MHZ		= BIT(8),
	NVM_CHANNEL_40MHZ		= BIT(9),
	NVM_CHANNEL_80MHZ		= BIT(10),
	NVM_CHANNEL_160MHZ		= BIT(11),
	NVM_CHANNEL_DC_HIGH		= BIT(12),
};

static inline void iwl_nvm_print_channel_flags(struct device *dev, u32 level,
						int chan, u16 flags)
{
#define CHECK_AND_PRINT_I(x)	\
	((flags & NVM_CHANNEL_##x) ? " " #x : "")

	if (!(flags & NVM_CHANNEL_VALID)) {
		IWL_DEBUG_DEV(dev, level, "Ch. %d: 0x%x: No traffic\n",
			      chan, flags);
		return;
	}

	/* Note: already can print up to 101 characters, 110 is the limit! */
	IWL_DEBUG_DEV(dev, level,
		      "Ch. %d: 0x%x:%s%s%s%s%s%s%s%s%s%s%s%s\n",
		      chan, flags,
		      CHECK_AND_PRINT_I(VALID),
		      CHECK_AND_PRINT_I(IBSS),
		      CHECK_AND_PRINT_I(ACTIVE),
		      CHECK_AND_PRINT_I(RADAR),
		      CHECK_AND_PRINT_I(INDOOR_ONLY),
		      CHECK_AND_PRINT_I(GO_CONCURRENT),
		      CHECK_AND_PRINT_I(UNIFORM),
		      CHECK_AND_PRINT_I(20MHZ),
		      CHECK_AND_PRINT_I(40MHZ),
		      CHECK_AND_PRINT_I(80MHZ),
		      CHECK_AND_PRINT_I(160MHZ),
		      CHECK_AND_PRINT_I(DC_HIGH));
#undef CHECK_AND_PRINT_I
}

static u32 iwl_get_channel_flags(u8 ch_num, int ch_idx, bool is_5ghz,
				 u16 nvm_flags, const struct iwl_cfg *cfg)
{
	u32 flags = IEEE80211_CHAN_NO_HT40;
	u32 last_5ghz_ht = LAST_5GHZ_HT;

	if (cfg->nvm_type == IWL_NVM_EXT)
		last_5ghz_ht = LAST_5GHZ_HT_FAMILY_8000;

	if (!is_5ghz && (nvm_flags & NVM_CHANNEL_40MHZ)) {
		if (ch_num <= LAST_2GHZ_HT_PLUS)
			flags &= ~IEEE80211_CHAN_NO_HT40PLUS;
		if (ch_num >= FIRST_2GHZ_HT_MINUS)
			flags &= ~IEEE80211_CHAN_NO_HT40MINUS;
	} else if (ch_num <= last_5ghz_ht && (nvm_flags & NVM_CHANNEL_40MHZ)) {
		if ((ch_idx - NUM_2GHZ_CHANNELS) % 2 == 0)
			flags &= ~IEEE80211_CHAN_NO_HT40PLUS;
		else
			flags &= ~IEEE80211_CHAN_NO_HT40MINUS;
	}
	if (!(nvm_flags & NVM_CHANNEL_80MHZ))
		flags |= IEEE80211_CHAN_NO_80MHZ;
	if (!(nvm_flags & NVM_CHANNEL_160MHZ))
		flags |= IEEE80211_CHAN_NO_160MHZ;

	if (!(nvm_flags & NVM_CHANNEL_IBSS))
		flags |= IEEE80211_CHAN_NO_IR;

	if (!(nvm_flags & NVM_CHANNEL_ACTIVE))
		flags |= IEEE80211_CHAN_NO_IR;

	if (nvm_flags & NVM_CHANNEL_RADAR)
		flags |= IEEE80211_CHAN_RADAR;

	if (nvm_flags & NVM_CHANNEL_INDOOR_ONLY)
		flags |= IEEE80211_CHAN_INDOOR_ONLY;

	/* Set the GO concurrent flag only in case that NO_IR is set.
	 * Otherwise it is meaningless
	 */
	if ((nvm_flags & NVM_CHANNEL_GO_CONCURRENT) &&
	    (flags & IEEE80211_CHAN_NO_IR))
		flags |= IEEE80211_CHAN_IR_CONCURRENT;

	return flags;
}
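/*
 * Worked example for the HT40 logic above (the channel numbers are only an
 * illustration, the thresholds come from the defines): with
 * NVM_CHANNEL_40MHZ set, 2.4 GHz channel 6 clears both NO_HT40PLUS and
 * NO_HT40MINUS (6 <= LAST_2GHZ_HT_PLUS and 6 >= FIRST_2GHZ_HT_MINUS),
 * while channel 3 only clears NO_HT40PLUS.  In 5 GHz the choice alternates
 * with the channel index: channel 36 (even 5 GHz index) allows HT40+,
 * channel 40 (odd index) allows HT40-.
 */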
static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
				struct iwl_nvm_data *data,
				const __le16 * const nvm_ch_flags,
				u32 sbands_flags)
{
	int ch_idx;
	int n_channels = 0;
	struct ieee80211_channel *channel;
	u16 ch_flags;
	int num_of_ch, num_2ghz_channels;
	const u8 *nvm_chan;

	if (cfg->nvm_type != IWL_NVM_EXT) {
		num_of_ch = IWL_NVM_NUM_CHANNELS;
		nvm_chan = &iwl_nvm_channels[0];
		num_2ghz_channels = NUM_2GHZ_CHANNELS;
	} else {
		num_of_ch = IWL_NVM_NUM_CHANNELS_EXT;
		nvm_chan = &iwl_ext_nvm_channels[0];
		num_2ghz_channels = NUM_2GHZ_CHANNELS_EXT;
	}

	for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) {
		bool is_5ghz = (ch_idx >= num_2ghz_channels);

		ch_flags = __le16_to_cpup(nvm_ch_flags + ch_idx);

		if (is_5ghz && !data->sku_cap_band_52ghz_enable)
			continue;

		/* workaround to disable wide channels in 5GHz */
		if ((sbands_flags & IWL_NVM_SBANDS_FLAGS_NO_WIDE_IN_5GHZ) &&
		    is_5ghz) {
			ch_flags &= ~(NVM_CHANNEL_40MHZ |
				      NVM_CHANNEL_80MHZ |
				      NVM_CHANNEL_160MHZ);
		}

		if (ch_flags & NVM_CHANNEL_160MHZ)
			data->vht160_supported = true;

		if (!(sbands_flags & IWL_NVM_SBANDS_FLAGS_LAR) &&
		    !(ch_flags & NVM_CHANNEL_VALID)) {
			/*
			 * Channels might become valid later if lar is
			 * supported, hence we still want to add them to
			 * the list of supported channels to cfg80211.
			 */
			iwl_nvm_print_channel_flags(dev, IWL_DL_EEPROM,
						    nvm_chan[ch_idx], ch_flags);
			continue;
		}

		channel = &data->channels[n_channels];
		n_channels++;

		channel->hw_value = nvm_chan[ch_idx];
		channel->band = is_5ghz ?
				NL80211_BAND_5GHZ : NL80211_BAND_2GHZ;
		channel->center_freq =
			ieee80211_channel_to_frequency(
					channel->hw_value, channel->band);

		/* Initialize regulatory-based run-time data */

		/*
		 * Default value - highest tx power value.  max_power
		 * is not used in mvm, and is used for backwards compatibility
		 */
		channel->max_power = IWL_DEFAULT_MAX_TX_POWER;

		/* don't put limitations in case we're using LAR */
		if (!(sbands_flags & IWL_NVM_SBANDS_FLAGS_LAR))
			channel->flags = iwl_get_channel_flags(nvm_chan[ch_idx],
							       ch_idx, is_5ghz,
							       ch_flags, cfg);
		else
			channel->flags = 0;

		iwl_nvm_print_channel_flags(dev, IWL_DL_EEPROM,
					    channel->hw_value, ch_flags);
		IWL_DEBUG_EEPROM(dev, "Ch. %d: %ddBm\n",
				 channel->hw_value, channel->max_power);
	}

	return n_channels;
}

static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
				  struct iwl_nvm_data *data,
				  struct ieee80211_sta_vht_cap *vht_cap,
				  u8 tx_chains, u8 rx_chains)
{
	int num_rx_ants = num_of_ant(rx_chains);
	int num_tx_ants = num_of_ant(tx_chains);
	unsigned int max_ampdu_exponent = (cfg->max_vht_ampdu_exponent ?:
					   IEEE80211_VHT_MAX_AMPDU_1024K);

	vht_cap->vht_supported = true;

	vht_cap->cap = IEEE80211_VHT_CAP_SHORT_GI_80 |
		       IEEE80211_VHT_CAP_RXSTBC_1 |
		       IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
		       3 << IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT |
		       max_ampdu_exponent <<
		       IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;

	if (data->vht160_supported)
		vht_cap->cap |= IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ |
				IEEE80211_VHT_CAP_SHORT_GI_160;

	if (cfg->vht_mu_mimo_supported)
		vht_cap->cap |= IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE;

	if (cfg->ht_params->ldpc)
		vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC;

	if (data->sku_cap_mimo_disabled) {
		num_rx_ants = 1;
		num_tx_ants = 1;
	}

	if (num_tx_ants > 1)
		vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC;
	else
		vht_cap->cap |= IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN;

	switch (iwlwifi_mod_params.amsdu_size) {
	case IWL_AMSDU_DEF:
		if (cfg->mq_rx_supported)
			vht_cap->cap |=
				IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454;
		else
			vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895;
		break;
	case IWL_AMSDU_2K:
		if (cfg->mq_rx_supported)
			vht_cap->cap |=
				IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454;
		else
			WARN(1, "RB size of 2K is not supported by this device\n");
		break;
	case IWL_AMSDU_4K:
		vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895;
		break;
	case IWL_AMSDU_8K:
		vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991;
		break;
	case IWL_AMSDU_12K:
		vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454;
		break;
	default:
		break;
	}

	vht_cap->vht_mcs.rx_mcs_map =
		cpu_to_le16(IEEE80211_VHT_MCS_SUPPORT_0_9 << 0 |
			    IEEE80211_VHT_MCS_SUPPORT_0_9 << 2 |
			    IEEE80211_VHT_MCS_NOT_SUPPORTED << 4 |
			    IEEE80211_VHT_MCS_NOT_SUPPORTED << 6 |
			    IEEE80211_VHT_MCS_NOT_SUPPORTED << 8 |
			    IEEE80211_VHT_MCS_NOT_SUPPORTED << 10 |
			    IEEE80211_VHT_MCS_NOT_SUPPORTED << 12 |
			    IEEE80211_VHT_MCS_NOT_SUPPORTED << 14);

	if (num_rx_ants == 1 || cfg->rx_with_siso_diversity) {
		vht_cap->cap |= IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN;
		/* this works because NOT_SUPPORTED == 3 */
		vht_cap->vht_mcs.rx_mcs_map |=
			cpu_to_le16(IEEE80211_VHT_MCS_NOT_SUPPORTED << 2);
	}

	vht_cap->vht_mcs.tx_mcs_map = vht_cap->vht_mcs.rx_mcs_map;
}
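/*
 * Note on the MCS map built above: the VHT RX/TX MCS maps use two bits per
 * spatial stream, and mac80211 defines IEEE80211_VHT_MCS_SUPPORT_0_9 as 2
 * and IEEE80211_VHT_MCS_NOT_SUPPORTED as 3, so the default map resolves to
 * 0xfffa (MCS 0-9 on streams 1-2, the rest unsupported).  ORing in
 * NOT_SUPPORTED << 2 for the SISO case turns it into 0xfffe, i.e. a single
 * stream.
 */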
static struct ieee80211_sband_iftype_data iwl_he_capa = {
	.types_mask = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP),
	.he_cap = {
		.has_he = true,
		.he_cap_elem = {
			.mac_cap_info[0] =
				IEEE80211_HE_MAC_CAP0_HTC_HE,
			.mac_cap_info[1] =
				IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US |
				IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_8,
			.mac_cap_info[2] =
				IEEE80211_HE_MAC_CAP2_32BIT_BA_BITMAP |
				IEEE80211_HE_MAC_CAP2_ACK_EN,
			.mac_cap_info[3] =
				IEEE80211_HE_MAC_CAP3_GRP_ADDR_MULTI_STA_BA_DL_MU |
				IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_VHT_2,
			.mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU,
			.phy_cap_info[0] =
				IEEE80211_HE_PHY_CAP0_DUAL_BAND |
				IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G |
				IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G |
				IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G,
			.phy_cap_info[1] =
				IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A |
				IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD |
				IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_MAX_NSTS,
			.phy_cap_info[2] =
				IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US |
				IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ |
				IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ,
			.phy_cap_info[3] =
				IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_BPSK |
				IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_1 |
				IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_BPSK |
				IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_1,
			.phy_cap_info[4] =
				IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE |
				IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_8 |
				IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_8,
			.phy_cap_info[5] =
				IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_2 |
				IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_2,
			.phy_cap_info[6] =
				IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT,
			.phy_cap_info[7] =
				IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_AR |
				IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI |
				IEEE80211_HE_PHY_CAP7_MAX_NC_7,
			.phy_cap_info[8] =
				IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI |
				IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G |
				IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU |
				IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU,
		},
		/*
		 * Set default Tx/Rx HE MCS NSS Support field. Indicate support
		 * for up to 2 spatial streams and all MCS, without any special
		 * cases
		 */
		.he_mcs_nss_supp = {
			.rx_mcs_80 = cpu_to_le16(0xfffa),
			.tx_mcs_80 = cpu_to_le16(0xfffa),
			.rx_mcs_160 = cpu_to_le16(0xfffa),
			.tx_mcs_160 = cpu_to_le16(0xfffa),
			.rx_mcs_80p80 = cpu_to_le16(0xffff),
			.tx_mcs_80p80 = cpu_to_le16(0xffff),
		},
		/*
		 * Set default PPE thresholds, with PPET16 set to 0, PPET8 set
		 * to 7
		 */
		.ppe_thres = {0x61, 0x1c, 0xc7, 0x71},
	},
};

static void iwl_init_he_hw_capab(struct ieee80211_supported_band *sband,
				 u8 tx_chains, u8 rx_chains)
{
	if (sband->band == NL80211_BAND_2GHZ ||
	    sband->band == NL80211_BAND_5GHZ)
		sband->iftype_data = &iwl_he_capa;
	else
		return;

	sband->n_iftype_data = 1;

	/* If not 2x2, we need to indicate 1x1 in the Midamble RX Max NSTS */
	if ((tx_chains & rx_chains) != ANT_AB) {
		iwl_he_capa.he_cap.he_cap_elem.phy_cap_info[1] &=
			~IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_MAX_NSTS;
		iwl_he_capa.he_cap.he_cap_elem.phy_cap_info[2] &=
			~IEEE80211_HE_PHY_CAP2_MIDAMBLE_RX_MAX_NSTS;
	}
}

static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
			    struct iwl_nvm_data *data,
			    const __le16 *nvm_ch_flags, u8 tx_chains,
			    u8 rx_chains, u32 sbands_flags)
{
	int n_channels;
	int n_used = 0;
	struct ieee80211_supported_band *sband;

	n_channels = iwl_init_channel_map(dev, cfg, data, nvm_ch_flags,
					  sbands_flags);
	sband = &data->bands[NL80211_BAND_2GHZ];
	sband->band = NL80211_BAND_2GHZ;
	sband->bitrates = &iwl_cfg80211_rates[RATES_24_OFFS];
	sband->n_bitrates = N_RATES_24;
	n_used += iwl_init_sband_channels(data, sband, n_channels,
					  NL80211_BAND_2GHZ);
	iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, NL80211_BAND_2GHZ,
			     tx_chains, rx_chains);

	if (data->sku_cap_11ax_enable && !iwlwifi_mod_params.disable_11ax)
		iwl_init_he_hw_capab(sband, tx_chains, rx_chains);

	sband = &data->bands[NL80211_BAND_5GHZ];
	sband->band = NL80211_BAND_5GHZ;
	sband->bitrates = &iwl_cfg80211_rates[RATES_52_OFFS];
	sband->n_bitrates = N_RATES_52;
	n_used += iwl_init_sband_channels(data, sband, n_channels,
					  NL80211_BAND_5GHZ);
	iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, NL80211_BAND_5GHZ,
			     tx_chains, rx_chains);
	if (data->sku_cap_11ac_enable && !iwlwifi_mod_params.disable_11ac)
		iwl_init_vht_hw_capab(cfg, data, &sband->vht_cap,
				      tx_chains, rx_chains);

	if (data->sku_cap_11ax_enable && !iwlwifi_mod_params.disable_11ax)
		iwl_init_he_hw_capab(sband, tx_chains, rx_chains);

	if (n_channels != n_used)
		IWL_ERR_DEV(dev, "NVM: used only %d of %d channels\n",
			    n_used, n_channels);
}
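/*
 * Note on the bitrate tables used above: iwl_cfg80211_rates lists the four
 * CCK rates first, so the 2.4 GHz band gets all N_RATES_24 entries while
 * the 5 GHz band starts at RATES_52_OFFS (4) and only advertises the eight
 * OFDM rates, since 802.11a has no CCK.
 */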
static int iwl_get_sku(const struct iwl_cfg *cfg, const __le16 *nvm_sw,
		       const __le16 *phy_sku)
{
	if (cfg->nvm_type != IWL_NVM_EXT)
		return le16_to_cpup(nvm_sw + SKU);

	return le32_to_cpup((__le32 *)(phy_sku + SKU_FAMILY_8000));
}

static int iwl_get_nvm_version(const struct iwl_cfg *cfg, const __le16 *nvm_sw)
{
	if (cfg->nvm_type != IWL_NVM_EXT)
		return le16_to_cpup(nvm_sw + NVM_VERSION);
	else
		return le32_to_cpup((__le32 *)(nvm_sw +
					       NVM_VERSION_EXT_NVM));
}

static int iwl_get_radio_cfg(const struct iwl_cfg *cfg, const __le16 *nvm_sw,
			     const __le16 *phy_sku)
{
	if (cfg->nvm_type != IWL_NVM_EXT)
		return le16_to_cpup(nvm_sw + RADIO_CFG);

	return le32_to_cpup((__le32 *)(phy_sku + RADIO_CFG_FAMILY_EXT_NVM));
}

static int iwl_get_n_hw_addrs(const struct iwl_cfg *cfg, const __le16 *nvm_sw)
{
	int n_hw_addr;

	if (cfg->nvm_type != IWL_NVM_EXT)
		return le16_to_cpup(nvm_sw + N_HW_ADDRS);

	n_hw_addr = le32_to_cpup((__le32 *)(nvm_sw + N_HW_ADDRS_FAMILY_8000));

	return n_hw_addr & N_HW_ADDR_MASK;
}

static void iwl_set_radio_cfg(const struct iwl_cfg *cfg,
			      struct iwl_nvm_data *data,
			      u32 radio_cfg)
{
	if (cfg->nvm_type != IWL_NVM_EXT) {
		data->radio_cfg_type = NVM_RF_CFG_TYPE_MSK(radio_cfg);
		data->radio_cfg_step = NVM_RF_CFG_STEP_MSK(radio_cfg);
		data->radio_cfg_dash = NVM_RF_CFG_DASH_MSK(radio_cfg);
		data->radio_cfg_pnum = NVM_RF_CFG_PNUM_MSK(radio_cfg);
		return;
	}

	/* set the radio configuration for family 8000 */
	data->radio_cfg_type = EXT_NVM_RF_CFG_TYPE_MSK(radio_cfg);
	data->radio_cfg_step = EXT_NVM_RF_CFG_STEP_MSK(radio_cfg);
	data->radio_cfg_dash = EXT_NVM_RF_CFG_DASH_MSK(radio_cfg);
	data->radio_cfg_pnum = EXT_NVM_RF_CFG_FLAVOR_MSK(radio_cfg);
	data->valid_tx_ant = EXT_NVM_RF_CFG_TX_ANT_MSK(radio_cfg);
	data->valid_rx_ant = EXT_NVM_RF_CFG_RX_ANT_MSK(radio_cfg);
}

static void iwl_flip_hw_address(__le32 mac_addr0, __le32 mac_addr1, u8 *dest)
{
	const u8 *hw_addr;

	hw_addr = (const u8 *)&mac_addr0;
	dest[0] = hw_addr[3];
	dest[1] = hw_addr[2];
	dest[2] = hw_addr[1];
	dest[3] = hw_addr[0];

	hw_addr = (const u8 *)&mac_addr1;
	dest[4] = hw_addr[1];
	dest[5] = hw_addr[0];
}

static void iwl_set_hw_address_from_csr(struct iwl_trans *trans,
					 struct iwl_nvm_data *data)
{
	__le32 mac_addr0 =
		cpu_to_le32(iwl_read32(trans,
				       trans->cfg->csr->mac_addr0_strap));
	__le32 mac_addr1 =
		cpu_to_le32(iwl_read32(trans,
				       trans->cfg->csr->mac_addr1_strap));

	iwl_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr);
	/*
	 * If the OEM fused a valid address, use it instead of the one in the
	 * OTP
	 */
	if (is_valid_ether_addr(data->hw_addr))
		return;

	mac_addr0 = cpu_to_le32(iwl_read32(trans,
					   trans->cfg->csr->mac_addr0_otp));
	mac_addr1 = cpu_to_le32(iwl_read32(trans,
					   trans->cfg->csr->mac_addr1_otp));

	iwl_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr);
}

static void iwl_set_hw_address_family_8000(struct iwl_trans *trans,
					   const struct iwl_cfg *cfg,
					   struct iwl_nvm_data *data,
					   const __le16 *mac_override,
					   const __be16 *nvm_hw)
{
	const u8 *hw_addr;

	if (mac_override) {
		static const u8 reserved_mac[] = {
			0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
		};

		hw_addr = (const u8 *)(mac_override +
				       MAC_ADDRESS_OVERRIDE_EXT_NVM);

		/*
		 * Store the MAC address from MAO section.
		 * No byte swapping is required in MAO section
		 */
		memcpy(data->hw_addr, hw_addr, ETH_ALEN);

		/*
		 * Force the use of the OTP MAC address in case of reserved MAC
		 * address in the NVM, or if address is given but invalid.
		 */
		if (is_valid_ether_addr(data->hw_addr) &&
		    memcmp(reserved_mac, hw_addr, ETH_ALEN) != 0)
			return;

		IWL_ERR(trans,
			"mac address from nvm override section is not valid\n");
	}

	if (nvm_hw) {
		/* read the mac address from WFMP registers */
		__le32 mac_addr0 = cpu_to_le32(iwl_trans_read_prph(trans,
						WFMP_MAC_ADDR_0));
		__le32 mac_addr1 = cpu_to_le32(iwl_trans_read_prph(trans,
						WFMP_MAC_ADDR_1));

		iwl_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr);

		return;
	}

	IWL_ERR(trans, "mac address is not found\n");
}

static int iwl_set_hw_address(struct iwl_trans *trans,
			      const struct iwl_cfg *cfg,
			      struct iwl_nvm_data *data, const __be16 *nvm_hw,
			      const __le16 *mac_override)
{
	if (cfg->mac_addr_from_csr) {
		iwl_set_hw_address_from_csr(trans, data);
	} else if (cfg->nvm_type != IWL_NVM_EXT) {
		const u8 *hw_addr = (const u8 *)(nvm_hw + HW_ADDR);

		/* The byte order is little endian 16 bit, meaning 214365 */
		data->hw_addr[0] = hw_addr[1];
		data->hw_addr[1] = hw_addr[0];
		data->hw_addr[2] = hw_addr[3];
		data->hw_addr[3] = hw_addr[2];
		data->hw_addr[4] = hw_addr[5];
		data->hw_addr[5] = hw_addr[4];
	} else {
		iwl_set_hw_address_family_8000(trans, cfg, data,
					       mac_override, nvm_hw);
	}

	if (!is_valid_ether_addr(data->hw_addr)) {
		IWL_ERR(trans, "no valid mac address was found\n");
		return -EINVAL;
	}

	IWL_INFO(trans, "base HW address: %pM\n", data->hw_addr);

	return 0;
}
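/*
 * Worked example for the "214365" ordering above (the OTP bytes are made
 * up): the HW section stores the MAC address as little-endian 16-bit
 * words, so OTP bytes 34 12 78 56 bc 9a yield the address
 * 12:34:56:78:9a:bc - every pair of bytes is swapped, nothing else.
 */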
static bool
iwl_nvm_no_wide_in_5ghz(struct device *dev, const struct iwl_cfg *cfg,
			const __be16 *nvm_hw)
{
	/*
	 * Workaround a bug in Indonesia SKUs where the regulatory in
	 * some 7000-family OTPs erroneously allow wide channels in
	 * 5GHz.  To check for Indonesia, we take the SKU value from
	 * bits 1-4 in the subsystem ID and check if it is either 5 or
	 * 9.  In those cases, we need to force-disable wide channels
	 * in 5GHz otherwise the FW will throw a sysassert when we try
	 * to use them.
	 */
	if (cfg->device_family == IWL_DEVICE_FAMILY_7000) {
		/*
		 * Unlike the other sections in the NVM, the hw
		 * section uses big-endian.
		 */
		u16 subsystem_id = be16_to_cpup(nvm_hw + SUBSYSTEM_ID);
		u8 sku = (subsystem_id & 0x1e) >> 1;

		if (sku == 5 || sku == 9) {
			IWL_DEBUG_EEPROM(dev,
					 "disabling wide channels in 5GHz (0x%0x %d)\n",
					 subsystem_id, sku);
			return true;
		}
	}

	return false;
}
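/*
 * Worked example for the check above (the subsystem ID is made up): a
 * subsystem ID of 0x0012 gives sku = (0x0012 & 0x1e) >> 1 = 9, which is
 * one of the Indonesia SKUs, so wide 5 GHz channels would be
 * force-disabled.
 */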
struct iwl_nvm_data *
iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
		   const __be16 *nvm_hw, const __le16 *nvm_sw,
		   const __le16 *nvm_calib, const __le16 *regulatory,
		   const __le16 *mac_override, const __le16 *phy_sku,
		   u8 tx_chains, u8 rx_chains, bool lar_fw_supported)
{
	struct device *dev = trans->dev;
	struct iwl_nvm_data *data;
	bool lar_enabled;
	u32 sku, radio_cfg;
	u32 sbands_flags = 0;
	u16 lar_config;
	const __le16 *ch_section;

	if (cfg->nvm_type != IWL_NVM_EXT)
		data = kzalloc(sizeof(*data) +
			       sizeof(struct ieee80211_channel) *
			       IWL_NVM_NUM_CHANNELS,
			       GFP_KERNEL);
	else
		data = kzalloc(sizeof(*data) +
			       sizeof(struct ieee80211_channel) *
			       IWL_NVM_NUM_CHANNELS_EXT,
			       GFP_KERNEL);
	if (!data)
		return NULL;

	data->nvm_version = iwl_get_nvm_version(cfg, nvm_sw);

	radio_cfg = iwl_get_radio_cfg(cfg, nvm_sw, phy_sku);
	iwl_set_radio_cfg(cfg, data, radio_cfg);
	if (data->valid_tx_ant)
		tx_chains &= data->valid_tx_ant;
	if (data->valid_rx_ant)
		rx_chains &= data->valid_rx_ant;

	sku = iwl_get_sku(cfg, nvm_sw, phy_sku);
	data->sku_cap_band_24ghz_enable = sku & NVM_SKU_CAP_BAND_24GHZ;
	data->sku_cap_band_52ghz_enable = sku & NVM_SKU_CAP_BAND_52GHZ;
	data->sku_cap_11n_enable = sku & NVM_SKU_CAP_11N_ENABLE;
	if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_ALL)
		data->sku_cap_11n_enable = false;
	data->sku_cap_11ac_enable = data->sku_cap_11n_enable &&
				    (sku & NVM_SKU_CAP_11AC_ENABLE);
	data->sku_cap_mimo_disabled = sku & NVM_SKU_CAP_MIMO_DISABLE;

	data->n_hw_addrs = iwl_get_n_hw_addrs(cfg, nvm_sw);

	if (cfg->nvm_type != IWL_NVM_EXT) {
		/* Checking for required sections */
		if (!nvm_calib) {
			IWL_ERR(trans,
				"Can't parse empty Calib NVM sections\n");
			kfree(data);
			return NULL;
		}

		ch_section = cfg->nvm_type == IWL_NVM_SDP ?
			     &regulatory[NVM_CHANNELS_SDP] :
			     &nvm_sw[NVM_CHANNELS];

		/* in family 8000 Xtal calibration values moved to OTP */
		data->xtal_calib[0] = *(nvm_calib + XTAL_CALIB);
		data->xtal_calib[1] = *(nvm_calib + XTAL_CALIB + 1);
		lar_enabled = true;
	} else {
		u16 lar_offset = data->nvm_version < 0xE39 ?
				 NVM_LAR_OFFSET_OLD :
				 NVM_LAR_OFFSET;

		lar_config = le16_to_cpup(regulatory + lar_offset);
		data->lar_enabled = !!(lar_config &
				       NVM_LAR_ENABLED);
		lar_enabled = data->lar_enabled;
		ch_section = &regulatory[NVM_CHANNELS_EXTENDED];
	}

	/* If no valid mac address was found - bail out */
	if (iwl_set_hw_address(trans, cfg, data, nvm_hw, mac_override)) {
		kfree(data);
		return NULL;
	}

	if (lar_fw_supported && lar_enabled)
		sbands_flags |= IWL_NVM_SBANDS_FLAGS_LAR;

	if (iwl_nvm_no_wide_in_5ghz(dev, cfg, nvm_hw))
		sbands_flags |= IWL_NVM_SBANDS_FLAGS_NO_WIDE_IN_5GHZ;

	iwl_init_sbands(dev, cfg, data, ch_section, tx_chains, rx_chains,
			sbands_flags);
	data->calib_version = 255;

	return data;
}
IWL_EXPORT_SYMBOL(iwl_parse_nvm_data);

static u32 iwl_nvm_get_regdom_bw_flags(const u8 *nvm_chan,
				       int ch_idx, u16 nvm_flags,
				       const struct iwl_cfg *cfg)
{
	u32 flags = NL80211_RRF_NO_HT40;
	u32 last_5ghz_ht = LAST_5GHZ_HT;

	if (cfg->nvm_type == IWL_NVM_EXT)
		last_5ghz_ht = LAST_5GHZ_HT_FAMILY_8000;

	if (ch_idx < NUM_2GHZ_CHANNELS &&
	    (nvm_flags & NVM_CHANNEL_40MHZ)) {
		if (nvm_chan[ch_idx] <= LAST_2GHZ_HT_PLUS)
			flags &= ~NL80211_RRF_NO_HT40PLUS;
		if (nvm_chan[ch_idx] >= FIRST_2GHZ_HT_MINUS)
			flags &= ~NL80211_RRF_NO_HT40MINUS;
	} else if (nvm_chan[ch_idx] <= last_5ghz_ht &&
		   (nvm_flags & NVM_CHANNEL_40MHZ)) {
		if ((ch_idx - NUM_2GHZ_CHANNELS) % 2 == 0)
			flags &= ~NL80211_RRF_NO_HT40PLUS;
		else
			flags &= ~NL80211_RRF_NO_HT40MINUS;
	}

	if (!(nvm_flags & NVM_CHANNEL_80MHZ))
		flags |= NL80211_RRF_NO_80MHZ;
	if (!(nvm_flags & NVM_CHANNEL_160MHZ))
		flags |= NL80211_RRF_NO_160MHZ;

	if (!(nvm_flags & NVM_CHANNEL_ACTIVE))
		flags |= NL80211_RRF_NO_IR;

	if (nvm_flags & NVM_CHANNEL_RADAR)
		flags |= NL80211_RRF_DFS;

	if (nvm_flags & NVM_CHANNEL_INDOOR_ONLY)
		flags |= NL80211_RRF_NO_OUTDOOR;

	/* Set the GO concurrent flag only in case that NO_IR is set.
	 * Otherwise it is meaningless
	 */
	if ((nvm_flags & NVM_CHANNEL_GO_CONCURRENT) &&
	    (flags & NL80211_RRF_NO_IR))
		flags |= NL80211_RRF_GO_CONCURRENT;

	return flags;
}

struct regdb_ptrs {
	struct ieee80211_wmm_rule *rule;
	u32 token;
};
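/*
 * Note on rule merging in iwl_parse_nvm_mcc_info() below (the channel
 * numbers are only an illustration): a new reg rule is started only when
 * the flags change or the gap to the previous center frequency exceeds
 * 20 MHz.  Two adjacent valid channels with identical flags, e.g. 36
 * (5180 MHz) and 40 (5200 MHz), therefore share one rule spanning
 * 5170-5210 MHz, and NL80211_RRF_AUTO_BW lets cfg80211 work out the
 * usable bandwidth.
 */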
struct ieee80211_regdomain *
iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
		       int num_of_ch, __le32 *channels, u16 fw_mcc,
		       u16 geo_info)
{
	int ch_idx;
	u16 ch_flags;
	u32 reg_rule_flags, prev_reg_rule_flags = 0;
	const u8 *nvm_chan = cfg->nvm_type == IWL_NVM_EXT ?
			     iwl_ext_nvm_channels : iwl_nvm_channels;
	struct ieee80211_regdomain *regd, *copy_rd;
	int size_of_regd, regd_to_copy, wmms_to_copy;
	int size_of_wmms = 0;
	struct ieee80211_reg_rule *rule;
	struct ieee80211_wmm_rule *wmm_rule, *d_wmm, *s_wmm;
	struct regdb_ptrs *regdb_ptrs;
	enum nl80211_band band;
	int center_freq, prev_center_freq = 0;
	int valid_rules = 0, n_wmms = 0;
	int i;
	bool new_rule;
	int max_num_ch = cfg->nvm_type == IWL_NVM_EXT ?
			 IWL_NVM_NUM_CHANNELS_EXT : IWL_NVM_NUM_CHANNELS;

	if (WARN_ON_ONCE(num_of_ch > NL80211_MAX_SUPP_REG_RULES))
		return ERR_PTR(-EINVAL);

	if (WARN_ON(num_of_ch > max_num_ch))
		num_of_ch = max_num_ch;

	IWL_DEBUG_DEV(dev, IWL_DL_LAR, "building regdom for %d channels\n",
		      num_of_ch);

	/* build a regdomain rule for every valid channel */
	size_of_regd =
		sizeof(struct ieee80211_regdomain) +
		num_of_ch * sizeof(struct ieee80211_reg_rule);

	if (geo_info & GEO_WMM_ETSI_5GHZ_INFO)
		size_of_wmms =
			num_of_ch * sizeof(struct ieee80211_wmm_rule);

	regd = kzalloc(size_of_regd + size_of_wmms, GFP_KERNEL);
	if (!regd)
		return ERR_PTR(-ENOMEM);

	regdb_ptrs = kcalloc(num_of_ch, sizeof(*regdb_ptrs), GFP_KERNEL);
	if (!regdb_ptrs) {
		copy_rd = ERR_PTR(-ENOMEM);
		goto out;
	}

	/* set alpha2 from FW. */
	regd->alpha2[0] = fw_mcc >> 8;
	regd->alpha2[1] = fw_mcc & 0xff;

	wmm_rule = (struct ieee80211_wmm_rule *)((u8 *)regd + size_of_regd);

	for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) {
		ch_flags = (u16)__le32_to_cpup(channels + ch_idx);
		band = (ch_idx < NUM_2GHZ_CHANNELS) ?
		       NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
		center_freq = ieee80211_channel_to_frequency(nvm_chan[ch_idx],
							     band);
		new_rule = false;

		if (!(ch_flags & NVM_CHANNEL_VALID)) {
			iwl_nvm_print_channel_flags(dev, IWL_DL_LAR,
						    nvm_chan[ch_idx], ch_flags);
			continue;
		}

		reg_rule_flags = iwl_nvm_get_regdom_bw_flags(nvm_chan, ch_idx,
							     ch_flags, cfg);

		/* we can't continue the same rule */
		if (ch_idx == 0 || prev_reg_rule_flags != reg_rule_flags ||
		    center_freq - prev_center_freq > 20) {
			valid_rules++;
			new_rule = true;
		}

		rule = &regd->reg_rules[valid_rules - 1];

		if (new_rule)
			rule->freq_range.start_freq_khz =
						MHZ_TO_KHZ(center_freq - 10);

		rule->freq_range.end_freq_khz = MHZ_TO_KHZ(center_freq + 10);

		/* this doesn't matter - not used by FW */
		rule->power_rule.max_antenna_gain = DBI_TO_MBI(6);
		rule->power_rule.max_eirp =
			DBM_TO_MBM(IWL_DEFAULT_MAX_TX_POWER);

		rule->flags = reg_rule_flags;

		/* rely on auto-calculation to merge BW of contiguous chans */
		rule->flags |= NL80211_RRF_AUTO_BW;
		rule->freq_range.max_bandwidth_khz = 0;

		prev_center_freq = center_freq;
		prev_reg_rule_flags = reg_rule_flags;

		iwl_nvm_print_channel_flags(dev, IWL_DL_LAR,
					    nvm_chan[ch_idx], ch_flags);

		if (!(geo_info & GEO_WMM_ETSI_5GHZ_INFO) ||
		    band == NL80211_BAND_2GHZ)
			continue;

		if (!reg_query_regdb_wmm(regd->alpha2, center_freq,
					 &regdb_ptrs[n_wmms].token, wmm_rule)) {
			/* Add only new rules */
			for (i = 0; i < n_wmms; i++) {
				if (regdb_ptrs[i].token ==
				    regdb_ptrs[n_wmms].token) {
					rule->wmm_rule = regdb_ptrs[i].rule;
					break;
				}
			}
			if (i == n_wmms) {
				rule->wmm_rule = wmm_rule;
				regdb_ptrs[n_wmms++].rule = wmm_rule;
				wmm_rule++;
			}
		}
	}

	regd->n_reg_rules = valid_rules;
	regd->n_wmm_rules = n_wmms;

	/*
	 * Narrow down the regdomain by copying only the used regulatory
	 * rules, so there is no hole between the reg rules and the wmm
	 * rules that follow them.
	 */
	regd_to_copy = sizeof(struct ieee80211_regdomain) +
		valid_rules * sizeof(struct ieee80211_reg_rule);

	wmms_to_copy = sizeof(struct ieee80211_wmm_rule) * n_wmms;

	copy_rd = kzalloc(regd_to_copy + wmms_to_copy, GFP_KERNEL);
	if (!copy_rd) {
		copy_rd = ERR_PTR(-ENOMEM);
		goto out;
	}

	memcpy(copy_rd, regd, regd_to_copy);
	memcpy((u8 *)copy_rd + regd_to_copy, (u8 *)regd + size_of_regd,
	       wmms_to_copy);

	d_wmm = (struct ieee80211_wmm_rule *)((u8 *)copy_rd + regd_to_copy);
	s_wmm = (struct ieee80211_wmm_rule *)((u8 *)regd + size_of_regd);

	for (i = 0; i < regd->n_reg_rules; i++) {
		if (!regd->reg_rules[i].wmm_rule)
			continue;

		copy_rd->reg_rules[i].wmm_rule = d_wmm +
			(regd->reg_rules[i].wmm_rule - s_wmm);
	}

out:
	kfree(regdb_ptrs);
	kfree(regd);
	return copy_rd;
}
IWL_EXPORT_SYMBOL(iwl_parse_nvm_mcc_info);

#define IWL_MAX_NVM_SECTION_SIZE	0x1b58
#define IWL_MAX_EXT_NVM_SECTION_SIZE	0x1ffc
#define MAX_NVM_FILE_LEN	16384

void iwl_nvm_fixups(u32 hw_id, unsigned int section, u8 *data,
		    unsigned int len)
{
#define IWL_4165_DEVICE_ID	0x5501
#define NVM_SKU_CAP_MIMO_DISABLE BIT(5)

	if (section == NVM_SECTION_TYPE_PHY_SKU &&
	    hw_id == IWL_4165_DEVICE_ID && data && len >= 5 &&
	    (data[4] & NVM_SKU_CAP_MIMO_DISABLE))
		/* OTP 0x52 bug work around: it's a 1x1 device */
		data[3] = ANT_B | (ANT_B << 4);
}
IWL_EXPORT_SYMBOL(iwl_nvm_fixups);
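/*
 * Worked example for iwl_nvm_fixups() above: on hardware identified as
 * IWL_4165_DEVICE_ID (0x5501) whose PHY_SKU section has the MIMO-disable
 * bit set in byte 4, byte 3 is rewritten to ANT_B | (ANT_B << 4), i.e.
 * both the valid TX and valid RX antenna nibbles are forced to antenna B
 * only, so the device is treated as 1x1.
 */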
/*
 * Reads external NVM from a file into mvm->nvm_sections
 *
 * HOW TO CREATE THE NVM FILE FORMAT:
 * ------------------------------
 * 1. create hex file, format:
 *      3800 -> header
 *      0000 -> header
 *      5a40 -> data
 *
 *   rev - 6 bit (word1)
 *   len - 10 bit (word1)
 *   id - 4 bit (word2)
 *   rsv - 12 bit (word2)
 *
 * 2. flip 8bits with 8 bits per line to get the right NVM file format
 *
 * 3. create binary file from the hex file
 *
 * 4. save as "iNVM_xxx.bin" under /lib/firmware
 */
int iwl_read_external_nvm(struct iwl_trans *trans,
			  const char *nvm_file_name,
			  struct iwl_nvm_section *nvm_sections)
{
	int ret, section_size;
	u16 section_id;
	const struct firmware *fw_entry;
	const struct {
		__le16 word1;
		__le16 word2;
		u8 data[];
	} *file_sec;
	const u8 *eof;
	u8 *temp;
	int max_section_size;
	const __le32 *dword_buff;

#define NVM_WORD1_LEN(x) (8 * (x & 0x03FF))
#define NVM_WORD2_ID(x) (x >> 12)
#define EXT_NVM_WORD2_LEN(x) (2 * (((x) & 0xFF) << 8 | (x) >> 8))
#define EXT_NVM_WORD1_ID(x) ((x) >> 4)
#define NVM_HEADER_0	(0x2A504C54)
#define NVM_HEADER_1	(0x4E564D2A)
#define NVM_HEADER_SIZE	(4 * sizeof(u32))

	IWL_DEBUG_EEPROM(trans->dev, "Read from external NVM\n");

	/* Maximal size depends on NVM version */
	if (trans->cfg->nvm_type != IWL_NVM_EXT)
		max_section_size = IWL_MAX_NVM_SECTION_SIZE;
	else
		max_section_size = IWL_MAX_EXT_NVM_SECTION_SIZE;

	/*
	 * Obtain NVM image via request_firmware. Since we already used
	 * request_firmware_nowait() for the firmware binary load and only
	 * get here after that we assume the NVM request can be satisfied
	 * synchronously.
	 */
	ret = request_firmware(&fw_entry, nvm_file_name, trans->dev);
	if (ret) {
		IWL_ERR(trans, "ERROR: %s isn't available %d\n",
			nvm_file_name, ret);
		return ret;
	}

	IWL_INFO(trans, "Loaded NVM file %s (%zu bytes)\n",
		 nvm_file_name, fw_entry->size);

	if (fw_entry->size > MAX_NVM_FILE_LEN) {
		IWL_ERR(trans, "NVM file too large\n");
		ret = -EINVAL;
		goto out;
	}

	eof = fw_entry->data + fw_entry->size;
	dword_buff = (__le32 *)fw_entry->data;

	/* Some NVM files contain a header.
	 * The header is identified by the following 2 dwords:
	 * dword[0] = 0x2A504C54
	 * dword[1] = 0x4E564D2A
	 *
	 * This header must be skipped when providing the NVM data to the FW.
	 */
	if (fw_entry->size > NVM_HEADER_SIZE &&
	    dword_buff[0] == cpu_to_le32(NVM_HEADER_0) &&
	    dword_buff[1] == cpu_to_le32(NVM_HEADER_1)) {
		file_sec = (void *)(fw_entry->data + NVM_HEADER_SIZE);
		IWL_INFO(trans, "NVM Version %08X\n", le32_to_cpu(dword_buff[2]));
		IWL_INFO(trans, "NVM Manufacturing date %08X\n",
			 le32_to_cpu(dword_buff[3]));

		/* nvm file validation, dword_buff[2] holds the file version */
		if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000 &&
		    CSR_HW_REV_STEP(trans->hw_rev) == SILICON_C_STEP &&
		    le32_to_cpu(dword_buff[2]) < 0xE4A) {
			ret = -EFAULT;
			goto out;
		}
	} else {
		file_sec = (void *)fw_entry->data;
	}

	while (true) {
		if (file_sec->data > eof) {
			IWL_ERR(trans,
				"ERROR - NVM file too short for section header\n");
			ret = -EINVAL;
			break;
		}

		/* check for EOF marker */
		if (!file_sec->word1 && !file_sec->word2) {
			ret = 0;
			break;
		}

		if (trans->cfg->nvm_type != IWL_NVM_EXT) {
			section_size =
				2 * NVM_WORD1_LEN(le16_to_cpu(file_sec->word1));
			section_id = NVM_WORD2_ID(le16_to_cpu(file_sec->word2));
		} else {
			section_size = 2 * EXT_NVM_WORD2_LEN(
						le16_to_cpu(file_sec->word2));
			section_id = EXT_NVM_WORD1_ID(
						le16_to_cpu(file_sec->word1));
		}

		if (section_size > max_section_size) {
			IWL_ERR(trans, "ERROR - section too large (%d)\n",
				section_size);
			ret = -EINVAL;
			break;
		}

		if (!section_size) {
			IWL_ERR(trans, "ERROR - section empty\n");
			ret = -EINVAL;
			break;
		}

		if (file_sec->data + section_size > eof) {
			IWL_ERR(trans,
				"ERROR - NVM file too short for section (%d bytes)\n",
				section_size);
			ret = -EINVAL;
			break;
		}

		if (WARN(section_id >= NVM_MAX_NUM_SECTIONS,
			 "Invalid NVM section ID %d\n", section_id)) {
			ret = -EINVAL;
			break;
		}

		temp = kmemdup(file_sec->data, section_size, GFP_KERNEL);
		if (!temp) {
			ret = -ENOMEM;
			break;
		}

		iwl_nvm_fixups(trans->hw_id, section_id, temp, section_size);

		kfree(nvm_sections[section_id].data);
		nvm_sections[section_id].data = temp;
		nvm_sections[section_id].length = section_size;

		/* advance to the next section */
		file_sec = (void *)(file_sec->data + section_size);
	}
out:
	release_firmware(fw_entry);
	return ret;
}
IWL_EXPORT_SYMBOL(iwl_read_external_nvm);
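/*
 * Worked example for the section header macros in iwl_read_external_nvm()
 * above (the header words are made up): in the IWL_NVM_EXT layout the
 * length word is stored byte-swapped, so EXT_NVM_WORD2_LEN(0x3412)
 * evaluates to 2 * 0x1234 = 0x2468, and EXT_NVM_WORD1_ID(0x0050) is 5.
 * In the older layout the length comes from the low 10 bits of word1
 * (NVM_WORD1_LEN) and the section ID from the top 4 bits of word2
 * (NVM_WORD2_ID).
 */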
struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans,
				 const struct iwl_fw *fw)
{
	struct iwl_nvm_get_info cmd = {};
	struct iwl_nvm_get_info_rsp *rsp;
	struct iwl_nvm_data *nvm;
	struct iwl_host_cmd hcmd = {
		.flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
		.data = { &cmd, },
		.len = { sizeof(cmd) },
		.id = WIDE_ID(REGULATORY_AND_NVM_GROUP, NVM_GET_INFO)
	};
	int ret;
	bool lar_fw_supported = !iwlwifi_mod_params.lar_disable &&
				fw_has_capa(&fw->ucode_capa,
					    IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
	u32 mac_flags;
	u32 sbands_flags = 0;

	ret = iwl_trans_send_cmd(trans, &hcmd);
	if (ret)
		return ERR_PTR(ret);

	if (WARN(iwl_rx_packet_payload_len(hcmd.resp_pkt) != sizeof(*rsp),
		 "Invalid payload len in NVM response from FW %d",
		 iwl_rx_packet_payload_len(hcmd.resp_pkt))) {
		ret = -EINVAL;
		goto out;
	}

	rsp = (void *)hcmd.resp_pkt->data;
	if (le32_to_cpu(rsp->general.flags) & NVM_GENERAL_FLAGS_EMPTY_OTP)
		IWL_INFO(trans, "OTP is empty\n");

	nvm = kzalloc(sizeof(*nvm) +
		      sizeof(struct ieee80211_channel) * IWL_NUM_CHANNELS,
		      GFP_KERNEL);
	if (!nvm) {
		ret = -ENOMEM;
		goto out;
	}

	iwl_set_hw_address_from_csr(trans, nvm);
	/* TODO: if platform NVM has MAC address - override it here */

	if (!is_valid_ether_addr(nvm->hw_addr)) {
		IWL_ERR(trans, "no valid mac address was found\n");
		ret = -EINVAL;
		goto err_free;
	}

	IWL_INFO(trans, "base HW address: %pM\n", nvm->hw_addr);

	/* Initialize general data */
	nvm->nvm_version = le16_to_cpu(rsp->general.nvm_version);

	/* Initialize MAC sku data */
	mac_flags = le32_to_cpu(rsp->mac_sku.mac_sku_flags);
	nvm->sku_cap_11ac_enable =
		!!(mac_flags & NVM_MAC_SKU_FLAGS_802_11AC_ENABLED);
	nvm->sku_cap_11n_enable =
		!!(mac_flags & NVM_MAC_SKU_FLAGS_802_11N_ENABLED);
	nvm->sku_cap_11ax_enable =
		!!(mac_flags & NVM_MAC_SKU_FLAGS_802_11AX_ENABLED);
	nvm->sku_cap_band_24ghz_enable =
		!!(mac_flags & NVM_MAC_SKU_FLAGS_BAND_2_4_ENABLED);
	nvm->sku_cap_band_52ghz_enable =
		!!(mac_flags & NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED);
	nvm->sku_cap_mimo_disabled =
		!!(mac_flags & NVM_MAC_SKU_FLAGS_MIMO_DISABLED);

	/* Initialize PHY sku data */
	nvm->valid_tx_ant = (u8)le32_to_cpu(rsp->phy_sku.tx_chains);
	nvm->valid_rx_ant = (u8)le32_to_cpu(rsp->phy_sku.rx_chains);

	if (le32_to_cpu(rsp->regulatory.lar_enabled) && lar_fw_supported) {
		nvm->lar_enabled = true;
		sbands_flags |= IWL_NVM_SBANDS_FLAGS_LAR;
	}

	iwl_init_sbands(trans->dev, trans->cfg, nvm,
			rsp->regulatory.channel_profile,
			nvm->valid_tx_ant & fw->valid_tx_ant,
			nvm->valid_rx_ant & fw->valid_rx_ant,
			sbands_flags);

	iwl_free_resp(&hcmd);
	return nvm;

err_free:
	kfree(nvm);
out:
	iwl_free_resp(&hcmd);
	return ERR_PTR(ret);
}
IWL_EXPORT_SYMBOL(iwl_get_nvm);