/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *****************************************************************************/
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/firmware.h>

#include "iwl-drv.h"
#include "iwl-modparams.h"
#include "iwl-nvm-parse.h"
#include "iwl-prph.h"
#include "iwl-io.h"
#include "iwl-csr.h"
#include "fw/acpi.h"
#include "fw/api/nvm-reg.h"
#include "fw/api/commands.h"
#include "fw/api/cmdhdr.h"
#include "fw/img.h"

/* NVM offsets (in words) definitions */
enum nvm_offsets {
	/* NVM HW-Section offset (in words) definitions */
	SUBSYSTEM_ID		= 0x0A,
	HW_ADDR			= 0x15,

	/* NVM SW-Section offset (in words) definitions */
	NVM_SW_SECTION		= 0x1C0,
	NVM_VERSION		= 0,
	RADIO_CFG		= 1,
	SKU			= 2,
	N_HW_ADDRS		= 3,
	NVM_CHANNELS		= 0x1E0 - NVM_SW_SECTION,

	/* NVM calibration section offset (in words) definitions */
	NVM_CALIB_SECTION	= 0x2B8,
	XTAL_CALIB		= 0x316 - NVM_CALIB_SECTION,

	/* NVM REGULATORY -Section offset (in words) definitions */
	NVM_CHANNELS_SDP	= 0,
};

enum ext_nvm_offsets {
	/* NVM HW-Section offset (in words) definitions */
	MAC_ADDRESS_OVERRIDE_EXT_NVM	= 1,

	/* NVM SW-Section offset (in words) definitions */
	NVM_VERSION_EXT_NVM		= 0,
	RADIO_CFG_FAMILY_EXT_NVM	= 0,
	SKU_FAMILY_8000			= 2,
	N_HW_ADDRS_FAMILY_8000		= 3,

	/* NVM REGULATORY -Section offset (in words) definitions */
	NVM_CHANNELS_EXTENDED		= 0,
	NVM_LAR_OFFSET_OLD		= 0x4C7,
	NVM_LAR_OFFSET			= 0x507,
	NVM_LAR_ENABLED			= 0x7,
};

/* SKU Capabilities (actual values from NVM definition) */
enum nvm_sku_bits {
	NVM_SKU_CAP_BAND_24GHZ		= BIT(0),
	NVM_SKU_CAP_BAND_52GHZ		= BIT(1),
	NVM_SKU_CAP_11N_ENABLE		= BIT(2),
	NVM_SKU_CAP_11AC_ENABLE		= BIT(3),
	NVM_SKU_CAP_MIMO_DISABLE	= BIT(5),
};

/*
 * These are the channel numbers in the order that they are stored in the NVM
 */
static const u8 iwl_nvm_channels[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64,
	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165
};

static const u8 iwl_ext_nvm_channels[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181
};

#define IWL_NVM_NUM_CHANNELS		ARRAY_SIZE(iwl_nvm_channels)
#define IWL_NVM_NUM_CHANNELS_EXT	ARRAY_SIZE(iwl_ext_nvm_channels)
#define NUM_2GHZ_CHANNELS		14
#define NUM_2GHZ_CHANNELS_EXT		14
#define FIRST_2GHZ_HT_MINUS		5
#define LAST_2GHZ_HT_PLUS		9
#define LAST_5GHZ_HT			165
#define LAST_5GHZ_HT_FAMILY_8000	181
#define N_HW_ADDR_MASK			0xF

/* rate data (static) */
static struct ieee80211_rate iwl_cfg80211_rates[] = {
	{ .bitrate = 1 * 10, .hw_value = 0, .hw_value_short = 0, },
	{ .bitrate = 2 * 10, .hw_value = 1, .hw_value_short = 1,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE, },
	{ .bitrate = 5.5 * 10, .hw_value = 2, .hw_value_short = 2,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE, },
	{ .bitrate = 11 * 10, .hw_value = 3, .hw_value_short = 3,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE, },
	{ .bitrate = 6 * 10, .hw_value = 4, .hw_value_short = 4, },
	{ .bitrate = 9 * 10, .hw_value = 5, .hw_value_short = 5, },
	{ .bitrate = 12 * 10, .hw_value = 6, .hw_value_short = 6, },
	{ .bitrate = 18 * 10, .hw_value = 7, .hw_value_short = 7, },
	{ .bitrate = 24 * 10, .hw_value = 8, .hw_value_short = 8, },
	{ .bitrate = 36 * 10, .hw_value = 9, .hw_value_short = 9, },
	{ .bitrate = 48 * 10, .hw_value = 10, .hw_value_short = 10, },
	{ .bitrate = 54 * 10, .hw_value = 11, .hw_value_short = 11, },
};
#define RATES_24_OFFS	0
#define N_RATES_24	ARRAY_SIZE(iwl_cfg80211_rates)
#define RATES_52_OFFS	4
#define N_RATES_52	(N_RATES_24 - RATES_52_OFFS)

/**
 * enum iwl_nvm_channel_flags - channel flags in NVM
 * @NVM_CHANNEL_VALID: channel is usable for this SKU/geo
 * @NVM_CHANNEL_IBSS: usable as an IBSS channel
 * @NVM_CHANNEL_ACTIVE: active scanning allowed
 * @NVM_CHANNEL_RADAR: radar detection required
 * @NVM_CHANNEL_INDOOR_ONLY: only indoor use is allowed
 * @NVM_CHANNEL_GO_CONCURRENT: GO operation is allowed when connected to BSS
 *	on same channel on 2.4 or same UNII band on 5.2
 * @NVM_CHANNEL_UNIFORM: uniform spreading required
 * @NVM_CHANNEL_20MHZ: 20 MHz channel okay
 * @NVM_CHANNEL_40MHZ: 40 MHz channel okay
 * @NVM_CHANNEL_80MHZ: 80 MHz channel okay
 * @NVM_CHANNEL_160MHZ: 160 MHz channel okay
 * @NVM_CHANNEL_DC_HIGH: DC HIGH required/allowed (?)
 */
enum iwl_nvm_channel_flags {
	NVM_CHANNEL_VALID		= BIT(0),
	NVM_CHANNEL_IBSS		= BIT(1),
	NVM_CHANNEL_ACTIVE		= BIT(3),
	NVM_CHANNEL_RADAR		= BIT(4),
	NVM_CHANNEL_INDOOR_ONLY		= BIT(5),
	NVM_CHANNEL_GO_CONCURRENT	= BIT(6),
	NVM_CHANNEL_UNIFORM		= BIT(7),
	NVM_CHANNEL_20MHZ		= BIT(8),
	NVM_CHANNEL_40MHZ		= BIT(9),
	NVM_CHANNEL_80MHZ		= BIT(10),
	NVM_CHANNEL_160MHZ		= BIT(11),
	NVM_CHANNEL_DC_HIGH		= BIT(12),
};

static inline void iwl_nvm_print_channel_flags(struct device *dev, u32 level,
					       int chan, u16 flags)
{
#define CHECK_AND_PRINT_I(x)	\
	((flags & NVM_CHANNEL_##x) ? " " #x : "")

	if (!(flags & NVM_CHANNEL_VALID)) {
		IWL_DEBUG_DEV(dev, level, "Ch. %d: 0x%x: No traffic\n",
			      chan, flags);
		return;
	}

	/* Note: already can print up to 101 characters, 110 is the limit! */
	IWL_DEBUG_DEV(dev, level,
		      "Ch. %d: 0x%x:%s%s%s%s%s%s%s%s%s%s%s%s\n",
		      chan, flags,
		      CHECK_AND_PRINT_I(VALID),
		      CHECK_AND_PRINT_I(IBSS),
		      CHECK_AND_PRINT_I(ACTIVE),
		      CHECK_AND_PRINT_I(RADAR),
		      CHECK_AND_PRINT_I(INDOOR_ONLY),
		      CHECK_AND_PRINT_I(GO_CONCURRENT),
		      CHECK_AND_PRINT_I(UNIFORM),
		      CHECK_AND_PRINT_I(20MHZ),
		      CHECK_AND_PRINT_I(40MHZ),
		      CHECK_AND_PRINT_I(80MHZ),
		      CHECK_AND_PRINT_I(160MHZ),
		      CHECK_AND_PRINT_I(DC_HIGH));
#undef CHECK_AND_PRINT_I
}

static u32 iwl_get_channel_flags(u8 ch_num, int ch_idx, bool is_5ghz,
				 u16 nvm_flags, const struct iwl_cfg *cfg)
{
	u32 flags = IEEE80211_CHAN_NO_HT40;
	u32 last_5ghz_ht = LAST_5GHZ_HT;

	if (cfg->nvm_type == IWL_NVM_EXT)
		last_5ghz_ht = LAST_5GHZ_HT_FAMILY_8000;

	if (!is_5ghz && (nvm_flags & NVM_CHANNEL_40MHZ)) {
		if (ch_num <= LAST_2GHZ_HT_PLUS)
			flags &= ~IEEE80211_CHAN_NO_HT40PLUS;
		if (ch_num >= FIRST_2GHZ_HT_MINUS)
			flags &= ~IEEE80211_CHAN_NO_HT40MINUS;
	} else if (ch_num <= last_5ghz_ht && (nvm_flags & NVM_CHANNEL_40MHZ)) {
		if ((ch_idx - NUM_2GHZ_CHANNELS) % 2 == 0)
			flags &= ~IEEE80211_CHAN_NO_HT40PLUS;
		else
			flags &= ~IEEE80211_CHAN_NO_HT40MINUS;
	}
	if (!(nvm_flags & NVM_CHANNEL_80MHZ))
		flags |= IEEE80211_CHAN_NO_80MHZ;
	if (!(nvm_flags & NVM_CHANNEL_160MHZ))
		flags |= IEEE80211_CHAN_NO_160MHZ;

	if (!(nvm_flags & NVM_CHANNEL_IBSS))
		flags |= IEEE80211_CHAN_NO_IR;

	if (!(nvm_flags & NVM_CHANNEL_ACTIVE))
		flags |= IEEE80211_CHAN_NO_IR;

	if (nvm_flags & NVM_CHANNEL_RADAR)
		flags |= IEEE80211_CHAN_RADAR;

	if (nvm_flags & NVM_CHANNEL_INDOOR_ONLY)
		flags |= IEEE80211_CHAN_INDOOR_ONLY;

	/* Set the GO concurrent flag only in case that NO_IR is set.
	 * Otherwise it is meaningless
	 */
	if ((nvm_flags & NVM_CHANNEL_GO_CONCURRENT) &&
	    (flags & IEEE80211_CHAN_NO_IR))
		flags |= IEEE80211_CHAN_IR_CONCURRENT;

	return flags;
}

static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
				struct iwl_nvm_data *data,
				const __le16 * const nvm_ch_flags,
				u32 sbands_flags)
{
	int ch_idx;
	int n_channels = 0;
	struct ieee80211_channel *channel;
	u16 ch_flags;
	int num_of_ch, num_2ghz_channels;
	const u8 *nvm_chan;

	if (cfg->nvm_type != IWL_NVM_EXT) {
		num_of_ch = IWL_NVM_NUM_CHANNELS;
		nvm_chan = &iwl_nvm_channels[0];
		num_2ghz_channels = NUM_2GHZ_CHANNELS;
	} else {
		num_of_ch = IWL_NVM_NUM_CHANNELS_EXT;
		nvm_chan = &iwl_ext_nvm_channels[0];
		num_2ghz_channels = NUM_2GHZ_CHANNELS_EXT;
	}

	for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) {
		bool is_5ghz = (ch_idx >= num_2ghz_channels);

		ch_flags = __le16_to_cpup(nvm_ch_flags + ch_idx);

		if (is_5ghz && !data->sku_cap_band_52ghz_enable)
			continue;

		/* workaround to disable wide channels in 5GHz */
		if ((sbands_flags & IWL_NVM_SBANDS_FLAGS_NO_WIDE_IN_5GHZ) &&
		    is_5ghz) {
			ch_flags &= ~(NVM_CHANNEL_40MHZ |
				      NVM_CHANNEL_80MHZ |
				      NVM_CHANNEL_160MHZ);
		}

		if (ch_flags & NVM_CHANNEL_160MHZ)
			data->vht160_supported = true;

		if (!(sbands_flags & IWL_NVM_SBANDS_FLAGS_LAR) &&
		    !(ch_flags & NVM_CHANNEL_VALID)) {
			/*
			 * Channels might become valid later if lar is
			 * supported, hence we still want to add them to
			 * the list of supported channels to cfg80211.
			 */
			iwl_nvm_print_channel_flags(dev, IWL_DL_EEPROM,
						    nvm_chan[ch_idx], ch_flags);
			continue;
		}

		channel = &data->channels[n_channels];
		n_channels++;

		channel->hw_value = nvm_chan[ch_idx];
		channel->band = is_5ghz ?
				NL80211_BAND_5GHZ : NL80211_BAND_2GHZ;
		channel->center_freq =
			ieee80211_channel_to_frequency(
					channel->hw_value, channel->band);

		/* Initialize regulatory-based run-time data */

		/*
		 * Default value - highest tx power value.  max_power
		 * is not used in mvm, and is used for backwards compatibility
		 */
		channel->max_power = IWL_DEFAULT_MAX_TX_POWER;

		/* don't put limitations in case we're using LAR */
		if (!(sbands_flags & IWL_NVM_SBANDS_FLAGS_LAR))
			channel->flags = iwl_get_channel_flags(nvm_chan[ch_idx],
							       ch_idx, is_5ghz,
							       ch_flags, cfg);
		else
			channel->flags = 0;

		iwl_nvm_print_channel_flags(dev, IWL_DL_EEPROM,
					    channel->hw_value, ch_flags);
		IWL_DEBUG_EEPROM(dev, "Ch. %d: %ddBm\n",
				 channel->hw_value, channel->max_power);
	}

	return n_channels;
}

static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
				  struct iwl_nvm_data *data,
				  struct ieee80211_sta_vht_cap *vht_cap,
				  u8 tx_chains, u8 rx_chains)
{
	int num_rx_ants = num_of_ant(rx_chains);
	int num_tx_ants = num_of_ant(tx_chains);
	unsigned int max_ampdu_exponent = (cfg->max_vht_ampdu_exponent ?:
					   IEEE80211_VHT_MAX_AMPDU_1024K);

	vht_cap->vht_supported = true;

	vht_cap->cap = IEEE80211_VHT_CAP_SHORT_GI_80 |
		       IEEE80211_VHT_CAP_RXSTBC_1 |
		       IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
		       3 << IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT |
		       max_ampdu_exponent <<
		       IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;

	if (data->vht160_supported)
		vht_cap->cap |= IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ |
				IEEE80211_VHT_CAP_SHORT_GI_160;

	if (cfg->vht_mu_mimo_supported)
		vht_cap->cap |= IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE;

	if (cfg->ht_params->ldpc)
		vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC;

	if (data->sku_cap_mimo_disabled) {
		num_rx_ants = 1;
		num_tx_ants = 1;
	}

	if (num_tx_ants > 1)
		vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC;
	else
		vht_cap->cap |= IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN;

	switch (iwlwifi_mod_params.amsdu_size) {
	case IWL_AMSDU_DEF:
		if (cfg->mq_rx_supported)
			vht_cap->cap |=
				IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454;
		else
			vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895;
		break;
	case IWL_AMSDU_4K:
		vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895;
		break;
	case IWL_AMSDU_8K:
		vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991;
		break;
	case IWL_AMSDU_12K:
		vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454;
		break;
	default:
		break;
	}

	vht_cap->vht_mcs.rx_mcs_map =
		cpu_to_le16(IEEE80211_VHT_MCS_SUPPORT_0_9 << 0 |
			    IEEE80211_VHT_MCS_SUPPORT_0_9 << 2 |
			    IEEE80211_VHT_MCS_NOT_SUPPORTED << 4 |
			    IEEE80211_VHT_MCS_NOT_SUPPORTED << 6 |
			    IEEE80211_VHT_MCS_NOT_SUPPORTED << 8 |
			    IEEE80211_VHT_MCS_NOT_SUPPORTED << 10 |
			    IEEE80211_VHT_MCS_NOT_SUPPORTED << 12 |
			    IEEE80211_VHT_MCS_NOT_SUPPORTED << 14);

	if (num_rx_ants == 1 || cfg->rx_with_siso_diversity) {
		vht_cap->cap |= IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN;
		/* this works because NOT_SUPPORTED == 3 */
		vht_cap->vht_mcs.rx_mcs_map |=
			cpu_to_le16(IEEE80211_VHT_MCS_NOT_SUPPORTED << 2);
	}

	vht_cap->vht_mcs.tx_mcs_map = vht_cap->vht_mcs.rx_mcs_map;
}

static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
			    struct iwl_nvm_data *data,
			    const __le16 *nvm_ch_flags, u8 tx_chains,
			    u8 rx_chains, u32 sbands_flags)
{
	int n_channels;
	int n_used = 0;
	struct ieee80211_supported_band *sband;

	n_channels = iwl_init_channel_map(dev, cfg, data, nvm_ch_flags,
					  sbands_flags);
	sband = &data->bands[NL80211_BAND_2GHZ];
	sband->band = NL80211_BAND_2GHZ;
	sband->bitrates = &iwl_cfg80211_rates[RATES_24_OFFS];
	sband->n_bitrates = N_RATES_24;
	n_used += iwl_init_sband_channels(data, sband, n_channels,
					  NL80211_BAND_2GHZ);
	iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, NL80211_BAND_2GHZ,
			     tx_chains, rx_chains);

	sband = &data->bands[NL80211_BAND_5GHZ];
	sband->band = NL80211_BAND_5GHZ;
	sband->bitrates = &iwl_cfg80211_rates[RATES_52_OFFS];
	sband->n_bitrates = N_RATES_52;
	n_used += iwl_init_sband_channels(data, sband, n_channels,
					  NL80211_BAND_5GHZ);
	iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, NL80211_BAND_5GHZ,
			     tx_chains, rx_chains);
	if (data->sku_cap_11ac_enable && !iwlwifi_mod_params.disable_11ac)
		iwl_init_vht_hw_capab(cfg, data, &sband->vht_cap,
				      tx_chains, rx_chains);

	if (n_channels != n_used)
		IWL_ERR_DEV(dev, "NVM: used only %d of %d channels\n",
			    n_used, n_channels);
}

static int iwl_get_sku(const struct iwl_cfg *cfg, const __le16 *nvm_sw,
		       const __le16 *phy_sku)
{
	if (cfg->nvm_type != IWL_NVM_EXT)
		return le16_to_cpup(nvm_sw + SKU);

	return le32_to_cpup((__le32 *)(phy_sku + SKU_FAMILY_8000));
}

static int iwl_get_nvm_version(const struct iwl_cfg *cfg, const __le16 *nvm_sw)
{
	if (cfg->nvm_type != IWL_NVM_EXT)
		return le16_to_cpup(nvm_sw + NVM_VERSION);
	else
		return le32_to_cpup((__le32 *)(nvm_sw +
					       NVM_VERSION_EXT_NVM));
}

static int iwl_get_radio_cfg(const struct iwl_cfg *cfg, const __le16 *nvm_sw,
			     const __le16 *phy_sku)
{
	if (cfg->nvm_type != IWL_NVM_EXT)
		return le16_to_cpup(nvm_sw + RADIO_CFG);

	return le32_to_cpup((__le32 *)(phy_sku + RADIO_CFG_FAMILY_EXT_NVM));
}

static int iwl_get_n_hw_addrs(const struct iwl_cfg *cfg, const __le16 *nvm_sw)
{
	int n_hw_addr;

	if (cfg->nvm_type != IWL_NVM_EXT)
		return le16_to_cpup(nvm_sw + N_HW_ADDRS);

	n_hw_addr = le32_to_cpup((__le32 *)(nvm_sw + N_HW_ADDRS_FAMILY_8000));

	return n_hw_addr & N_HW_ADDR_MASK;
}

static void iwl_set_radio_cfg(const struct iwl_cfg *cfg,
			      struct iwl_nvm_data *data,
			      u32 radio_cfg)
{
	if (cfg->nvm_type != IWL_NVM_EXT) {
		data->radio_cfg_type = NVM_RF_CFG_TYPE_MSK(radio_cfg);
		data->radio_cfg_step = NVM_RF_CFG_STEP_MSK(radio_cfg);
		data->radio_cfg_dash = NVM_RF_CFG_DASH_MSK(radio_cfg);
		data->radio_cfg_pnum = NVM_RF_CFG_PNUM_MSK(radio_cfg);
		return;
	}

	/* set the radio configuration for family 8000 */
	data->radio_cfg_type = EXT_NVM_RF_CFG_TYPE_MSK(radio_cfg);
	data->radio_cfg_step = EXT_NVM_RF_CFG_STEP_MSK(radio_cfg);
	data->radio_cfg_dash = EXT_NVM_RF_CFG_DASH_MSK(radio_cfg);
	data->radio_cfg_pnum = EXT_NVM_RF_CFG_FLAVOR_MSK(radio_cfg);
	data->valid_tx_ant = EXT_NVM_RF_CFG_TX_ANT_MSK(radio_cfg);
	data->valid_rx_ant = EXT_NVM_RF_CFG_RX_ANT_MSK(radio_cfg);
}
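
/*
 * The MAC address registers read from the CSR/PRPH hold the address
 * octets of each 32-bit word in reversed order; reassemble them here
 * into the usual transmission order in @dest.
 */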
static void iwl_flip_hw_address(__le32 mac_addr0, __le32 mac_addr1, u8 *dest)
{
	const u8 *hw_addr;

	hw_addr = (const u8 *)&mac_addr0;
	dest[0] = hw_addr[3];
	dest[1] = hw_addr[2];
	dest[2] = hw_addr[1];
	dest[3] = hw_addr[0];

	hw_addr = (const u8 *)&mac_addr1;
	dest[4] = hw_addr[1];
	dest[5] = hw_addr[0];
}

static void iwl_set_hw_address_from_csr(struct iwl_trans *trans,
					struct iwl_nvm_data *data)
{
	__le32 mac_addr0 =
		cpu_to_le32(iwl_read32(trans,
				       trans->cfg->csr->mac_addr0_strap));
	__le32 mac_addr1 =
		cpu_to_le32(iwl_read32(trans,
				       trans->cfg->csr->mac_addr1_strap));

	iwl_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr);
	/*
	 * If the OEM fused a valid address, use it instead of the one in the
	 * OTP
	 */
	if (is_valid_ether_addr(data->hw_addr))
		return;

	mac_addr0 = cpu_to_le32(iwl_read32(trans,
					   trans->cfg->csr->mac_addr0_otp));
	mac_addr1 = cpu_to_le32(iwl_read32(trans,
					   trans->cfg->csr->mac_addr1_otp));

	iwl_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr);
}

static void iwl_set_hw_address_family_8000(struct iwl_trans *trans,
					   const struct iwl_cfg *cfg,
					   struct iwl_nvm_data *data,
					   const __le16 *mac_override,
					   const __be16 *nvm_hw)
{
	const u8 *hw_addr;

	if (mac_override) {
		static const u8 reserved_mac[] = {
			0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
		};

		hw_addr = (const u8 *)(mac_override +
				       MAC_ADDRESS_OVERRIDE_EXT_NVM);

		/*
		 * Store the MAC address from MAO section.
		 * No byte swapping is required in MAO section
		 */
		memcpy(data->hw_addr, hw_addr, ETH_ALEN);

		/*
		 * Force the use of the OTP MAC address in case of reserved MAC
		 * address in the NVM, or if address is given but invalid.
		 */
		if (is_valid_ether_addr(data->hw_addr) &&
		    memcmp(reserved_mac, hw_addr, ETH_ALEN) != 0)
			return;

		IWL_ERR(trans,
			"mac address from nvm override section is not valid\n");
	}

	if (nvm_hw) {
		/* read the mac address from WFMP registers */
		__le32 mac_addr0 = cpu_to_le32(iwl_trans_read_prph(trans,
						WFMP_MAC_ADDR_0));
		__le32 mac_addr1 = cpu_to_le32(iwl_trans_read_prph(trans,
						WFMP_MAC_ADDR_1));

		iwl_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr);

		return;
	}

	IWL_ERR(trans, "mac address is not found\n");
}

static int iwl_set_hw_address(struct iwl_trans *trans,
			      const struct iwl_cfg *cfg,
			      struct iwl_nvm_data *data, const __be16 *nvm_hw,
			      const __le16 *mac_override)
{
	if (cfg->mac_addr_from_csr) {
		iwl_set_hw_address_from_csr(trans, data);
	} else if (cfg->nvm_type != IWL_NVM_EXT) {
		const u8 *hw_addr = (const u8 *)(nvm_hw + HW_ADDR);

		/* The byte order is little endian 16 bit, meaning 214365 */
		data->hw_addr[0] = hw_addr[1];
		data->hw_addr[1] = hw_addr[0];
		data->hw_addr[2] = hw_addr[3];
		data->hw_addr[3] = hw_addr[2];
		data->hw_addr[4] = hw_addr[5];
		data->hw_addr[5] = hw_addr[4];
	} else {
		iwl_set_hw_address_family_8000(trans, cfg, data,
					       mac_override, nvm_hw);
	}

	if (!is_valid_ether_addr(data->hw_addr)) {
		IWL_ERR(trans, "no valid mac address was found\n");
		return -EINVAL;
	}

	IWL_INFO(trans, "base HW address: %pM\n", data->hw_addr);

	return 0;
}

static bool
iwl_nvm_no_wide_in_5ghz(struct device *dev, const struct iwl_cfg *cfg,
			const __be16 *nvm_hw)
{
	/*
	 * Workaround a bug in Indonesia SKUs where the regulatory in
	 * some 7000-family OTPs erroneously allow wide channels in
	 * 5GHz.  To check for Indonesia, we take the SKU value from
	 * bits 1-4 in the subsystem ID and check if it is either 5 or
	 * 9.  In those cases, we need to force-disable wide channels
	 * in 5GHz otherwise the FW will throw a sysassert when we try
	 * to use them.
	 */
	if (cfg->device_family == IWL_DEVICE_FAMILY_7000) {
		/*
		 * Unlike the other sections in the NVM, the hw
		 * section uses big-endian.
		 */
		u16 subsystem_id = be16_to_cpup(nvm_hw + SUBSYSTEM_ID);
		u8 sku = (subsystem_id & 0x1e) >> 1;

		if (sku == 5 || sku == 9) {
			IWL_DEBUG_EEPROM(dev,
					 "disabling wide channels in 5GHz (0x%0x %d)\n",
					 subsystem_id, sku);
			return true;
		}
	}

	return false;
}

struct iwl_nvm_data *
iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
		   const __be16 *nvm_hw, const __le16 *nvm_sw,
		   const __le16 *nvm_calib, const __le16 *regulatory,
		   const __le16 *mac_override, const __le16 *phy_sku,
		   u8 tx_chains, u8 rx_chains, bool lar_fw_supported)
{
	struct device *dev = trans->dev;
	struct iwl_nvm_data *data;
	bool lar_enabled;
	u32 sku, radio_cfg;
	u32 sbands_flags = 0;
	u16 lar_config;
	const __le16 *ch_section;

	if (cfg->nvm_type != IWL_NVM_EXT)
		data = kzalloc(sizeof(*data) +
			       sizeof(struct ieee80211_channel) *
			       IWL_NVM_NUM_CHANNELS,
			       GFP_KERNEL);
	else
		data = kzalloc(sizeof(*data) +
			       sizeof(struct ieee80211_channel) *
			       IWL_NVM_NUM_CHANNELS_EXT,
			       GFP_KERNEL);
	if (!data)
		return NULL;

	data->nvm_version = iwl_get_nvm_version(cfg, nvm_sw);

	radio_cfg = iwl_get_radio_cfg(cfg, nvm_sw, phy_sku);
	iwl_set_radio_cfg(cfg, data, radio_cfg);
	if (data->valid_tx_ant)
		tx_chains &= data->valid_tx_ant;
	if (data->valid_rx_ant)
		rx_chains &= data->valid_rx_ant;

	sku = iwl_get_sku(cfg, nvm_sw, phy_sku);
	data->sku_cap_band_24ghz_enable = sku & NVM_SKU_CAP_BAND_24GHZ;
	data->sku_cap_band_52ghz_enable = sku & NVM_SKU_CAP_BAND_52GHZ;
	data->sku_cap_11n_enable = sku & NVM_SKU_CAP_11N_ENABLE;
	if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_ALL)
		data->sku_cap_11n_enable = false;
	data->sku_cap_11ac_enable = data->sku_cap_11n_enable &&
				    (sku & NVM_SKU_CAP_11AC_ENABLE);
	data->sku_cap_mimo_disabled = sku & NVM_SKU_CAP_MIMO_DISABLE;

	data->n_hw_addrs = iwl_get_n_hw_addrs(cfg, nvm_sw);

	if (cfg->nvm_type != IWL_NVM_EXT) {
		/* Checking for required sections */
		if (!nvm_calib) {
			IWL_ERR(trans,
				"Can't parse empty Calib NVM sections\n");
			kfree(data);
			return NULL;
		}

		ch_section = cfg->nvm_type == IWL_NVM_SDP ?
			     &regulatory[NVM_CHANNELS_SDP] :
			     &nvm_sw[NVM_CHANNELS];

		/* in family 8000 Xtal calibration values moved to OTP */
		data->xtal_calib[0] = *(nvm_calib + XTAL_CALIB);
		data->xtal_calib[1] = *(nvm_calib + XTAL_CALIB + 1);
		lar_enabled = true;
	} else {
		u16 lar_offset = data->nvm_version < 0xE39 ?
				 NVM_LAR_OFFSET_OLD :
				 NVM_LAR_OFFSET;

		lar_config = le16_to_cpup(regulatory + lar_offset);
		data->lar_enabled = !!(lar_config &
				       NVM_LAR_ENABLED);
		lar_enabled = data->lar_enabled;
		ch_section = &regulatory[NVM_CHANNELS_EXTENDED];
	}

	/* If no valid mac address was found - bail out */
	if (iwl_set_hw_address(trans, cfg, data, nvm_hw, mac_override)) {
		kfree(data);
		return NULL;
	}

	if (lar_fw_supported && lar_enabled)
		sbands_flags |= IWL_NVM_SBANDS_FLAGS_LAR;

	if (iwl_nvm_no_wide_in_5ghz(dev, cfg, nvm_hw))
		sbands_flags |= IWL_NVM_SBANDS_FLAGS_NO_WIDE_IN_5GHZ;

	iwl_init_sbands(dev, cfg, data, ch_section, tx_chains, rx_chains,
			sbands_flags);
	data->calib_version = 255;

	return data;
}
IWL_EXPORT_SYMBOL(iwl_parse_nvm_data);

static u32 iwl_nvm_get_regdom_bw_flags(const u8 *nvm_chan,
				       int ch_idx, u16 nvm_flags,
				       const struct iwl_cfg *cfg)
{
	u32 flags = NL80211_RRF_NO_HT40;
	u32 last_5ghz_ht = LAST_5GHZ_HT;

	if (cfg->nvm_type == IWL_NVM_EXT)
		last_5ghz_ht = LAST_5GHZ_HT_FAMILY_8000;

	if (ch_idx < NUM_2GHZ_CHANNELS &&
	    (nvm_flags & NVM_CHANNEL_40MHZ)) {
		if (nvm_chan[ch_idx] <= LAST_2GHZ_HT_PLUS)
			flags &= ~NL80211_RRF_NO_HT40PLUS;
		if (nvm_chan[ch_idx] >= FIRST_2GHZ_HT_MINUS)
			flags &= ~NL80211_RRF_NO_HT40MINUS;
	} else if (nvm_chan[ch_idx] <= last_5ghz_ht &&
		   (nvm_flags & NVM_CHANNEL_40MHZ)) {
		if ((ch_idx - NUM_2GHZ_CHANNELS) % 2 == 0)
			flags &= ~NL80211_RRF_NO_HT40PLUS;
		else
			flags &= ~NL80211_RRF_NO_HT40MINUS;
	}

	if (!(nvm_flags & NVM_CHANNEL_80MHZ))
		flags |= NL80211_RRF_NO_80MHZ;
	if (!(nvm_flags & NVM_CHANNEL_160MHZ))
		flags |= NL80211_RRF_NO_160MHZ;

	if (!(nvm_flags & NVM_CHANNEL_ACTIVE))
		flags |= NL80211_RRF_NO_IR;

	if (nvm_flags & NVM_CHANNEL_RADAR)
		flags |= NL80211_RRF_DFS;

	if (nvm_flags & NVM_CHANNEL_INDOOR_ONLY)
		flags |= NL80211_RRF_NO_OUTDOOR;

	/* Set the GO concurrent flag only in case that NO_IR is set.
	 * Otherwise it is meaningless
	 */
	if ((nvm_flags & NVM_CHANNEL_GO_CONCURRENT) &&
	    (flags & NL80211_RRF_NO_IR))
		flags |= NL80211_RRF_GO_CONCURRENT;

	return flags;
}

struct regdb_ptrs {
	struct ieee80211_wmm_rule *rule;
	u32 token;
};
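
/*
 * Build an ieee80211_regdomain from the per-channel flags the firmware
 * reported for the given MCC: contiguous channels that share the same
 * regulatory flags are merged into a single reg rule, and for ETSI 5 GHz
 * (when geo_info indicates it) the matching WMM limits from the
 * regulatory database are attached to each rule.
 */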
struct ieee80211_regdomain *
iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
		       int num_of_ch, __le32 *channels, u16 fw_mcc,
		       u16 geo_info)
{
	int ch_idx;
	u16 ch_flags;
	u32 reg_rule_flags, prev_reg_rule_flags = 0;
	const u8 *nvm_chan = cfg->nvm_type == IWL_NVM_EXT ?
			     iwl_ext_nvm_channels : iwl_nvm_channels;
	struct ieee80211_regdomain *regd, *copy_rd;
	int size_of_regd, regd_to_copy, wmms_to_copy;
	int size_of_wmms = 0;
	struct ieee80211_reg_rule *rule;
	struct ieee80211_wmm_rule *wmm_rule, *d_wmm, *s_wmm;
	struct regdb_ptrs *regdb_ptrs;
	enum nl80211_band band;
	int center_freq, prev_center_freq = 0;
	int valid_rules = 0, n_wmms = 0;
	int i;
	bool new_rule;
	int max_num_ch = cfg->nvm_type == IWL_NVM_EXT ?
			 IWL_NVM_NUM_CHANNELS_EXT : IWL_NVM_NUM_CHANNELS;

	if (WARN_ON_ONCE(num_of_ch > NL80211_MAX_SUPP_REG_RULES))
		return ERR_PTR(-EINVAL);

	if (WARN_ON(num_of_ch > max_num_ch))
		num_of_ch = max_num_ch;

	IWL_DEBUG_DEV(dev, IWL_DL_LAR, "building regdom for %d channels\n",
		      num_of_ch);

	/* build a regdomain rule for every valid channel */
	size_of_regd =
		sizeof(struct ieee80211_regdomain) +
		num_of_ch * sizeof(struct ieee80211_reg_rule);

	if (geo_info & GEO_WMM_ETSI_5GHZ_INFO)
		size_of_wmms =
			num_of_ch * sizeof(struct ieee80211_wmm_rule);

	regd = kzalloc(size_of_regd + size_of_wmms, GFP_KERNEL);
	if (!regd)
		return ERR_PTR(-ENOMEM);

	regdb_ptrs = kcalloc(num_of_ch, sizeof(*regdb_ptrs), GFP_KERNEL);
	if (!regdb_ptrs) {
		copy_rd = ERR_PTR(-ENOMEM);
		goto out;
	}

	/* set alpha2 from FW. */
	regd->alpha2[0] = fw_mcc >> 8;
	regd->alpha2[1] = fw_mcc & 0xff;

	wmm_rule = (struct ieee80211_wmm_rule *)((u8 *)regd + size_of_regd);

	for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) {
		ch_flags = (u16)__le32_to_cpup(channels + ch_idx);
		band = (ch_idx < NUM_2GHZ_CHANNELS) ?
		       NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
		center_freq = ieee80211_channel_to_frequency(nvm_chan[ch_idx],
							     band);
		new_rule = false;

		if (!(ch_flags & NVM_CHANNEL_VALID)) {
			iwl_nvm_print_channel_flags(dev, IWL_DL_LAR,
						    nvm_chan[ch_idx], ch_flags);
			continue;
		}

		reg_rule_flags = iwl_nvm_get_regdom_bw_flags(nvm_chan, ch_idx,
							     ch_flags, cfg);

		/* we can't continue the same rule */
		if (ch_idx == 0 || prev_reg_rule_flags != reg_rule_flags ||
		    center_freq - prev_center_freq > 20) {
			valid_rules++;
			new_rule = true;
		}

		rule = &regd->reg_rules[valid_rules - 1];

		if (new_rule)
			rule->freq_range.start_freq_khz =
						MHZ_TO_KHZ(center_freq - 10);

		rule->freq_range.end_freq_khz = MHZ_TO_KHZ(center_freq + 10);

		/* this doesn't matter - not used by FW */
		rule->power_rule.max_antenna_gain = DBI_TO_MBI(6);
		rule->power_rule.max_eirp =
			DBM_TO_MBM(IWL_DEFAULT_MAX_TX_POWER);

		rule->flags = reg_rule_flags;

		/* rely on auto-calculation to merge BW of contiguous chans */
		rule->flags |= NL80211_RRF_AUTO_BW;
		rule->freq_range.max_bandwidth_khz = 0;

		prev_center_freq = center_freq;
		prev_reg_rule_flags = reg_rule_flags;

		iwl_nvm_print_channel_flags(dev, IWL_DL_LAR,
					    nvm_chan[ch_idx], ch_flags);

		if (!(geo_info & GEO_WMM_ETSI_5GHZ_INFO) ||
		    band == NL80211_BAND_2GHZ)
			continue;

		if (!reg_query_regdb_wmm(regd->alpha2, center_freq,
					 &regdb_ptrs[n_wmms].token, wmm_rule)) {
			/* Add only new rules */
			for (i = 0; i < n_wmms; i++) {
				if (regdb_ptrs[i].token ==
				    regdb_ptrs[n_wmms].token) {
					rule->wmm_rule = regdb_ptrs[i].rule;
					break;
				}
			}
			if (i == n_wmms) {
				rule->wmm_rule = wmm_rule;
				regdb_ptrs[n_wmms++].rule = wmm_rule;
				wmm_rule++;
			}
		}
	}

	regd->n_reg_rules = valid_rules;
	regd->n_wmm_rules = n_wmms;

	/*
	 * Narrow down regdom for unused regulatory rules to prevent hole
	 * between reg rules to wmm rules.
	 */
	regd_to_copy = sizeof(struct ieee80211_regdomain) +
		valid_rules * sizeof(struct ieee80211_reg_rule);

	wmms_to_copy = sizeof(struct ieee80211_wmm_rule) * n_wmms;

	copy_rd = kzalloc(regd_to_copy + wmms_to_copy, GFP_KERNEL);
	if (!copy_rd) {
		copy_rd = ERR_PTR(-ENOMEM);
		goto out;
	}

	memcpy(copy_rd, regd, regd_to_copy);
	memcpy((u8 *)copy_rd + regd_to_copy, (u8 *)regd + size_of_regd,
	       wmms_to_copy);

	d_wmm = (struct ieee80211_wmm_rule *)((u8 *)copy_rd + regd_to_copy);
	s_wmm = (struct ieee80211_wmm_rule *)((u8 *)regd + size_of_regd);

	for (i = 0; i < regd->n_reg_rules; i++) {
		if (!regd->reg_rules[i].wmm_rule)
			continue;

		copy_rd->reg_rules[i].wmm_rule = d_wmm +
			(regd->reg_rules[i].wmm_rule - s_wmm);
	}

out:
	kfree(regdb_ptrs);
	kfree(regd);
	return copy_rd;
}
IWL_EXPORT_SYMBOL(iwl_parse_nvm_mcc_info);

#define IWL_MAX_NVM_SECTION_SIZE	0x1b58
#define IWL_MAX_EXT_NVM_SECTION_SIZE	0x1ffc
#define MAX_NVM_FILE_LEN	16384

void iwl_nvm_fixups(u32 hw_id, unsigned int section, u8 *data,
		    unsigned int len)
{
#define IWL_4165_DEVICE_ID	0x5501
#define NVM_SKU_CAP_MIMO_DISABLE BIT(5)

	if (section == NVM_SECTION_TYPE_PHY_SKU &&
	    hw_id == IWL_4165_DEVICE_ID && data && len >= 5 &&
	    (data[4] & NVM_SKU_CAP_MIMO_DISABLE))
		/* OTP 0x52 bug work around: it's a 1x1 device */
		data[3] = ANT_B | (ANT_B << 4);
}
IWL_EXPORT_SYMBOL(iwl_nvm_fixups);

/*
 * Reads external NVM from a file into mvm->nvm_sections
 *
 * HOW TO CREATE THE NVM FILE FORMAT:
 * ------------------------------
 * 1. create hex file, format:
 *      3800 -> header
 *      0000 -> header
 *      5a40 -> data
 *
 *   rev - 6 bit (word1)
 *   len - 10 bit (word1)
 *   id - 4 bit (word2)
 *   rsv - 12 bit (word2)
 *
 * 2. flip 8bits with 8 bits per line to get the right NVM file format
 *
 * 3. create binary file from the hex file
 *
 * 4. save as "iNVM_xxx.bin" under /lib/firmware
 */
int iwl_read_external_nvm(struct iwl_trans *trans,
			  const char *nvm_file_name,
			  struct iwl_nvm_section *nvm_sections)
{
	int ret, section_size;
	u16 section_id;
	const struct firmware *fw_entry;
	const struct {
		__le16 word1;
		__le16 word2;
		u8 data[];
	} *file_sec;
	const u8 *eof;
	u8 *temp;
	int max_section_size;
	const __le32 *dword_buff;

#define NVM_WORD1_LEN(x)	(8 * (x & 0x03FF))
#define NVM_WORD2_ID(x)		(x >> 12)
#define EXT_NVM_WORD2_LEN(x)	(2 * (((x) & 0xFF) << 8 | (x) >> 8))
#define EXT_NVM_WORD1_ID(x)	((x) >> 4)
#define NVM_HEADER_0		(0x2A504C54)
#define NVM_HEADER_1		(0x4E564D2A)
#define NVM_HEADER_SIZE		(4 * sizeof(u32))

	IWL_DEBUG_EEPROM(trans->dev, "Read from external NVM\n");

	/* Maximal size depends on NVM version */
	if (trans->cfg->nvm_type != IWL_NVM_EXT)
		max_section_size = IWL_MAX_NVM_SECTION_SIZE;
	else
		max_section_size = IWL_MAX_EXT_NVM_SECTION_SIZE;

	/*
	 * Obtain NVM image via request_firmware.  Since we already used
	 * request_firmware_nowait() for the firmware binary load and only
	 * get here after that we assume the NVM request can be satisfied
	 * synchronously.
	 */
	ret = request_firmware(&fw_entry, nvm_file_name, trans->dev);
	if (ret) {
		IWL_ERR(trans, "ERROR: %s isn't available %d\n",
			nvm_file_name, ret);
		return ret;
	}

	IWL_INFO(trans, "Loaded NVM file %s (%zu bytes)\n",
		 nvm_file_name, fw_entry->size);

	if (fw_entry->size > MAX_NVM_FILE_LEN) {
		IWL_ERR(trans, "NVM file too large\n");
		ret = -EINVAL;
		goto out;
	}

	eof = fw_entry->data + fw_entry->size;
	dword_buff = (__le32 *)fw_entry->data;

	/* some NVM file will contain a header.
	 * The header is identified by 2 dwords header as follow:
	 * dword[0] = 0x2A504C54
	 * dword[1] = 0x4E564D2A
	 *
	 * This header must be skipped when providing the NVM data to the FW.
	 */
	if (fw_entry->size > NVM_HEADER_SIZE &&
	    dword_buff[0] == cpu_to_le32(NVM_HEADER_0) &&
	    dword_buff[1] == cpu_to_le32(NVM_HEADER_1)) {
		file_sec = (void *)(fw_entry->data + NVM_HEADER_SIZE);
		IWL_INFO(trans, "NVM Version %08X\n", le32_to_cpu(dword_buff[2]));
		IWL_INFO(trans, "NVM Manufacturing date %08X\n",
			 le32_to_cpu(dword_buff[3]));

		/* nvm file validation, dword_buff[2] holds the file version */
		if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000 &&
		    CSR_HW_REV_STEP(trans->hw_rev) == SILICON_C_STEP &&
		    le32_to_cpu(dword_buff[2]) < 0xE4A) {
			ret = -EFAULT;
			goto out;
		}
	} else {
		file_sec = (void *)fw_entry->data;
	}

	while (true) {
		if (file_sec->data > eof) {
			IWL_ERR(trans,
				"ERROR - NVM file too short for section header\n");
			ret = -EINVAL;
			break;
		}

		/* check for EOF marker */
		if (!file_sec->word1 && !file_sec->word2) {
			ret = 0;
			break;
		}

		if (trans->cfg->nvm_type != IWL_NVM_EXT) {
			section_size =
				2 * NVM_WORD1_LEN(le16_to_cpu(file_sec->word1));
			section_id = NVM_WORD2_ID(le16_to_cpu(file_sec->word2));
		} else {
			section_size = 2 * EXT_NVM_WORD2_LEN(
						le16_to_cpu(file_sec->word2));
			section_id = EXT_NVM_WORD1_ID(
						le16_to_cpu(file_sec->word1));
		}

		if (section_size > max_section_size) {
			IWL_ERR(trans, "ERROR - section too large (%d)\n",
				section_size);
			ret = -EINVAL;
			break;
		}

		if (!section_size) {
			IWL_ERR(trans, "ERROR - section empty\n");
			ret = -EINVAL;
			break;
		}

		if (file_sec->data + section_size > eof) {
			IWL_ERR(trans,
				"ERROR - NVM file too short for section (%d bytes)\n",
				section_size);
			ret = -EINVAL;
			break;
		}

		if (WARN(section_id >= NVM_MAX_NUM_SECTIONS,
			 "Invalid NVM section ID %d\n", section_id)) {
			ret = -EINVAL;
			break;
		}

		temp = kmemdup(file_sec->data, section_size, GFP_KERNEL);
		if (!temp) {
			ret = -ENOMEM;
			break;
		}

		iwl_nvm_fixups(trans->hw_id, section_id, temp, section_size);

		kfree(nvm_sections[section_id].data);
		nvm_sections[section_id].data = temp;
		nvm_sections[section_id].length = section_size;

		/* advance to the next section */
		file_sec = (void *)(file_sec->data + section_size);
	}
out:
	release_firmware(fw_entry);
	return ret;
}
IWL_EXPORT_SYMBOL(iwl_read_external_nvm);

struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans,
				 const struct iwl_fw *fw)
{
	struct iwl_nvm_get_info cmd = {};
	struct iwl_nvm_get_info_rsp *rsp;
	struct iwl_nvm_data *nvm;
	struct iwl_host_cmd hcmd = {
		.flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
		.data = { &cmd, },
		.len = { sizeof(cmd) },
		.id = WIDE_ID(REGULATORY_AND_NVM_GROUP, NVM_GET_INFO)
	};
	int ret;
	bool lar_fw_supported = !iwlwifi_mod_params.lar_disable &&
				fw_has_capa(&fw->ucode_capa,
					    IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
	u32 mac_flags;
	u32 sbands_flags = 0;

	ret = iwl_trans_send_cmd(trans, &hcmd);
	if (ret)
		return ERR_PTR(ret);

	if (WARN(iwl_rx_packet_payload_len(hcmd.resp_pkt) != sizeof(*rsp),
		 "Invalid payload len in NVM response from FW %d",
		 iwl_rx_packet_payload_len(hcmd.resp_pkt))) {
		ret = -EINVAL;
		goto out;
	}

	rsp = (void *)hcmd.resp_pkt->data;
	if (le32_to_cpu(rsp->general.flags) & NVM_GENERAL_FLAGS_EMPTY_OTP)
		IWL_INFO(trans, "OTP is empty\n");

	nvm = kzalloc(sizeof(*nvm) +
		      sizeof(struct ieee80211_channel) * IWL_NUM_CHANNELS,
		      GFP_KERNEL);
	if (!nvm) {
		ret = -ENOMEM;
		goto out;
	}

	iwl_set_hw_address_from_csr(trans, nvm);
	/* TODO: if platform NVM has MAC address - override it here */

	if (!is_valid_ether_addr(nvm->hw_addr)) {
		IWL_ERR(trans, "no valid mac address was found\n");
		ret = -EINVAL;
		goto err_free;
	}

	IWL_INFO(trans, "base HW address: %pM\n", nvm->hw_addr);

	/* Initialize general data */
	nvm->nvm_version = le16_to_cpu(rsp->general.nvm_version);

	/* Initialize MAC sku data */
	mac_flags = le32_to_cpu(rsp->mac_sku.mac_sku_flags);
	nvm->sku_cap_11ac_enable =
		!!(mac_flags & NVM_MAC_SKU_FLAGS_802_11AC_ENABLED);
	nvm->sku_cap_11n_enable =
		!!(mac_flags & NVM_MAC_SKU_FLAGS_802_11N_ENABLED);
	nvm->sku_cap_band_24ghz_enable =
		!!(mac_flags & NVM_MAC_SKU_FLAGS_BAND_2_4_ENABLED);
	nvm->sku_cap_band_52ghz_enable =
		!!(mac_flags & NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED);
	nvm->sku_cap_mimo_disabled =
		!!(mac_flags & NVM_MAC_SKU_FLAGS_MIMO_DISABLED);

	/* Initialize PHY sku data */
	nvm->valid_tx_ant = (u8)le32_to_cpu(rsp->phy_sku.tx_chains);
	nvm->valid_rx_ant = (u8)le32_to_cpu(rsp->phy_sku.rx_chains);

	if (le32_to_cpu(rsp->regulatory.lar_enabled) && lar_fw_supported) {
		nvm->lar_enabled = true;
		sbands_flags |= IWL_NVM_SBANDS_FLAGS_LAR;
	}

	iwl_init_sbands(trans->dev, trans->cfg, nvm,
			rsp->regulatory.channel_profile,
			nvm->valid_tx_ant & fw->valid_tx_ant,
			nvm->valid_rx_ant & fw->valid_rx_ant,
			sbands_flags);

	iwl_free_resp(&hcmd);
	return nvm;

err_free:
	kfree(nvm);
out:
	iwl_free_resp(&hcmd);
	return ERR_PTR(ret);
}
IWL_EXPORT_SYMBOL(iwl_get_nvm);