/*
 * Copyright (c) 2014-2017 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/bitfield.h>
#include "core.h"
#include "hw.h"
#include "hif.h"
#include "wmi-ops.h"
#include "bmi.h"

const struct ath10k_hw_regs qca988x_regs = {
        .rtc_soc_base_address = 0x00004000,
        .rtc_wmac_base_address = 0x00005000,
        .soc_core_base_address = 0x00009000,
        .wlan_mac_base_address = 0x00020000,
        .ce_wrapper_base_address = 0x00057000,
        .ce0_base_address = 0x00057400,
        .ce1_base_address = 0x00057800,
        .ce2_base_address = 0x00057c00,
        .ce3_base_address = 0x00058000,
        .ce4_base_address = 0x00058400,
        .ce5_base_address = 0x00058800,
        .ce6_base_address = 0x00058c00,
        .ce7_base_address = 0x00059000,
        .soc_reset_control_si0_rst_mask = 0x00000001,
        .soc_reset_control_ce_rst_mask = 0x00040000,
        .soc_chip_id_address = 0x000000ec,
        .scratch_3_address = 0x00000030,
        .fw_indicator_address = 0x00009030,
        .pcie_local_base_address = 0x00080000,
        .ce_wrap_intr_sum_host_msi_lsb = 0x00000008,
        .ce_wrap_intr_sum_host_msi_mask = 0x0000ff00,
        .pcie_intr_fw_mask = 0x00000400,
        .pcie_intr_ce_mask_all = 0x0007f800,
        .pcie_intr_clr_address = 0x00000014,
};

const struct ath10k_hw_regs qca6174_regs = {
        .rtc_soc_base_address = 0x00000800,
        .rtc_wmac_base_address = 0x00001000,
        .soc_core_base_address = 0x0003a000,
        .wlan_mac_base_address = 0x00010000,
        .ce_wrapper_base_address = 0x00034000,
        .ce0_base_address = 0x00034400,
        .ce1_base_address = 0x00034800,
        .ce2_base_address = 0x00034c00,
        .ce3_base_address = 0x00035000,
        .ce4_base_address = 0x00035400,
        .ce5_base_address = 0x00035800,
        .ce6_base_address = 0x00035c00,
        .ce7_base_address = 0x00036000,
        .soc_reset_control_si0_rst_mask = 0x00000000,
        .soc_reset_control_ce_rst_mask = 0x00000001,
        .soc_chip_id_address = 0x000000f0,
        .scratch_3_address = 0x00000028,
        .fw_indicator_address = 0x0003a028,
        .pcie_local_base_address = 0x00080000,
        .ce_wrap_intr_sum_host_msi_lsb = 0x00000008,
        .ce_wrap_intr_sum_host_msi_mask = 0x0000ff00,
        .pcie_intr_fw_mask = 0x00000400,
        .pcie_intr_ce_mask_all = 0x0007f800,
        .pcie_intr_clr_address = 0x00000014,
        .cpu_pll_init_address = 0x00404020,
        .cpu_speed_address = 0x00404024,
        .core_clk_div_address = 0x00404028,
};

const struct ath10k_hw_regs qca99x0_regs = {
        .rtc_soc_base_address = 0x00080000,
        .rtc_wmac_base_address = 0x00000000,
        .soc_core_base_address = 0x00082000,
        .wlan_mac_base_address = 0x00030000,
        .ce_wrapper_base_address = 0x0004d000,
        .ce0_base_address = 0x0004a000,
        .ce1_base_address = 0x0004a400,
        .ce2_base_address = 0x0004a800,
        .ce3_base_address = 0x0004ac00,
        .ce4_base_address = 0x0004b000,
        .ce5_base_address = 0x0004b400,
        .ce6_base_address = 0x0004b800,
        .ce7_base_address = 0x0004bc00,
        /* Note: qca99x0 supports up to 12 copy engines. Other than the
         * addresses of CE0 and CE1, no other copy engine is directly
         * referred to in the code, so there is no need to add the newly
         * supported CEs to this address table.
         *      Copy Engine     Address
         *      CE8             0x0004c000
         *      CE9             0x0004c400
         *      CE10            0x0004c800
         *      CE11            0x0004cc00
         */
        .soc_reset_control_si0_rst_mask = 0x00000001,
        .soc_reset_control_ce_rst_mask = 0x00000100,
        .soc_chip_id_address = 0x000000ec,
        .scratch_3_address = 0x00040050,
        .fw_indicator_address = 0x00040050,
        .pcie_local_base_address = 0x00000000,
        .ce_wrap_intr_sum_host_msi_lsb = 0x0000000c,
        .ce_wrap_intr_sum_host_msi_mask = 0x00fff000,
        .pcie_intr_fw_mask = 0x00100000,
        .pcie_intr_ce_mask_all = 0x000fff00,
        .pcie_intr_clr_address = 0x00000010,
};

const struct ath10k_hw_regs qca4019_regs = {
        .rtc_soc_base_address = 0x00080000,
        .soc_core_base_address = 0x00082000,
        .wlan_mac_base_address = 0x00030000,
        .ce_wrapper_base_address = 0x0004d000,
        .ce0_base_address = 0x0004a000,
        .ce1_base_address = 0x0004a400,
        .ce2_base_address = 0x0004a800,
        .ce3_base_address = 0x0004ac00,
        .ce4_base_address = 0x0004b000,
        .ce5_base_address = 0x0004b400,
        .ce6_base_address = 0x0004b800,
        .ce7_base_address = 0x0004bc00,
        /* qca4019 supports up to 12 copy engines. Since the base addresses
         * of CE8 to CE11 are not directly referred to in the code, there is
         * no need to keep them as separate members in this table.
         *      Copy Engine     Address
         *      CE8             0x0004c000
         *      CE9             0x0004c400
         *      CE10            0x0004c800
         *      CE11            0x0004cc00
         */
        .soc_reset_control_si0_rst_mask = 0x00000001,
        .soc_reset_control_ce_rst_mask = 0x00000100,
        .soc_chip_id_address = 0x000000ec,
        .fw_indicator_address = 0x0004f00c,
        .ce_wrap_intr_sum_host_msi_lsb = 0x0000000c,
        .ce_wrap_intr_sum_host_msi_mask = 0x00fff000,
        .pcie_intr_fw_mask = 0x00100000,
        .pcie_intr_ce_mask_all = 0x000fff00,
        .pcie_intr_clr_address = 0x00000010,
};

const struct ath10k_hw_values qca988x_values = {
        .rtc_state_val_on = 3,
        .ce_count = 8,
        .msi_assign_ce_max = 7,
        .num_target_ce_config_wlan = 7,
        .ce_desc_meta_data_mask = 0xFFFC,
        .ce_desc_meta_data_lsb = 2,
};

const struct ath10k_hw_values qca6174_values = {
        .rtc_state_val_on = 3,
        .ce_count = 8,
        .msi_assign_ce_max = 7,
        .num_target_ce_config_wlan = 7,
        .ce_desc_meta_data_mask = 0xFFFC,
        .ce_desc_meta_data_lsb = 2,
};

const struct ath10k_hw_values qca99x0_values = {
        .rtc_state_val_on = 5,
        .ce_count = 12,
        .msi_assign_ce_max = 12,
        .num_target_ce_config_wlan = 10,
        .ce_desc_meta_data_mask = 0xFFF0,
        .ce_desc_meta_data_lsb = 4,
};

const struct ath10k_hw_values qca9888_values = {
        .rtc_state_val_on = 3,
        .ce_count = 12,
        .msi_assign_ce_max = 12,
        .num_target_ce_config_wlan = 10,
        .ce_desc_meta_data_mask = 0xFFF0,
        .ce_desc_meta_data_lsb = 4,
};

const struct ath10k_hw_values qca4019_values = {
        .ce_count = 12,
        .num_target_ce_config_wlan = 10,
        .ce_desc_meta_data_mask = 0xFFF0,
        .ce_desc_meta_data_lsb = 4,
};

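/* Note on the table below (derived from the addresses themselves): WCN3990
 * places its copy engine register blocks 0x1000 apart, unlike the 0x400
 * stride used by the chips above, and exposes the CE8-CE11 base addresses
 * directly.
 */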
const struct ath10k_hw_regs wcn3990_regs = {
        .rtc_soc_base_address = 0x00000000,
        .rtc_wmac_base_address = 0x00000000,
        .soc_core_base_address = 0x00000000,
        .ce_wrapper_base_address = 0x0024C000,
        .ce0_base_address = 0x00240000,
        .ce1_base_address = 0x00241000,
        .ce2_base_address = 0x00242000,
        .ce3_base_address = 0x00243000,
        .ce4_base_address = 0x00244000,
        .ce5_base_address = 0x00245000,
        .ce6_base_address = 0x00246000,
        .ce7_base_address = 0x00247000,
        .ce8_base_address = 0x00248000,
        .ce9_base_address = 0x00249000,
        .ce10_base_address = 0x0024A000,
        .ce11_base_address = 0x0024B000,
        .soc_chip_id_address = 0x000000f0,
        .soc_reset_control_si0_rst_mask = 0x00000001,
        .soc_reset_control_ce_rst_mask = 0x00000100,
        .ce_wrap_intr_sum_host_msi_lsb = 0x0000000c,
        .ce_wrap_intr_sum_host_msi_mask = 0x00fff000,
        .pcie_intr_fw_mask = 0x00100000,
};

static struct ath10k_hw_ce_regs_addr_map wcn3990_src_ring = {
        .msb = 0x00000010,
        .lsb = 0x00000010,
        .mask = GENMASK(17, 17),
};

static struct ath10k_hw_ce_regs_addr_map wcn3990_dst_ring = {
        .msb = 0x00000012,
        .lsb = 0x00000012,
        .mask = GENMASK(18, 18),
};

static struct ath10k_hw_ce_regs_addr_map wcn3990_dmax = {
        .msb = 0x00000000,
        .lsb = 0x00000000,
        .mask = GENMASK(15, 0),
};

static struct ath10k_hw_ce_ctrl1 wcn3990_ctrl1 = {
        .addr = 0x00000018,
        .src_ring = &wcn3990_src_ring,
        .dst_ring = &wcn3990_dst_ring,
        .dmax = &wcn3990_dmax,
};

static struct ath10k_hw_ce_regs_addr_map wcn3990_host_ie_cc = {
        .mask = GENMASK(0, 0),
};

static struct ath10k_hw_ce_host_ie wcn3990_host_ie = {
        .copy_complete = &wcn3990_host_ie_cc,
};

static struct ath10k_hw_ce_host_wm_regs wcn3990_wm_reg = {
        .dstr_lmask = 0x00000010,
        .dstr_hmask = 0x00000008,
        .srcr_lmask = 0x00000004,
        .srcr_hmask = 0x00000002,
        .cc_mask = 0x00000001,
        .wm_mask = 0x0000001E,
        .addr = 0x00000030,
};

static struct ath10k_hw_ce_misc_regs wcn3990_misc_reg = {
        .axi_err = 0x00000100,
        .dstr_add_err = 0x00000200,
        .srcr_len_err = 0x00000100,
        .dstr_mlen_vio = 0x00000080,
        .dstr_overflow = 0x00000040,
        .srcr_overflow = 0x00000020,
        .err_mask = 0x000003E0,
        .addr = 0x00000038,
};

static struct ath10k_hw_ce_regs_addr_map wcn3990_src_wm_low = {
        .msb = 0x00000000,
        .lsb = 0x00000010,
        .mask = GENMASK(31, 16),
};

static struct ath10k_hw_ce_regs_addr_map wcn3990_src_wm_high = {
        .msb = 0x0000000f,
        .lsb = 0x00000000,
        .mask = GENMASK(15, 0),
};

static struct ath10k_hw_ce_dst_src_wm_regs wcn3990_wm_src_ring = {
        .addr = 0x0000004c,
        .low_rst = 0x00000000,
        .high_rst = 0x00000000,
        .wm_low = &wcn3990_src_wm_low,
        .wm_high = &wcn3990_src_wm_high,
};

static struct ath10k_hw_ce_regs_addr_map wcn3990_dst_wm_low = {
        .lsb = 0x00000010,
        .mask = GENMASK(31, 16),
};

static struct ath10k_hw_ce_regs_addr_map wcn3990_dst_wm_high = {
        .msb = 0x0000000f,
        .lsb = 0x00000000,
        .mask = GENMASK(15, 0),
};

static struct ath10k_hw_ce_dst_src_wm_regs wcn3990_wm_dst_ring = {
        .addr = 0x00000050,
        .low_rst = 0x00000000,
        .high_rst = 0x00000000,
        .wm_low = &wcn3990_dst_wm_low,
        .wm_high = &wcn3990_dst_wm_high,
};

static struct ath10k_hw_ce_ctrl1_upd wcn3990_ctrl1_upd = {
        .shift = 19,
        .mask = 0x00080000,
        .enable = 0x00000000,
};

const struct ath10k_hw_ce_regs wcn3990_ce_regs = {
        .sr_base_addr = 0x00000000,
        .sr_size_addr = 0x00000008,
        .dr_base_addr = 0x0000000c,
        .dr_size_addr = 0x00000014,
        .misc_ie_addr = 0x00000034,
        .sr_wr_index_addr = 0x0000003c,
        .dst_wr_index_addr = 0x00000040,
        .current_srri_addr = 0x00000044,
        .current_drri_addr = 0x00000048,
        .ce_rri_low = 0x0024C004,
        .ce_rri_high = 0x0024C008,
        .host_ie_addr = 0x0000002c,
        .ctrl1_regs = &wcn3990_ctrl1,
        .host_ie = &wcn3990_host_ie,
        .wm_regs = &wcn3990_wm_reg,
        .misc_regs = &wcn3990_misc_reg,
        .wm_srcr = &wcn3990_wm_src_ring,
        .wm_dstr = &wcn3990_wm_dst_ring,
        .upd = &wcn3990_ctrl1_upd,
};

const struct ath10k_hw_values wcn3990_values = {
        .rtc_state_val_on = 5,
        .ce_count = 12,
        .msi_assign_ce_max = 12,
        .num_target_ce_config_wlan = 12,
        .ce_desc_meta_data_mask = 0xFFF0,
        .ce_desc_meta_data_lsb = 4,
};

static struct ath10k_hw_ce_regs_addr_map qcax_src_ring = {
        .msb = 0x00000010,
        .lsb = 0x00000010,
        .mask = GENMASK(16, 16),
};

static struct ath10k_hw_ce_regs_addr_map qcax_dst_ring = {
        .msb = 0x00000011,
        .lsb = 0x00000011,
        .mask = GENMASK(17, 17),
};

static struct ath10k_hw_ce_regs_addr_map qcax_dmax = {
        .msb = 0x0000000f,
        .lsb = 0x00000000,
        .mask = GENMASK(15, 0),
};

static struct ath10k_hw_ce_ctrl1 qcax_ctrl1 = {
        .addr = 0x00000010,
        .hw_mask = 0x0007ffff,
        .sw_mask = 0x0007ffff,
        .hw_wr_mask = 0x00000000,
        .sw_wr_mask = 0x0007ffff,
        .reset_mask = 0xffffffff,
        .reset = 0x00000080,
        .src_ring = &qcax_src_ring,
        .dst_ring = &qcax_dst_ring,
        .dmax = &qcax_dmax,
};

static struct ath10k_hw_ce_regs_addr_map qcax_cmd_halt_status = {
        .msb = 0x00000003,
        .lsb = 0x00000003,
        .mask = GENMASK(3, 3),
};

static struct ath10k_hw_ce_cmd_halt qcax_cmd_halt = {
        .msb = 0x00000000,
        .mask = GENMASK(0, 0),
        .status_reset = 0x00000000,
        .status = &qcax_cmd_halt_status,
};

static struct ath10k_hw_ce_regs_addr_map qcax_host_ie_cc = {
        .msb = 0x00000000,
        .lsb = 0x00000000,
        .mask = GENMASK(0, 0),
};

static struct ath10k_hw_ce_host_ie qcax_host_ie = {
        .copy_complete_reset = 0x00000000,
        .copy_complete = &qcax_host_ie_cc,
};

static struct ath10k_hw_ce_host_wm_regs qcax_wm_reg = {
        .dstr_lmask = 0x00000010,
        .dstr_hmask = 0x00000008,
        .srcr_lmask = 0x00000004,
        .srcr_hmask = 0x00000002,
        .cc_mask = 0x00000001,
        .wm_mask = 0x0000001E,
        .addr = 0x00000030,
};

static struct ath10k_hw_ce_misc_regs qcax_misc_reg = {
        .axi_err = 0x00000400,
        .dstr_add_err = 0x00000200,
        .srcr_len_err = 0x00000100,
        .dstr_mlen_vio = 0x00000080,
        .dstr_overflow = 0x00000040,
        .srcr_overflow = 0x00000020,
        .err_mask = 0x000007E0,
        .addr = 0x00000038,
};

static struct ath10k_hw_ce_regs_addr_map qcax_src_wm_low = {
        .msb = 0x0000001f,
        .lsb = 0x00000010,
        .mask = GENMASK(31, 16),
};

static struct ath10k_hw_ce_regs_addr_map qcax_src_wm_high = {
        .msb = 0x0000000f,
        .lsb = 0x00000000,
        .mask = GENMASK(15, 0),
};

static struct ath10k_hw_ce_dst_src_wm_regs qcax_wm_src_ring = {
        .addr = 0x0000004c,
        .low_rst = 0x00000000,
        .high_rst = 0x00000000,
        .wm_low = &qcax_src_wm_low,
        .wm_high = &qcax_src_wm_high,
};

static struct ath10k_hw_ce_regs_addr_map qcax_dst_wm_low = {
        .lsb = 0x00000010,
        .mask = GENMASK(31, 16),
};

static struct ath10k_hw_ce_regs_addr_map qcax_dst_wm_high = {
        .msb = 0x0000000f,
        .lsb = 0x00000000,
        .mask = GENMASK(15, 0),
};

static struct ath10k_hw_ce_dst_src_wm_regs qcax_wm_dst_ring = {
        .addr = 0x00000050,
        .low_rst = 0x00000000,
        .high_rst = 0x00000000,
        .wm_low = &qcax_dst_wm_low,
        .wm_high = &qcax_dst_wm_high,
};

const struct ath10k_hw_ce_regs qcax_ce_regs = {
        .sr_base_addr = 0x00000000,
        .sr_size_addr = 0x00000004,
        .dr_base_addr = 0x00000008,
        .dr_size_addr = 0x0000000c,
        .ce_cmd_addr = 0x00000018,
        .misc_ie_addr = 0x00000034,
        .sr_wr_index_addr = 0x0000003c,
        .dst_wr_index_addr = 0x00000040,
        .current_srri_addr = 0x00000044,
        .current_drri_addr = 0x00000048,
        .host_ie_addr = 0x0000002c,
        .ctrl1_regs = &qcax_ctrl1,
        .cmd_halt = &qcax_cmd_halt,
        .host_ie = &qcax_host_ie,
        .wm_regs = &qcax_wm_reg,
        .misc_regs = &qcax_misc_reg,
        .wm_srcr = &qcax_wm_src_ring,
        .wm_dstr = &qcax_wm_dst_ring,
};

const struct ath10k_hw_clk_params qca6174_clk[ATH10K_HW_REFCLK_COUNT] = {
        {
                .refclk = 48000000,
                .div = 0xe,
                .rnfrac = 0x2aaa8,
                .settle_time = 2400,
                .refdiv = 0,
                .outdiv = 1,
        },
        {
                .refclk = 19200000,
                .div = 0x24,
                .rnfrac = 0x2aaa8,
                .settle_time = 960,
                .refdiv = 0,
                .outdiv = 1,
        },
        {
                .refclk = 24000000,
                .div = 0x1d,
                .rnfrac = 0x15551,
                .settle_time = 1200,
                .refdiv = 0,
                .outdiv = 1,
        },
        {
                .refclk = 26000000,
                .div = 0x1b,
                .rnfrac = 0x4ec4,
                .settle_time = 1300,
                .refdiv = 0,
                .outdiv = 1,
        },
        {
                .refclk = 37400000,
                .div = 0x12,
                .rnfrac = 0x34b49,
                .settle_time = 1870,
                .refdiv = 0,
                .outdiv = 1,
        },
        {
                .refclk = 38400000,
                .div = 0x12,
                .rnfrac = 0x15551,
                .settle_time = 1920,
                .refdiv = 0,
                .outdiv = 1,
        },
        {
                .refclk = 40000000,
                .div = 0x12,
                .rnfrac = 0x26665,
                .settle_time = 2000,
                .refdiv = 0,
                .outdiv = 1,
        },
        {
                .refclk = 52000000,
                .div = 0x1b,
                .rnfrac = 0x4ec4,
                .settle_time = 2600,
                .refdiv = 0,
                .outdiv = 1,
        },
};

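/* Fill in survey->time and survey->time_busy from the cycle count (cc) and
 * RX clear count (rcc) counters. On hardware with one of the
 * ATH10K_HW_CC_WRAP_SHIFTED_* wraparound types a detected counter wrap
 * (current value below the previous snapshot) is compensated by re-adding
 * 0x7fffffff before the previous snapshot is subtracted; for
 * ATH10K_HW_CC_WRAP_SHIFTED_ALL only the cycle counter is corrected and the
 * busy time is dropped from the report.
 */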
void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey,
                                u32 cc, u32 rcc, u32 cc_prev, u32 rcc_prev)
{
        u32 cc_fix = 0;
        u32 rcc_fix = 0;
        enum ath10k_hw_cc_wraparound_type wraparound_type;

        survey->filled |= SURVEY_INFO_TIME |
                          SURVEY_INFO_TIME_BUSY;

        wraparound_type = ar->hw_params.cc_wraparound_type;

        if (cc < cc_prev || rcc < rcc_prev) {
                switch (wraparound_type) {
                case ATH10K_HW_CC_WRAP_SHIFTED_ALL:
                        if (cc < cc_prev) {
                                cc_fix = 0x7fffffff;
                                survey->filled &= ~SURVEY_INFO_TIME_BUSY;
                        }
                        break;
                case ATH10K_HW_CC_WRAP_SHIFTED_EACH:
                        if (cc < cc_prev)
                                cc_fix = 0x7fffffff;

                        if (rcc < rcc_prev)
                                rcc_fix = 0x7fffffff;
                        break;
                case ATH10K_HW_CC_WRAP_DISABLED:
                        break;
                }
        }

        cc -= cc_prev - cc_fix;
        rcc -= rcc_prev - rcc_fix;

        survey->time = CCNT_TO_MSEC(ar, cc);
        survey->time_busy = CCNT_TO_MSEC(ar, rcc);
}

/* The firmware does not support setting the coverage class. Instead, this
 * function monitors and modifies the corresponding MAC registers.
 */
static void ath10k_hw_qca988x_set_coverage_class(struct ath10k *ar,
                                                 s16 value)
{
        u32 slottime_reg;
        u32 slottime;
        u32 timeout_reg;
        u32 ack_timeout;
        u32 cts_timeout;
        u32 phyclk_reg;
        u32 phyclk;
        u64 fw_dbglog_mask;
        u32 fw_dbglog_level;

        mutex_lock(&ar->conf_mutex);

        /* Only modify registers if the core is started. */
        if ((ar->state != ATH10K_STATE_ON) &&
            (ar->state != ATH10K_STATE_RESTARTED)) {
                spin_lock_bh(&ar->data_lock);
                /* Store config value for when radio boots up */
                ar->fw_coverage.coverage_class = value;
                spin_unlock_bh(&ar->data_lock);
                goto unlock;
        }

        /* Retrieve the current values of the two registers that need to be
         * adjusted.
         */
        slottime_reg = ath10k_hif_read32(ar, WLAN_MAC_BASE_ADDRESS +
                                             WAVE1_PCU_GBL_IFS_SLOT);
        timeout_reg = ath10k_hif_read32(ar, WLAN_MAC_BASE_ADDRESS +
                                            WAVE1_PCU_ACK_CTS_TIMEOUT);
        phyclk_reg = ath10k_hif_read32(ar, WLAN_MAC_BASE_ADDRESS +
                                           WAVE1_PHYCLK);
        phyclk = MS(phyclk_reg, WAVE1_PHYCLK_USEC) + 1;

        if (value < 0)
                value = ar->fw_coverage.coverage_class;

        /* Break out if the coverage class and registers have the expected
         * value.
         */
        if (value == ar->fw_coverage.coverage_class &&
            slottime_reg == ar->fw_coverage.reg_slottime_conf &&
            timeout_reg == ar->fw_coverage.reg_ack_cts_timeout_conf &&
            phyclk_reg == ar->fw_coverage.reg_phyclk)
                goto unlock;

        /* Store new initial register values from the firmware. */
        if (slottime_reg != ar->fw_coverage.reg_slottime_conf)
                ar->fw_coverage.reg_slottime_orig = slottime_reg;
        if (timeout_reg != ar->fw_coverage.reg_ack_cts_timeout_conf)
                ar->fw_coverage.reg_ack_cts_timeout_orig = timeout_reg;
        ar->fw_coverage.reg_phyclk = phyclk_reg;

        /* Calculate new value based on the (original) firmware calculation. */
        slottime_reg = ar->fw_coverage.reg_slottime_orig;
        timeout_reg = ar->fw_coverage.reg_ack_cts_timeout_orig;

        /* Do some sanity checks on the slottime register. */
        if (slottime_reg % phyclk) {
                ath10k_warn(ar,
                            "failed to set coverage class: expected integer microsecond value in register\n");

                goto store_regs;
        }

        slottime = MS(slottime_reg, WAVE1_PCU_GBL_IFS_SLOT);
        slottime = slottime / phyclk;
        if (slottime != 9 && slottime != 20) {
                ath10k_warn(ar,
                            "failed to set coverage class: expected slot time of 9 or 20us in HW register. It is %uus.\n",
                            slottime);

                goto store_regs;
        }

        /* Recalculate the register values by adding the additional propagation
         * delay (3us per coverage class).
         */
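        /* For example, a coverage class of 2 extends a 9 us slot time to
         * 9 + 2 * 3 = 15 us (scaled by phyclk ticks per microsecond below),
         * and the ack/cts timeouts grow by the same 6 us before being
         * clamped to their register maxima.
         */
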
        slottime = MS(slottime_reg, WAVE1_PCU_GBL_IFS_SLOT);
        slottime += value * 3 * phyclk;
        slottime = min_t(u32, slottime, WAVE1_PCU_GBL_IFS_SLOT_MAX);
        slottime = SM(slottime, WAVE1_PCU_GBL_IFS_SLOT);
        slottime_reg = (slottime_reg & ~WAVE1_PCU_GBL_IFS_SLOT_MASK) | slottime;

        /* Update ack timeout (lower halfword). */
        ack_timeout = MS(timeout_reg, WAVE1_PCU_ACK_CTS_TIMEOUT_ACK);
        ack_timeout += 3 * value * phyclk;
        ack_timeout = min_t(u32, ack_timeout, WAVE1_PCU_ACK_CTS_TIMEOUT_MAX);
        ack_timeout = SM(ack_timeout, WAVE1_PCU_ACK_CTS_TIMEOUT_ACK);

        /* Update cts timeout (upper halfword). */
        cts_timeout = MS(timeout_reg, WAVE1_PCU_ACK_CTS_TIMEOUT_CTS);
        cts_timeout += 3 * value * phyclk;
        cts_timeout = min_t(u32, cts_timeout, WAVE1_PCU_ACK_CTS_TIMEOUT_MAX);
        cts_timeout = SM(cts_timeout, WAVE1_PCU_ACK_CTS_TIMEOUT_CTS);

        timeout_reg = ack_timeout | cts_timeout;

        ath10k_hif_write32(ar,
                           WLAN_MAC_BASE_ADDRESS + WAVE1_PCU_GBL_IFS_SLOT,
                           slottime_reg);
        ath10k_hif_write32(ar,
                           WLAN_MAC_BASE_ADDRESS + WAVE1_PCU_ACK_CTS_TIMEOUT,
                           timeout_reg);

        /* Ensure we have a debug level of WARN set for the case that the
         * coverage class is larger than 0. This is important as we need to
         * set the registers again if the firmware does an internal reset and
         * this way we will be notified of the event.
         */
        fw_dbglog_mask = ath10k_debug_get_fw_dbglog_mask(ar);
        fw_dbglog_level = ath10k_debug_get_fw_dbglog_level(ar);

        if (value > 0) {
                if (fw_dbglog_level > ATH10K_DBGLOG_LEVEL_WARN)
                        fw_dbglog_level = ATH10K_DBGLOG_LEVEL_WARN;
                fw_dbglog_mask = ~0;
        }

        ath10k_wmi_dbglog_cfg(ar, fw_dbglog_mask, fw_dbglog_level);

store_regs:
        /* After an error we will not retry setting the coverage class. */
        spin_lock_bh(&ar->data_lock);
        ar->fw_coverage.coverage_class = value;
        spin_unlock_bh(&ar->data_lock);

        ar->fw_coverage.reg_slottime_conf = slottime_reg;
        ar->fw_coverage.reg_ack_cts_timeout_conf = timeout_reg;

unlock:
        mutex_unlock(&ar->conf_mutex);
}

/**
 * ath10k_hw_qca6174_enable_pll_clock() - enable the qca6174 hw pll clock
 * @ar: the ath10k blob
 *
 * This function is very hardware specific; the clock initialization steps
 * are sensitive and have to be performed in this exact sequence, otherwise
 * the chip may crash in unpredictable ways.
 *
 * *** Be aware of this if you plan to refactor it. ***
 *
 * Return: 0 if the PLL was successfully enabled, otherwise -EINVAL
 */
static int ath10k_hw_qca6174_enable_pll_clock(struct ath10k *ar)
{
        int ret, wait_limit;
        u32 clk_div_addr, pll_init_addr, speed_addr;
        u32 addr, reg_val, mem_val;
        struct ath10k_hw_params *hw;
        const struct ath10k_hw_clk_params *hw_clk;

        hw = &ar->hw_params;

        if (ar->regs->core_clk_div_address == 0 ||
            ar->regs->cpu_pll_init_address == 0 ||
            ar->regs->cpu_speed_address == 0)
                return -EINVAL;

        clk_div_addr = ar->regs->core_clk_div_address;
        pll_init_addr = ar->regs->cpu_pll_init_address;
        speed_addr = ar->regs->cpu_speed_address;

        /* Read efuse register to find out the right hw clock configuration */
        addr = (RTC_SOC_BASE_ADDRESS | EFUSE_OFFSET);
        ret = ath10k_bmi_read_soc_reg(ar, addr, &reg_val);
        if (ret)
                return -EINVAL;

        /* Sanity check: bail out if the hw refclk index is out of bounds */
        if (MS(reg_val, EFUSE_XTAL_SEL) >= ATH10K_HW_REFCLK_COUNT)
                return -EINVAL;

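        /* The EFUSE_XTAL_SEL index selects an entry in the qca6174_clk[]
         * table above (the table hw->hw_clk points at for this chip), i.e.
         * the divider, fractional multiplier and settle time that match the
         * detected reference clock. For instance, the 19.2 MHz entry uses
         * div 0x24, rnfrac 0x2aaa8 and settle_time 960.
         */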
        hw_clk = &hw->hw_clk[MS(reg_val, EFUSE_XTAL_SEL)];

        /* Set the rnfrac and outdiv params to bb_pll register */
        addr = (RTC_SOC_BASE_ADDRESS | BB_PLL_CONFIG_OFFSET);
        ret = ath10k_bmi_read_soc_reg(ar, addr, &reg_val);
        if (ret)
                return -EINVAL;

        reg_val &= ~(BB_PLL_CONFIG_FRAC_MASK | BB_PLL_CONFIG_OUTDIV_MASK);
        reg_val |= (SM(hw_clk->rnfrac, BB_PLL_CONFIG_FRAC) |
                    SM(hw_clk->outdiv, BB_PLL_CONFIG_OUTDIV));
        ret = ath10k_bmi_write_soc_reg(ar, addr, reg_val);
        if (ret)
                return -EINVAL;

        /* Set the correct settle time value to pll_settle register */
        addr = (RTC_WMAC_BASE_ADDRESS | WLAN_PLL_SETTLE_OFFSET);
        ret = ath10k_bmi_read_soc_reg(ar, addr, &reg_val);
        if (ret)
                return -EINVAL;

        reg_val &= ~WLAN_PLL_SETTLE_TIME_MASK;
        reg_val |= SM(hw_clk->settle_time, WLAN_PLL_SETTLE_TIME);
        ret = ath10k_bmi_write_soc_reg(ar, addr, reg_val);
        if (ret)
                return -EINVAL;

        /* Set the clock_ctrl div to core_clk_ctrl register */
        addr = (RTC_SOC_BASE_ADDRESS | SOC_CORE_CLK_CTRL_OFFSET);
        ret = ath10k_bmi_read_soc_reg(ar, addr, &reg_val);
        if (ret)
                return -EINVAL;

        reg_val &= ~SOC_CORE_CLK_CTRL_DIV_MASK;
        reg_val |= SM(1, SOC_CORE_CLK_CTRL_DIV);
        ret = ath10k_bmi_write_soc_reg(ar, addr, reg_val);
        if (ret)
                return -EINVAL;

        /* Set the clock_div register */
        mem_val = 1;
        ret = ath10k_bmi_write_memory(ar, clk_div_addr, &mem_val,
                                      sizeof(mem_val));
        if (ret)
                return -EINVAL;

        /* Configure the pll_control register */
        addr = (RTC_WMAC_BASE_ADDRESS | WLAN_PLL_CONTROL_OFFSET);
        ret = ath10k_bmi_read_soc_reg(ar, addr, &reg_val);
        if (ret)
                return -EINVAL;

        reg_val |= (SM(hw_clk->refdiv, WLAN_PLL_CONTROL_REFDIV) |
                    SM(hw_clk->div, WLAN_PLL_CONTROL_DIV) |
                    SM(1, WLAN_PLL_CONTROL_NOPWD));
        ret = ath10k_bmi_write_soc_reg(ar, addr, reg_val);
        if (ret)
                return -EINVAL;

        /* Busy-wait (max 1s) until the rtc_sync status register indicates
         * the PLL is no longer changing.
         */
        wait_limit = 100000;
        addr = (RTC_WMAC_BASE_ADDRESS | RTC_SYNC_STATUS_OFFSET);
        do {
                ret = ath10k_bmi_read_soc_reg(ar, addr, &reg_val);
                if (ret)
                        return -EINVAL;

                if (!MS(reg_val, RTC_SYNC_STATUS_PLL_CHANGING))
                        break;

                wait_limit--;
                udelay(10);

        } while (wait_limit > 0);

        if (MS(reg_val, RTC_SYNC_STATUS_PLL_CHANGING))
                return -EINVAL;

        /* Unset the pll_bypass in pll_control register */
        addr = (RTC_WMAC_BASE_ADDRESS | WLAN_PLL_CONTROL_OFFSET);
        ret = ath10k_bmi_read_soc_reg(ar, addr, &reg_val);
        if (ret)
                return -EINVAL;

        reg_val &= ~WLAN_PLL_CONTROL_BYPASS_MASK;
        reg_val |= SM(0, WLAN_PLL_CONTROL_BYPASS);
        ret = ath10k_bmi_write_soc_reg(ar, addr, reg_val);
        if (ret)
                return -EINVAL;

        /* Busy-wait (max 1s) until the rtc_sync status register indicates
         * the PLL is no longer changing.
         */
        wait_limit = 100000;
        addr = (RTC_WMAC_BASE_ADDRESS | RTC_SYNC_STATUS_OFFSET);
        do {
                ret = ath10k_bmi_read_soc_reg(ar, addr, &reg_val);
                if (ret)
                        return -EINVAL;

                if (!MS(reg_val, RTC_SYNC_STATUS_PLL_CHANGING))
                        break;

                wait_limit--;
                udelay(10);

        } while (wait_limit > 0);

        if (MS(reg_val, RTC_SYNC_STATUS_PLL_CHANGING))
                return -EINVAL;

        /* Enable the hardware cpu clock register */
        addr = (RTC_SOC_BASE_ADDRESS | SOC_CPU_CLOCK_OFFSET);
        ret = ath10k_bmi_read_soc_reg(ar, addr, &reg_val);
        if (ret)
                return -EINVAL;

        reg_val &= ~SOC_CPU_CLOCK_STANDARD_MASK;
        reg_val |= SM(1, SOC_CPU_CLOCK_STANDARD);
        ret = ath10k_bmi_write_soc_reg(ar, addr, reg_val);
        if (ret)
                return -EINVAL;

        /* Unset the nopwd bit in the pll_control register */
        addr = (RTC_WMAC_BASE_ADDRESS | WLAN_PLL_CONTROL_OFFSET);
        ret = ath10k_bmi_read_soc_reg(ar, addr, &reg_val);
        if (ret)
                return -EINVAL;

        reg_val &= ~WLAN_PLL_CONTROL_NOPWD_MASK;
        ret = ath10k_bmi_write_soc_reg(ar, addr, reg_val);
        if (ret)
                return -EINVAL;

        /* Enable the pll_init register */
        mem_val = 1;
        ret = ath10k_bmi_write_memory(ar, pll_init_addr, &mem_val,
                                      sizeof(mem_val));
        if (ret)
                return -EINVAL;

        /* Set the target clock frequency to the speed register */
        ret = ath10k_bmi_write_memory(ar, speed_addr, &hw->target_cpu_freq,
                                      sizeof(hw->target_cpu_freq));
        if (ret)
                return -EINVAL;

        return 0;
}

/* Program CPU_ADDR_MSB to allow different memory
 * region access.
 */
static void ath10k_hw_map_target_mem(struct ath10k *ar, u32 msb)
{
        u32 address = SOC_CORE_BASE_ADDRESS + FW_RAM_CONFIG_ADDRESS;

        ath10k_hif_write32(ar, address, msb);
}

/* 1. Write to a memory region of the target, such as IRAM and DRAM.
 * 2. Target addresses (0x0 ~ 0x00100000 and 0x00400000 ~ 0x00500000)
 *    can be written directly. See ath10k_pci_targ_cpu_to_ce_addr() too.
 * 3. In order to access regions other than the above, the value of the
 *    CPU_ADDR_MSB register needs to be set first.
 * 4. Each target memory access window is limited to 1M. If a write is larger
 *    than 1M, it has to be split and CPU_ADDR_MSB programmed for each chunk.
 */
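/* For example (with a 1M region size), a segment that starts 0x300 bytes
 * below a region boundary is written as two chunks: the first 0x300 bytes
 * with the current CPU_ADDR_MSB value, and the remainder with CPU_ADDR_MSB
 * advanced by one, which is exactly what the helper below does.
 */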
static int ath10k_hw_diag_segment_msb_download(struct ath10k *ar,
                                               const void *buffer,
                                               u32 address,
                                               u32 length)
{
        u32 addr = address & REGION_ACCESS_SIZE_MASK;
        int ret, remain_size, size;
        const u8 *buf;

        ath10k_hw_map_target_mem(ar, CPU_ADDR_MSB_REGION_VAL(address));

        if (addr + length > REGION_ACCESS_SIZE_LIMIT) {
                size = REGION_ACCESS_SIZE_LIMIT - addr;
                remain_size = length - size;

                ret = ath10k_hif_diag_write(ar, address, buffer, size);
                if (ret) {
                        ath10k_warn(ar,
                                    "failed to download the first %d bytes segment to address:0x%x: %d\n",
                                    size, address, ret);
                        goto done;
                }

                /* Change msb to the next memory region */
                ath10k_hw_map_target_mem(ar,
                                         CPU_ADDR_MSB_REGION_VAL(address) + 1);
                buf = buffer + size;
                ret = ath10k_hif_diag_write(ar,
                                            address & ~REGION_ACCESS_SIZE_MASK,
                                            buf, remain_size);
                if (ret) {
                        ath10k_warn(ar,
                                    "failed to download the second %d bytes segment to address:0x%x: %d\n",
                                    remain_size,
                                    address & ~REGION_ACCESS_SIZE_MASK,
                                    ret);
                        goto done;
                }
        } else {
                ret = ath10k_hif_diag_write(ar, address, buffer, length);
                if (ret) {
                        ath10k_warn(ar,
                                    "failed to download the only %d bytes segment to address:0x%x: %d\n",
                                    length, address, ret);
                        goto done;
                }
        }

done:
        /* Change msb to DRAM */
        ath10k_hw_map_target_mem(ar,
                                 CPU_ADDR_MSB_REGION_VAL(DRAM_BASE_ADDRESS));
        return ret;
}

static int ath10k_hw_diag_segment_download(struct ath10k *ar,
                                           const void *buffer,
                                           u32 address,
                                           u32 length)
{
        if (address >= DRAM_BASE_ADDRESS + REGION_ACCESS_SIZE_LIMIT)
                /* Needs to change MSB for memory write */
                return ath10k_hw_diag_segment_msb_download(ar, buffer,
                                                           address, length);
        else
                return ath10k_hif_diag_write(ar, address, buffer, length);
}

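/* Download a segmented firmware image over the diag interface. The image
 * starts with a bmi_segmented_file_header (magic number, file flags) and is
 * followed by bmi_segmented_metadata records of the form {addr, length,
 * data...}; records with the special lengths BMI_SGMTFILE_BEGINADDR and
 * BMI_SGMTFILE_DONE carry the start address and the end-of-image marker
 * instead of payload data.
 */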
int ath10k_hw_diag_fast_download(struct ath10k *ar,
                                 u32 address,
                                 const void *buffer,
                                 u32 length)
{
        const u8 *buf = buffer;
        bool sgmt_end = false;
        u32 base_addr = 0;
        u32 base_len = 0;
        u32 left = 0;
        struct bmi_segmented_file_header *hdr;
        struct bmi_segmented_metadata *metadata;
        int ret = 0;

        if (length < sizeof(*hdr))
                return -EINVAL;

        /* Check the firmware header. If it does not have the correct magic
         * number or it is compressed, return an error.
         */
        hdr = (struct bmi_segmented_file_header *)buf;
        if (__le32_to_cpu(hdr->magic_num) != BMI_SGMTFILE_MAGIC_NUM) {
                ath10k_dbg(ar, ATH10K_DBG_BOOT,
                           "Not a supported firmware, magic_num:0x%x\n",
                           hdr->magic_num);
                return -EINVAL;
        }

        if (hdr->file_flags != 0) {
                ath10k_dbg(ar, ATH10K_DBG_BOOT,
                           "Not a supported firmware, file_flags:0x%x\n",
                           hdr->file_flags);
                return -EINVAL;
        }

        metadata = (struct bmi_segmented_metadata *)hdr->data;
        left = length - sizeof(*hdr);

        while (left > 0) {
                if (left < sizeof(*metadata)) {
                        ath10k_warn(ar, "firmware segment is truncated: %d\n",
                                    left);
                        ret = -EINVAL;
                        break;
                }
                base_addr = __le32_to_cpu(metadata->addr);
                base_len = __le32_to_cpu(metadata->length);
                buf = metadata->data;
                left -= sizeof(*metadata);

                switch (base_len) {
                case BMI_SGMTFILE_BEGINADDR:
                        /* base_addr is the start address to run */
                        ret = ath10k_bmi_set_start(ar, base_addr);
                        base_len = 0;
                        break;
                case BMI_SGMTFILE_DONE:
                        /* no more segments */
                        base_len = 0;
                        sgmt_end = true;
                        ret = 0;
                        break;
                case BMI_SGMTFILE_BDDATA:
                case BMI_SGMTFILE_EXEC:
                        ath10k_warn(ar,
                                    "firmware has unsupported segment:%d\n",
                                    base_len);
                        ret = -EINVAL;
                        break;
                default:
                        if (base_len > left) {
                                /* sanity check */
                                ath10k_warn(ar,
                                            "firmware has invalid segment length, %d > %d\n",
                                            base_len, left);
                                ret = -EINVAL;
                                break;
                        }

                        ret = ath10k_hw_diag_segment_download(ar,
                                                              buf,
                                                              base_addr,
                                                              base_len);

                        if (ret)
                                ath10k_warn(ar,
                                            "failed to download firmware via diag interface:%d\n",
                                            ret);
                        break;
                }

                if (ret || sgmt_end)
                        break;

                metadata = (struct bmi_segmented_metadata *)(buf + base_len);
                left -= base_len;
        }

        if (ret == 0)
                ath10k_dbg(ar, ATH10K_DBG_BOOT,
                           "boot firmware fast diag download successfully.\n");
        return ret;
}

const struct ath10k_hw_ops qca988x_ops = {
        .set_coverage_class = ath10k_hw_qca988x_set_coverage_class,
};

static int ath10k_qca99x0_rx_desc_get_l3_pad_bytes(struct htt_rx_desc *rxd)
{
        return MS(__le32_to_cpu(rxd->msdu_end.qca99x0.info1),
                  RX_MSDU_END_INFO1_L3_HDR_PAD);
}

const struct ath10k_hw_ops qca99x0_ops = {
        .rx_desc_get_l3_pad_bytes = ath10k_qca99x0_rx_desc_get_l3_pad_bytes,
};

const struct ath10k_hw_ops qca6174_ops = {
        .set_coverage_class = ath10k_hw_qca988x_set_coverage_class,
        .enable_pll_clk = ath10k_hw_qca6174_enable_pll_clock,
};

const struct ath10k_hw_ops wcn3990_ops = {};