// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2018-2021 Intel Corporation
 */
#include <linux/firmware.h>
#include "iwl-drv.h"
#include "iwl-trans.h"
#include "iwl-dbg-tlv.h"
#include "fw/dbg.h"
#include "fw/runtime.h"

/**
 * enum iwl_dbg_tlv_type - debug TLV types
 * @IWL_DBG_TLV_TYPE_DEBUG_INFO: debug info TLV
 * @IWL_DBG_TLV_TYPE_BUF_ALLOC: buffer allocation TLV
 * @IWL_DBG_TLV_TYPE_HCMD: host command TLV
 * @IWL_DBG_TLV_TYPE_REGION: region TLV
 * @IWL_DBG_TLV_TYPE_TRIGGER: trigger TLV
 * @IWL_DBG_TLV_TYPE_NUM: number of debug TLVs
 */
enum iwl_dbg_tlv_type {
	IWL_DBG_TLV_TYPE_DEBUG_INFO =
		IWL_UCODE_TLV_TYPE_DEBUG_INFO - IWL_UCODE_TLV_DEBUG_BASE,
	IWL_DBG_TLV_TYPE_BUF_ALLOC,
	IWL_DBG_TLV_TYPE_HCMD,
	IWL_DBG_TLV_TYPE_REGION,
	IWL_DBG_TLV_TYPE_TRIGGER,
	IWL_DBG_TLV_TYPE_NUM,
};

/**
 * struct iwl_dbg_tlv_ver_data - debug TLV version struct
 * @min_ver: min version supported
 * @max_ver: max version supported
 */
struct iwl_dbg_tlv_ver_data {
	int min_ver;
	int max_ver;
};

/**
 * struct iwl_dbg_tlv_timer_node - timer node struct
 * @list: list of &struct iwl_dbg_tlv_timer_node
 * @timer: timer
 * @fwrt: &struct iwl_fw_runtime
 * @tlv: TLV attached to the timer node
 */
struct iwl_dbg_tlv_timer_node {
	struct list_head list;
	struct timer_list timer;
	struct iwl_fw_runtime *fwrt;
	struct iwl_ucode_tlv *tlv;
};

static const struct iwl_dbg_tlv_ver_data
dbg_ver_table[IWL_DBG_TLV_TYPE_NUM] = {
	[IWL_DBG_TLV_TYPE_DEBUG_INFO] = {.min_ver = 1, .max_ver = 1,},
	[IWL_DBG_TLV_TYPE_BUF_ALLOC] = {.min_ver = 1, .max_ver = 1,},
	[IWL_DBG_TLV_TYPE_HCMD] = {.min_ver = 1, .max_ver = 1,},
	[IWL_DBG_TLV_TYPE_REGION] = {.min_ver = 1, .max_ver = 2,},
	[IWL_DBG_TLV_TYPE_TRIGGER] = {.min_ver = 1, .max_ver = 1,},
};

static int iwl_dbg_tlv_add(const struct iwl_ucode_tlv *tlv,
			   struct list_head *list)
{
	u32 len = le32_to_cpu(tlv->length);
	struct iwl_dbg_tlv_node *node;

	node = kzalloc(sizeof(*node) + len, GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	memcpy(&node->tlv, tlv, sizeof(node->tlv) + len);
	list_add_tail(&node->list, list);

	return 0;
}

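/*
 * Note: this helper indexes dbg_ver_table[] with the raw TLV type and
 * relies on the caller (iwl_dbg_tlv_alloc()) having already bounds
 * checked the type against IWL_UCODE_TLV_DEBUG_BASE and the table size.
 */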
static bool iwl_dbg_tlv_ver_support(const struct iwl_ucode_tlv *tlv)
{
	const struct iwl_fw_ini_header *hdr = (const void *)&tlv->data[0];
	u32 type = le32_to_cpu(tlv->type);
	u32 tlv_idx = type - IWL_UCODE_TLV_DEBUG_BASE;
	u32 ver = le32_to_cpu(hdr->version);

	if (ver < dbg_ver_table[tlv_idx].min_ver ||
	    ver > dbg_ver_table[tlv_idx].max_ver)
		return false;

	return true;
}

static int iwl_dbg_tlv_alloc_debug_info(struct iwl_trans *trans,
					const struct iwl_ucode_tlv *tlv)
{
	const struct iwl_fw_ini_debug_info_tlv *debug_info = (const void *)tlv->data;

	if (le32_to_cpu(tlv->length) != sizeof(*debug_info))
		return -EINVAL;

	IWL_DEBUG_FW(trans, "WRT: Loading debug cfg: %s\n",
		     debug_info->debug_cfg_name);

	return iwl_dbg_tlv_add(tlv, &trans->dbg.debug_info_tlv_list);
}

static int iwl_dbg_tlv_alloc_buf_alloc(struct iwl_trans *trans,
				       const struct iwl_ucode_tlv *tlv)
{
	const struct iwl_fw_ini_allocation_tlv *alloc = (const void *)tlv->data;
	u32 buf_location;
	u32 alloc_id;

	if (le32_to_cpu(tlv->length) != sizeof(*alloc))
		return -EINVAL;

	buf_location = le32_to_cpu(alloc->buf_location);
	alloc_id = le32_to_cpu(alloc->alloc_id);

	if (buf_location == IWL_FW_INI_LOCATION_INVALID ||
	    buf_location >= IWL_FW_INI_LOCATION_NUM)
		goto err;

	if (alloc_id == IWL_FW_INI_ALLOCATION_INVALID ||
	    alloc_id >= IWL_FW_INI_ALLOCATION_NUM)
		goto err;

	if (buf_location == IWL_FW_INI_LOCATION_NPK_PATH &&
	    alloc_id != IWL_FW_INI_ALLOCATION_ID_DBGC1)
		goto err;

	if (buf_location == IWL_FW_INI_LOCATION_SRAM_PATH &&
	    alloc_id != IWL_FW_INI_ALLOCATION_ID_DBGC1 &&
	    alloc_id != IWL_FW_INI_ALLOCATION_ID_INTERNAL)
		goto err;

	trans->dbg.fw_mon_cfg[alloc_id] = *alloc;

	return 0;
err:
	IWL_ERR(trans,
		"WRT: Invalid allocation id %u and/or location id %u for allocation TLV\n",
		alloc_id, buf_location);
	return -EINVAL;
}

static int iwl_dbg_tlv_alloc_hcmd(struct iwl_trans *trans,
				  const struct iwl_ucode_tlv *tlv)
{
	const struct iwl_fw_ini_hcmd_tlv *hcmd = (const void *)tlv->data;
	u32 tp = le32_to_cpu(hcmd->time_point);

	if (le32_to_cpu(tlv->length) <= sizeof(*hcmd))
		return -EINVAL;

	/* Host commands cannot be sent at the early time point since the FW
	 * is not yet ready
	 */
	if (tp == IWL_FW_INI_TIME_POINT_INVALID ||
	    tp >= IWL_FW_INI_TIME_POINT_NUM ||
	    tp == IWL_FW_INI_TIME_POINT_EARLY) {
		IWL_ERR(trans,
			"WRT: Invalid time point %u for host command TLV\n",
			tp);
		return -EINVAL;
	}

	return iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].hcmd_list);
}

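/*
 * Region TLVs are stored per region ID in trans->dbg.active_regions;
 * if two regions arrive with the same ID (e.g. one from the firmware
 * file and one from the external debug file), the one parsed last wins.
 */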
static int iwl_dbg_tlv_alloc_region(struct iwl_trans *trans,
				    const struct iwl_ucode_tlv *tlv)
{
	const struct iwl_fw_ini_region_tlv *reg = (const void *)tlv->data;
	struct iwl_ucode_tlv **active_reg;
	u32 id, type;
	u32 tlv_len = sizeof(*tlv) + le32_to_cpu(tlv->length);

	/* validate the length before dereferencing any region fields */
	if (le32_to_cpu(tlv->length) < sizeof(*reg))
		return -EINVAL;

	id = le32_to_cpu(reg->id);
	type = le32_to_cpu(reg->type);

	/*
	 * The higher part of the ID in version 2 is irrelevant for
	 * us, so mask it out.
	 */
	if (le32_to_cpu(reg->hdr.version) == 2)
		id &= IWL_FW_INI_REGION_V2_MASK;

	/* for safe use of a string from FW, limit it to IWL_FW_INI_MAX_NAME */
	IWL_DEBUG_FW(trans, "WRT: parsing region: %.*s\n",
		     IWL_FW_INI_MAX_NAME, reg->name);

	if (id >= IWL_FW_INI_MAX_REGION_ID) {
		IWL_ERR(trans, "WRT: Invalid region id %u\n", id);
		return -EINVAL;
	}

	if (type <= IWL_FW_INI_REGION_INVALID ||
	    type >= IWL_FW_INI_REGION_NUM) {
		IWL_ERR(trans, "WRT: Invalid region type %u\n", type);
		return -EINVAL;
	}

	if (type == IWL_FW_INI_REGION_PCI_IOSF_CONFIG &&
	    !trans->ops->read_config32) {
		IWL_ERR(trans, "WRT: Unsupported region type %u\n", type);
		return -EOPNOTSUPP;
	}

	active_reg = &trans->dbg.active_regions[id];
	if (*active_reg) {
		IWL_WARN(trans, "WRT: Overriding region id %u\n", id);

		kfree(*active_reg);
	}

	*active_reg = kmemdup(tlv, tlv_len, GFP_KERNEL);
	if (!*active_reg)
		return -ENOMEM;

	IWL_DEBUG_FW(trans, "WRT: Enabling region id %u type %u\n", id, type);

	return 0;
}

static int iwl_dbg_tlv_alloc_trigger(struct iwl_trans *trans,
				     const struct iwl_ucode_tlv *tlv)
{
	const struct iwl_fw_ini_trigger_tlv *trig = (const void *)tlv->data;
	struct iwl_fw_ini_trigger_tlv *dup_trig;
	u32 tp = le32_to_cpu(trig->time_point);
	struct iwl_ucode_tlv *dup = NULL;
	int ret;

	if (le32_to_cpu(tlv->length) < sizeof(*trig))
		return -EINVAL;

	if (tp <= IWL_FW_INI_TIME_POINT_INVALID ||
	    tp >= IWL_FW_INI_TIME_POINT_NUM) {
		IWL_ERR(trans,
			"WRT: Invalid time point %u for trigger TLV\n",
			tp);
		return -EINVAL;
	}

	if (!le32_to_cpu(trig->occurrences)) {
		/* occurrences == 0 means "no limit"; store a duplicate that
		 * carries ~0 (an effectively infinite count) instead
		 */
		dup = kmemdup(tlv, sizeof(*tlv) + le32_to_cpu(tlv->length),
			      GFP_KERNEL);
		if (!dup)
			return -ENOMEM;
		dup_trig = (void *)dup->data;
		dup_trig->occurrences = cpu_to_le32(-1);
		tlv = dup;
	}

	ret = iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].trig_list);
	kfree(dup);

	return ret;
}

static int (*dbg_tlv_alloc[])(struct iwl_trans *trans,
			      const struct iwl_ucode_tlv *tlv) = {
	[IWL_DBG_TLV_TYPE_DEBUG_INFO] = iwl_dbg_tlv_alloc_debug_info,
	[IWL_DBG_TLV_TYPE_BUF_ALLOC] = iwl_dbg_tlv_alloc_buf_alloc,
	[IWL_DBG_TLV_TYPE_HCMD] = iwl_dbg_tlv_alloc_hcmd,
	[IWL_DBG_TLV_TYPE_REGION] = iwl_dbg_tlv_alloc_region,
	[IWL_DBG_TLV_TYPE_TRIGGER] = iwl_dbg_tlv_alloc_trigger,
};

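/*
 * Entry point for a single debug TLV, either embedded in the firmware
 * file (ext == false) or coming from the external debug file
 * (ext == true): filter by debug domain, dispatch by type through
 * dbg_tlv_alloc[], and track the configuration state per origin.
 */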
void iwl_dbg_tlv_alloc(struct iwl_trans *trans, const struct iwl_ucode_tlv *tlv,
		       bool ext)
{
	const struct iwl_fw_ini_header *hdr = (const void *)&tlv->data[0];
	u32 type = le32_to_cpu(tlv->type);
	u32 tlv_idx = type - IWL_UCODE_TLV_DEBUG_BASE;
	u32 domain = le32_to_cpu(hdr->domain);
	enum iwl_ini_cfg_state *cfg_state = ext ?
		&trans->dbg.external_ini_cfg : &trans->dbg.internal_ini_cfg;
	int ret;

	if (domain != IWL_FW_INI_DOMAIN_ALWAYS_ON &&
	    !(domain & trans->dbg.domains_bitmap)) {
		IWL_DEBUG_FW(trans,
			     "WRT: Skipping TLV with disabled domain 0x%x (0x%x)\n",
			     domain, trans->dbg.domains_bitmap);
		return;
	}

	if (tlv_idx >= ARRAY_SIZE(dbg_tlv_alloc) || !dbg_tlv_alloc[tlv_idx]) {
		IWL_ERR(trans, "WRT: Unsupported TLV type 0x%x\n", type);
		goto out_err;
	}

	if (!iwl_dbg_tlv_ver_support(tlv)) {
		IWL_ERR(trans, "WRT: Unsupported TLV 0x%x version %u\n", type,
			le32_to_cpu(hdr->version));
		goto out_err;
	}

	ret = dbg_tlv_alloc[tlv_idx](trans, tlv);
	if (ret) {
		IWL_ERR(trans,
			"WRT: Failed to allocate TLV 0x%x, ret %d, (ext=%d)\n",
			type, ret, ext);
		goto out_err;
	}

	if (*cfg_state == IWL_INI_CFG_STATE_NOT_LOADED)
		*cfg_state = IWL_INI_CFG_STATE_LOADED;

	return;

out_err:
	*cfg_state = IWL_INI_CFG_STATE_CORRUPTED;
}

void iwl_dbg_tlv_del_timers(struct iwl_trans *trans)
{
	struct list_head *timer_list = &trans->dbg.periodic_trig_list;
	struct iwl_dbg_tlv_timer_node *node, *tmp;

	list_for_each_entry_safe(node, tmp, timer_list, list) {
		del_timer(&node->timer);
		list_del(&node->list);
		kfree(node);
	}
}
IWL_EXPORT_SYMBOL(iwl_dbg_tlv_del_timers);

static void iwl_dbg_tlv_fragments_free(struct iwl_trans *trans,
				       enum iwl_fw_ini_allocation_id alloc_id)
{
	struct iwl_fw_mon *fw_mon;
	int i;

	if (alloc_id <= IWL_FW_INI_ALLOCATION_INVALID ||
	    alloc_id >= IWL_FW_INI_ALLOCATION_NUM)
		return;

	fw_mon = &trans->dbg.fw_mon_ini[alloc_id];

	for (i = 0; i < fw_mon->num_frags; i++) {
		struct iwl_dram_data *frag = &fw_mon->frags[i];

		dma_free_coherent(trans->dev, frag->size, frag->block,
				  frag->physical);

		frag->physical = 0;
		frag->block = NULL;
		frag->size = 0;
	}

	kfree(fw_mon->frags);
	fw_mon->frags = NULL;
	fw_mon->num_frags = 0;
}

void iwl_dbg_tlv_free(struct iwl_trans *trans)
{
	struct iwl_dbg_tlv_node *tlv_node, *tlv_node_tmp;
	int i;

	iwl_dbg_tlv_del_timers(trans);

	for (i = 0; i < ARRAY_SIZE(trans->dbg.active_regions); i++) {
		struct iwl_ucode_tlv **active_reg =
			&trans->dbg.active_regions[i];

		kfree(*active_reg);
		*active_reg = NULL;
	}

	list_for_each_entry_safe(tlv_node, tlv_node_tmp,
				 &trans->dbg.debug_info_tlv_list, list) {
		list_del(&tlv_node->list);
		kfree(tlv_node);
	}

	for (i = 0; i < ARRAY_SIZE(trans->dbg.time_point); i++) {
		struct iwl_dbg_tlv_time_point_data *tp =
			&trans->dbg.time_point[i];

		list_for_each_entry_safe(tlv_node, tlv_node_tmp, &tp->trig_list,
					 list) {
			list_del(&tlv_node->list);
			kfree(tlv_node);
		}

		list_for_each_entry_safe(tlv_node, tlv_node_tmp, &tp->hcmd_list,
					 list) {
			list_del(&tlv_node->list);
			kfree(tlv_node);
		}

		list_for_each_entry_safe(tlv_node, tlv_node_tmp,
					 &tp->active_trig_list, list) {
			list_del(&tlv_node->list);
			kfree(tlv_node);
		}
	}

	for (i = 0; i < ARRAY_SIZE(trans->dbg.fw_mon_ini); i++)
		iwl_dbg_tlv_fragments_free(trans, i);
}

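/*
 * Walk a buffer of concatenated TLVs: each entry is a struct
 * iwl_ucode_tlv header followed by 'length' payload bytes, with
 * entries laid out on 4-byte boundaries.
 */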
static int iwl_dbg_tlv_parse_bin(struct iwl_trans *trans, const u8 *data,
				 size_t len)
{
	const struct iwl_ucode_tlv *tlv;
	u32 tlv_len;

	while (len >= sizeof(*tlv)) {
		len -= sizeof(*tlv);
		tlv = (void *)data;

		tlv_len = le32_to_cpu(tlv->length);

		if (len < tlv_len) {
			IWL_ERR(trans, "invalid TLV len: %zd/%u\n",
				len, tlv_len);
			return -EINVAL;
		}
		len -= ALIGN(tlv_len, 4);
		data += sizeof(*tlv) + ALIGN(tlv_len, 4);

		iwl_dbg_tlv_alloc(trans, tlv, true);
	}

	return 0;
}

void iwl_dbg_tlv_load_bin(struct device *dev, struct iwl_trans *trans)
{
	const struct firmware *fw;
	int res;

	if (!iwlwifi_mod_params.enable_ini ||
	    trans->trans_cfg->device_family <= IWL_DEVICE_FAMILY_9000)
		return;

	res = firmware_request_nowarn(&fw, "iwl-debug-yoyo.bin", dev);
	if (res)
		return;

	iwl_dbg_tlv_parse_bin(trans, fw->data, fw->size);

	release_firmware(fw);
}

void iwl_dbg_tlv_init(struct iwl_trans *trans)
{
	int i;

	INIT_LIST_HEAD(&trans->dbg.debug_info_tlv_list);
	INIT_LIST_HEAD(&trans->dbg.periodic_trig_list);

	for (i = 0; i < ARRAY_SIZE(trans->dbg.time_point); i++) {
		struct iwl_dbg_tlv_time_point_data *tp =
			&trans->dbg.time_point[i];

		INIT_LIST_HEAD(&tp->trig_list);
		INIT_LIST_HEAD(&tp->hcmd_list);
		INIT_LIST_HEAD(&tp->active_trig_list);
	}
}

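/*
 * Returns the number of pages actually allocated (which may be fewer
 * than requested when memory is tight) or a negative error code.
 */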
static int iwl_dbg_tlv_alloc_fragment(struct iwl_fw_runtime *fwrt,
				      struct iwl_dram_data *frag, u32 pages)
{
	void *block = NULL;
	dma_addr_t physical;

	if (!frag || frag->size || !pages)
		return -EIO;

	/*
	 * We try to allocate as many pages as we can, starting with
	 * the requested amount and going down until we can allocate
	 * something. Because of DIV_ROUND_UP(), pages will never go
	 * down to 0 and stop the loop, so stop when pages reaches 1,
	 * which is too small anyway.
	 */
	while (pages > 1) {
		block = dma_alloc_coherent(fwrt->dev, pages * PAGE_SIZE,
					   &physical,
					   GFP_KERNEL | __GFP_NOWARN);
		if (block)
			break;

		IWL_WARN(fwrt, "WRT: Failed to allocate fragment size %lu\n",
			 pages * PAGE_SIZE);

		pages = DIV_ROUND_UP(pages, 2);
	}

	if (!block)
		return -ENOMEM;

	frag->physical = physical;
	frag->block = block;
	frag->size = pages * PAGE_SIZE;

	return pages;
}

static int iwl_dbg_tlv_alloc_fragments(struct iwl_fw_runtime *fwrt,
				       enum iwl_fw_ini_allocation_id alloc_id)
{
	struct iwl_fw_mon *fw_mon;
	struct iwl_fw_ini_allocation_tlv *fw_mon_cfg;
	u32 num_frags, remain_pages, frag_pages;
	int i;

	if (alloc_id < IWL_FW_INI_ALLOCATION_INVALID ||
	    alloc_id >= IWL_FW_INI_ALLOCATION_NUM)
		return -EIO;

	fw_mon_cfg = &fwrt->trans->dbg.fw_mon_cfg[alloc_id];
	fw_mon = &fwrt->trans->dbg.fw_mon_ini[alloc_id];

	if (fw_mon->num_frags ||
	    fw_mon_cfg->buf_location !=
	    cpu_to_le32(IWL_FW_INI_LOCATION_DRAM_PATH))
		return 0;

	num_frags = le32_to_cpu(fw_mon_cfg->max_frags_num);
	if (!fw_has_capa(&fwrt->fw->ucode_capa,
			 IWL_UCODE_TLV_CAPA_DBG_BUF_ALLOC_CMD_SUPP)) {
		if (alloc_id != IWL_FW_INI_ALLOCATION_ID_DBGC1)
			return -EIO;
		num_frags = 1;
	}

	remain_pages = DIV_ROUND_UP(le32_to_cpu(fw_mon_cfg->req_size),
				    PAGE_SIZE);
	num_frags = min_t(u32, num_frags, BUF_ALLOC_MAX_NUM_FRAGS);
	num_frags = min_t(u32, num_frags, remain_pages);
	frag_pages = DIV_ROUND_UP(remain_pages, num_frags);

	fw_mon->frags = kcalloc(num_frags, sizeof(*fw_mon->frags), GFP_KERNEL);
	if (!fw_mon->frags)
		return -ENOMEM;

	for (i = 0; i < num_frags; i++) {
		int pages = min_t(u32, frag_pages, remain_pages);

		IWL_DEBUG_FW(fwrt,
			     "WRT: Allocating DRAM buffer (alloc_id=%u, fragment=%u, size=0x%lx)\n",
			     alloc_id, i, pages * PAGE_SIZE);

		pages = iwl_dbg_tlv_alloc_fragment(fwrt, &fw_mon->frags[i],
						   pages);
		if (pages < 0) {
			u32 alloc_size = le32_to_cpu(fw_mon_cfg->req_size) -
				(remain_pages * PAGE_SIZE);

			if (alloc_size < le32_to_cpu(fw_mon_cfg->min_size)) {
				iwl_dbg_tlv_fragments_free(fwrt->trans,
							   alloc_id);
				return pages;
			}
			break;
		}

		remain_pages -= pages;
		fw_mon->num_frags++;
	}

	return 0;
}

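/*
 * Hand the DRAM fragments of one allocation to the firmware with
 * BUFFER_ALLOCATION host commands, at most BUF_ALLOC_MAX_NUM_FRAGS
 * fragments per command. The first DBGC1 fragment is skipped here
 * since the FW learns about it through a register or the context info.
 */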
static int iwl_dbg_tlv_apply_buffer(struct iwl_fw_runtime *fwrt,
				    enum iwl_fw_ini_allocation_id alloc_id)
{
	struct iwl_fw_mon *fw_mon;
	u32 remain_frags, num_commands;
	int i, fw_mon_idx = 0;

	if (!fw_has_capa(&fwrt->fw->ucode_capa,
			 IWL_UCODE_TLV_CAPA_DBG_BUF_ALLOC_CMD_SUPP))
		return 0;

	if (alloc_id < IWL_FW_INI_ALLOCATION_INVALID ||
	    alloc_id >= IWL_FW_INI_ALLOCATION_NUM)
		return -EIO;

	if (le32_to_cpu(fwrt->trans->dbg.fw_mon_cfg[alloc_id].buf_location) !=
	    IWL_FW_INI_LOCATION_DRAM_PATH)
		return 0;

	fw_mon = &fwrt->trans->dbg.fw_mon_ini[alloc_id];

	/* the first fragment of DBGC1 is given to the FW via register
	 * or context info
	 */
	if (alloc_id == IWL_FW_INI_ALLOCATION_ID_DBGC1)
		fw_mon_idx++;

	remain_frags = fw_mon->num_frags - fw_mon_idx;
	if (!remain_frags)
		return 0;

	num_commands = DIV_ROUND_UP(remain_frags, BUF_ALLOC_MAX_NUM_FRAGS);

	IWL_DEBUG_FW(fwrt, "WRT: Applying DRAM destination (alloc_id=%u)\n",
		     alloc_id);

	for (i = 0; i < num_commands; i++) {
		u32 num_frags = min_t(u32, remain_frags,
				      BUF_ALLOC_MAX_NUM_FRAGS);
		struct iwl_buf_alloc_cmd data = {
			.alloc_id = cpu_to_le32(alloc_id),
			.num_frags = cpu_to_le32(num_frags),
			.buf_location =
				cpu_to_le32(IWL_FW_INI_LOCATION_DRAM_PATH),
		};
		struct iwl_host_cmd hcmd = {
			.id = WIDE_ID(DEBUG_GROUP, BUFFER_ALLOCATION),
			.data[0] = &data,
			.len[0] = sizeof(data),
		};
		int ret, j;

		for (j = 0; j < num_frags; j++) {
			struct iwl_buf_alloc_frag *frag = &data.frags[j];
			struct iwl_dram_data *fw_mon_frag =
				&fw_mon->frags[fw_mon_idx++];

			frag->addr = cpu_to_le64(fw_mon_frag->physical);
			frag->size = cpu_to_le32(fw_mon_frag->size);
		}
		ret = iwl_trans_send_cmd(fwrt->trans, &hcmd);
		if (ret)
			return ret;

		remain_frags -= num_frags;
	}

	return 0;
}

static void iwl_dbg_tlv_apply_buffers(struct iwl_fw_runtime *fwrt)
{
	int ret, i;

	for (i = 0; i < IWL_FW_INI_ALLOCATION_NUM; i++) {
		ret = iwl_dbg_tlv_apply_buffer(fwrt, i);
		if (ret)
			IWL_WARN(fwrt,
				 "WRT: Failed to apply DRAM buffer for allocation id %d, ret=%d\n",
				 i, ret);
	}
}

static void iwl_dbg_tlv_send_hcmds(struct iwl_fw_runtime *fwrt,
				   struct list_head *hcmd_list)
{
	struct iwl_dbg_tlv_node *node;

	list_for_each_entry(node, hcmd_list, list) {
		struct iwl_fw_ini_hcmd_tlv *hcmd = (void *)node->tlv.data;
		struct iwl_fw_ini_hcmd *hcmd_data = &hcmd->hcmd;
		u16 hcmd_len = le32_to_cpu(node->tlv.length) - sizeof(*hcmd);
		struct iwl_host_cmd cmd = {
			.id = WIDE_ID(hcmd_data->group, hcmd_data->id),
			.len = { hcmd_len, },
			.data = { hcmd_data->data, },
		};

		iwl_trans_send_cmd(fwrt->trans, &cmd);
	}
}

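/*
 * Timer callback for periodic triggers: collect a dump and re-arm the
 * timer with the interval taken from trig->data[0] (validated and
 * clamped in iwl_dbg_tlv_set_periodic_trigs()) for as long as the
 * trigger still has occurrences left.
 */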
static void iwl_dbg_tlv_periodic_trig_handler(struct timer_list *t)
{
	struct iwl_dbg_tlv_timer_node *timer_node =
		from_timer(timer_node, t, timer);
	struct iwl_fwrt_dump_data dump_data = {
		.trig = (void *)timer_node->tlv->data,
	};
	int ret;

	ret = iwl_fw_dbg_ini_collect(timer_node->fwrt, &dump_data);
	if (!ret || ret == -EBUSY) {
		u32 occur = le32_to_cpu(dump_data.trig->occurrences);
		u32 collect_interval = le32_to_cpu(dump_data.trig->data[0]);

		if (!occur)
			return;

		mod_timer(t, jiffies + msecs_to_jiffies(collect_interval));
	}
}

static void iwl_dbg_tlv_set_periodic_trigs(struct iwl_fw_runtime *fwrt)
{
	struct iwl_dbg_tlv_node *node;
	struct list_head *trig_list =
		&fwrt->trans->dbg.time_point[IWL_FW_INI_TIME_POINT_PERIODIC].active_trig_list;

	list_for_each_entry(node, trig_list, list) {
		struct iwl_fw_ini_trigger_tlv *trig = (void *)node->tlv.data;
		struct iwl_dbg_tlv_timer_node *timer_node;
		u32 occur = le32_to_cpu(trig->occurrences), collect_interval;
		u32 min_interval = 100;

		if (!occur)
			continue;

		/* make sure there is at least one dword of data for the
		 * interval value
		 */
		if (le32_to_cpu(node->tlv.length) <
		    sizeof(*trig) + sizeof(__le32)) {
			IWL_ERR(fwrt,
				"WRT: Invalid periodic trigger: no interval data given\n");
			continue;
		}

		if (le32_to_cpu(trig->data[0]) < min_interval) {
			IWL_WARN(fwrt,
				 "WRT: Overriding min interval from %u to %u msec\n",
				 le32_to_cpu(trig->data[0]), min_interval);
			trig->data[0] = cpu_to_le32(min_interval);
		}

		collect_interval = le32_to_cpu(trig->data[0]);

		timer_node = kzalloc(sizeof(*timer_node), GFP_KERNEL);
		if (!timer_node) {
			IWL_ERR(fwrt,
				"WRT: Failed to allocate periodic trigger\n");
			continue;
		}

		timer_node->fwrt = fwrt;
		timer_node->tlv = &node->tlv;
		timer_setup(&timer_node->timer,
			    iwl_dbg_tlv_periodic_trig_handler, 0);

		list_add_tail(&timer_node->list,
			      &fwrt->trans->dbg.periodic_trig_list);

		IWL_DEBUG_FW(fwrt, "WRT: Enabling periodic trigger\n");

		mod_timer(&timer_node->timer,
			  jiffies + msecs_to_jiffies(collect_interval));
	}
}

/* returns true iff every data dword of 'new' also appears in 'old' */
static bool is_trig_data_contained(const struct iwl_ucode_tlv *new,
				   const struct iwl_ucode_tlv *old)
{
	const struct iwl_fw_ini_trigger_tlv *new_trig = (const void *)new->data;
	const struct iwl_fw_ini_trigger_tlv *old_trig = (const void *)old->data;
	const __le32 *new_data = new_trig->data, *old_data = old_trig->data;
	u32 new_dwords_num = iwl_tlv_array_len(new, new_trig, data);
	u32 old_dwords_num = iwl_tlv_array_len(old, old_trig, data);
	int i, j;

	for (i = 0; i < new_dwords_num; i++) {
		bool match = false;

		for (j = 0; j < old_dwords_num; j++) {
			if (new_data[i] == old_data[j]) {
				match = true;
				break;
			}
		}
		if (!match)
			return false;
	}

	return true;
}

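/*
 * Merge a new trigger TLV into an already-active node for the same
 * time point, honoring the new trigger's apply_policy: the trigger
 * data is either appended to or replaces the existing data, and the
 * configuration dwords and regions mask may be overridden as well.
 */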
static int iwl_dbg_tlv_override_trig_node(struct iwl_fw_runtime *fwrt,
					  struct iwl_ucode_tlv *trig_tlv,
					  struct iwl_dbg_tlv_node *node)
{
	struct iwl_ucode_tlv *node_tlv = &node->tlv;
	struct iwl_fw_ini_trigger_tlv *node_trig = (void *)node_tlv->data;
	struct iwl_fw_ini_trigger_tlv *trig = (void *)trig_tlv->data;
	u32 policy = le32_to_cpu(trig->apply_policy);
	u32 size = le32_to_cpu(trig_tlv->length);
	u32 trig_data_len = size - sizeof(*trig);
	u32 offset = 0;

	if (!(policy & IWL_FW_INI_APPLY_POLICY_OVERRIDE_DATA)) {
		u32 data_len = le32_to_cpu(node_tlv->length) -
			sizeof(*node_trig);

		IWL_DEBUG_FW(fwrt,
			     "WRT: Appending trigger data (time point %u)\n",
			     le32_to_cpu(trig->time_point));

		offset += data_len;
		size += data_len;
	} else {
		IWL_DEBUG_FW(fwrt,
			     "WRT: Overriding trigger data (time point %u)\n",
			     le32_to_cpu(trig->time_point));
	}

	if (size != le32_to_cpu(node_tlv->length)) {
		struct list_head *prev = node->list.prev;
		struct iwl_dbg_tlv_node *tmp;

		list_del(&node->list);

		tmp = krealloc(node, sizeof(*node) + size, GFP_KERNEL);
		if (!tmp) {
			IWL_WARN(fwrt,
				 "WRT: No memory to override trigger (time point %u)\n",
				 le32_to_cpu(trig->time_point));

			list_add(&node->list, prev);

			return -ENOMEM;
		}

		list_add(&tmp->list, prev);
		node_tlv = &tmp->tlv;
		node_trig = (void *)node_tlv->data;
	}

	memcpy(node_trig->data + offset, trig->data, trig_data_len);
	node_tlv->length = cpu_to_le32(size);

	if (policy & IWL_FW_INI_APPLY_POLICY_OVERRIDE_CFG) {
		IWL_DEBUG_FW(fwrt,
			     "WRT: Overriding trigger configuration (time point %u)\n",
			     le32_to_cpu(trig->time_point));

		/* the first 11 dwords are configuration related */
		memcpy(node_trig, trig, sizeof(__le32) * 11);
	}

	if (policy & IWL_FW_INI_APPLY_POLICY_OVERRIDE_REGIONS) {
		IWL_DEBUG_FW(fwrt,
			     "WRT: Overriding trigger regions (time point %u)\n",
			     le32_to_cpu(trig->time_point));

		node_trig->regions_mask = trig->regions_mask;
	} else {
		IWL_DEBUG_FW(fwrt,
			     "WRT: Appending trigger regions (time point %u)\n",
			     le32_to_cpu(trig->time_point));

		node_trig->regions_mask |= trig->regions_mask;
	}

	return 0;
}

static int
iwl_dbg_tlv_add_active_trigger(struct iwl_fw_runtime *fwrt,
			       struct list_head *trig_list,
			       struct iwl_ucode_tlv *trig_tlv)
{
	struct iwl_fw_ini_trigger_tlv *trig = (void *)trig_tlv->data;
	struct iwl_dbg_tlv_node *node, *match = NULL;
	u32 policy = le32_to_cpu(trig->apply_policy);

	list_for_each_entry(node, trig_list, list) {
		if (!(policy & IWL_FW_INI_APPLY_POLICY_MATCH_TIME_POINT))
			break;

		if (!(policy & IWL_FW_INI_APPLY_POLICY_MATCH_DATA) ||
		    is_trig_data_contained(trig_tlv, &node->tlv)) {
			match = node;
			break;
		}
	}

	if (!match) {
		IWL_DEBUG_FW(fwrt, "WRT: Enabling trigger (time point %u)\n",
			     le32_to_cpu(trig->time_point));
		return iwl_dbg_tlv_add(trig_tlv, trig_list);
	}

	return iwl_dbg_tlv_override_trig_node(fwrt, trig_tlv, match);
}

static void
iwl_dbg_tlv_gen_active_trig_list(struct iwl_fw_runtime *fwrt,
				 struct iwl_dbg_tlv_time_point_data *tp)
{
	struct iwl_dbg_tlv_node *node;
	struct list_head *trig_list = &tp->trig_list;
	struct list_head *active_trig_list = &tp->active_trig_list;

	list_for_each_entry(node, trig_list, list) {
		struct iwl_ucode_tlv *tlv = &node->tlv;

		iwl_dbg_tlv_add_active_trigger(fwrt, active_trig_list, tlv);
	}
}

static bool iwl_dbg_tlv_check_fw_pkt(struct iwl_fw_runtime *fwrt,
				     struct iwl_fwrt_dump_data *dump_data,
				     union iwl_dbg_tlv_tp_data *tp_data,
				     u32 trig_data)
{
	struct iwl_rx_packet *pkt = tp_data->fw_pkt;
	struct iwl_cmd_header *wanted_hdr = (void *)&trig_data;

	if (pkt && (pkt->hdr.cmd == wanted_hdr->cmd &&
		    pkt->hdr.group_id == wanted_hdr->group_id)) {
		struct iwl_rx_packet *fw_pkt =
			kmemdup(pkt,
				sizeof(*pkt) + iwl_rx_packet_payload_len(pkt),
				GFP_ATOMIC);

		if (!fw_pkt)
			return false;

		dump_data->fw_pkt = fw_pkt;

		return true;
	}

	return false;
}

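/*
 * Run all active triggers of a time point: a trigger with no data
 * dwords fires unconditionally; otherwise it fires once the optional
 * data_check callback accepts one of its data dwords (no callback
 * means any data dword fires it).
 */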
static int
iwl_dbg_tlv_tp_trigger(struct iwl_fw_runtime *fwrt,
		       struct list_head *active_trig_list,
		       union iwl_dbg_tlv_tp_data *tp_data,
		       bool (*data_check)(struct iwl_fw_runtime *fwrt,
					  struct iwl_fwrt_dump_data *dump_data,
					  union iwl_dbg_tlv_tp_data *tp_data,
					  u32 trig_data))
{
	struct iwl_dbg_tlv_node *node;

	list_for_each_entry(node, active_trig_list, list) {
		struct iwl_fwrt_dump_data dump_data = {
			.trig = (void *)node->tlv.data,
		};
		u32 num_data = iwl_tlv_array_len(&node->tlv, dump_data.trig,
						 data);
		int ret, i;

		if (!num_data) {
			ret = iwl_fw_dbg_ini_collect(fwrt, &dump_data);
			if (ret)
				return ret;
		}

		for (i = 0; i < num_data; i++) {
			if (!data_check ||
			    data_check(fwrt, &dump_data, tp_data,
				       le32_to_cpu(dump_data.trig->data[i]))) {
				ret = iwl_fw_dbg_ini_collect(fwrt, &dump_data);
				if (ret)
					return ret;

				break;
			}
		}
	}

	return 0;
}

static void iwl_dbg_tlv_init_cfg(struct iwl_fw_runtime *fwrt)
{
	enum iwl_fw_ini_buffer_location *ini_dest = &fwrt->trans->dbg.ini_dest;
	int ret, i;
	u32 failed_alloc = 0;

	if (*ini_dest != IWL_FW_INI_LOCATION_INVALID)
		return;

	IWL_DEBUG_FW(fwrt,
		     "WRT: Generating active triggers list, domain 0x%x\n",
		     fwrt->trans->dbg.domains_bitmap);

	for (i = 0; i < ARRAY_SIZE(fwrt->trans->dbg.time_point); i++) {
		struct iwl_dbg_tlv_time_point_data *tp =
			&fwrt->trans->dbg.time_point[i];

		iwl_dbg_tlv_gen_active_trig_list(fwrt, tp);
	}

	*ini_dest = IWL_FW_INI_LOCATION_INVALID;
	for (i = 0; i < IWL_FW_INI_ALLOCATION_NUM; i++) {
		struct iwl_fw_ini_allocation_tlv *fw_mon_cfg =
			&fwrt->trans->dbg.fw_mon_cfg[i];
		u32 dest = le32_to_cpu(fw_mon_cfg->buf_location);

		if (dest == IWL_FW_INI_LOCATION_INVALID)
			continue;

		if (*ini_dest == IWL_FW_INI_LOCATION_INVALID)
			*ini_dest = dest;

		if (dest != *ini_dest)
			continue;

		ret = iwl_dbg_tlv_alloc_fragments(fwrt, i);

		if (ret) {
			IWL_WARN(fwrt,
				 "WRT: Failed to allocate DRAM buffer for allocation id %d, ret=%d\n",
				 i, ret);
			failed_alloc |= BIT(i);
		}
	}

	if (!failed_alloc)
		return;

	for (i = 0; i < ARRAY_SIZE(fwrt->trans->dbg.active_regions) && failed_alloc; i++) {
		struct iwl_fw_ini_region_tlv *reg;
		struct iwl_ucode_tlv **active_reg =
			&fwrt->trans->dbg.active_regions[i];
		u32 reg_type;

		if (!*active_reg)
			continue;

		reg = (void *)(*active_reg)->data;
		reg_type = le32_to_cpu(reg->type);

		if (reg_type != IWL_FW_INI_REGION_DRAM_BUFFER ||
		    !(BIT(le32_to_cpu(reg->dram_alloc_id)) & failed_alloc))
			continue;

		IWL_DEBUG_FW(fwrt,
			     "WRT: removing allocation id %d from region id %d\n",
			     le32_to_cpu(reg->dram_alloc_id), i);

		/* clear the BIT() of the failed allocation, matching the
		 * test above
		 */
		failed_alloc &= ~BIT(le32_to_cpu(reg->dram_alloc_id));
		fwrt->trans->dbg.unsupported_region_msk |= BIT(i);

		kfree(*active_reg);
		*active_reg = NULL;
	}
}

void iwl_dbg_tlv_time_point(struct iwl_fw_runtime *fwrt,
			    enum iwl_fw_ini_time_point tp_id,
			    union iwl_dbg_tlv_tp_data *tp_data)
{
	struct list_head *hcmd_list, *trig_list;

	if (!iwl_trans_dbg_ini_valid(fwrt->trans) ||
	    tp_id == IWL_FW_INI_TIME_POINT_INVALID ||
	    tp_id >= IWL_FW_INI_TIME_POINT_NUM)
		return;

	hcmd_list = &fwrt->trans->dbg.time_point[tp_id].hcmd_list;
	trig_list = &fwrt->trans->dbg.time_point[tp_id].active_trig_list;

	switch (tp_id) {
	case IWL_FW_INI_TIME_POINT_EARLY:
		iwl_dbg_tlv_init_cfg(fwrt);
		iwl_dbg_tlv_tp_trigger(fwrt, trig_list, tp_data, NULL);
		break;
	case IWL_FW_INI_TIME_POINT_AFTER_ALIVE:
		iwl_dbg_tlv_apply_buffers(fwrt);
		iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list);
		iwl_dbg_tlv_tp_trigger(fwrt, trig_list, tp_data, NULL);
		break;
	case IWL_FW_INI_TIME_POINT_PERIODIC:
		iwl_dbg_tlv_set_periodic_trigs(fwrt);
		iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list);
		break;
	case IWL_FW_INI_TIME_POINT_FW_RSP_OR_NOTIF:
	case IWL_FW_INI_TIME_POINT_MISSED_BEACONS:
	case IWL_FW_INI_TIME_POINT_FW_DHC_NOTIFICATION:
		iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list);
		iwl_dbg_tlv_tp_trigger(fwrt, trig_list, tp_data,
				       iwl_dbg_tlv_check_fw_pkt);
		break;
	default:
		iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list);
		iwl_dbg_tlv_tp_trigger(fwrt, trig_list, tp_data, NULL);
		break;
	}
}
IWL_EXPORT_SYMBOL(iwl_dbg_tlv_time_point);