// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2018-2021 Intel Corporation
 */
#include <linux/firmware.h>
#include "iwl-drv.h"
#include "iwl-trans.h"
#include "iwl-dbg-tlv.h"
#include "fw/dbg.h"
#include "fw/runtime.h"

/**
 * enum iwl_dbg_tlv_type - debug TLV types
 * @IWL_DBG_TLV_TYPE_DEBUG_INFO: debug info TLV
 * @IWL_DBG_TLV_TYPE_BUF_ALLOC: buffer allocation TLV
 * @IWL_DBG_TLV_TYPE_HCMD: host command TLV
 * @IWL_DBG_TLV_TYPE_REGION: region TLV
 * @IWL_DBG_TLV_TYPE_TRIGGER: trigger TLV
 * @IWL_DBG_TLV_TYPE_NUM: number of debug TLVs
 */
enum iwl_dbg_tlv_type {
	IWL_DBG_TLV_TYPE_DEBUG_INFO =
		IWL_UCODE_TLV_TYPE_DEBUG_INFO - IWL_UCODE_TLV_DEBUG_BASE,
	IWL_DBG_TLV_TYPE_BUF_ALLOC,
	IWL_DBG_TLV_TYPE_HCMD,
	IWL_DBG_TLV_TYPE_REGION,
	IWL_DBG_TLV_TYPE_TRIGGER,
	IWL_DBG_TLV_TYPE_NUM,
};
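
/*
 * Illustrative note (not driver code): the enum values above are chosen
 * so that a raw firmware TLV type maps straight to an array index:
 *
 *	u32 type = le32_to_cpu(tlv->type);
 *	u32 tlv_idx = type - IWL_UCODE_TLV_DEBUG_BASE;
 *
 * iwl_dbg_tlv_ver_support() and iwl_dbg_tlv_alloc() below rely on this
 * to index dbg_ver_table[] and dbg_tlv_alloc[] respectively.
 */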

/**
 * struct iwl_dbg_tlv_ver_data - debug TLV version struct
 * @min_ver: min version supported
 * @max_ver: max version supported
 */
struct iwl_dbg_tlv_ver_data {
	int min_ver;
	int max_ver;
};

/**
 * struct iwl_dbg_tlv_timer_node - timer node struct
 * @list: list of &struct iwl_dbg_tlv_timer_node
 * @timer: timer
 * @fwrt: &struct iwl_fw_runtime
 * @tlv: TLV attached to the timer node
 */
struct iwl_dbg_tlv_timer_node {
	struct list_head list;
	struct timer_list timer;
	struct iwl_fw_runtime *fwrt;
	struct iwl_ucode_tlv *tlv;
};

static const struct iwl_dbg_tlv_ver_data
dbg_ver_table[IWL_DBG_TLV_TYPE_NUM] = {
	[IWL_DBG_TLV_TYPE_DEBUG_INFO]	= {.min_ver = 1, .max_ver = 1,},
	[IWL_DBG_TLV_TYPE_BUF_ALLOC]	= {.min_ver = 1, .max_ver = 1,},
	[IWL_DBG_TLV_TYPE_HCMD]		= {.min_ver = 1, .max_ver = 1,},
	[IWL_DBG_TLV_TYPE_REGION]	= {.min_ver = 1, .max_ver = 2,},
	[IWL_DBG_TLV_TYPE_TRIGGER]	= {.min_ver = 1, .max_ver = 1,},
};

static int iwl_dbg_tlv_add(const struct iwl_ucode_tlv *tlv,
			   struct list_head *list)
{
	u32 len = le32_to_cpu(tlv->length);
	struct iwl_dbg_tlv_node *node;

	node = kzalloc(sizeof(*node) + len, GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	memcpy(&node->tlv, tlv, sizeof(node->tlv) + len);
	list_add_tail(&node->list, list);

	return 0;
}
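
/*
 * Layout note (illustrative): struct iwl_dbg_tlv_node ends in a
 * flexible-length TLV, so the single kzalloc() above yields one
 * contiguous block:
 *
 *	node->list | node->tlv (header) | tlv payload (len bytes)
 *
 * which is why the memcpy() copies sizeof(node->tlv) + len bytes in
 * one go, header and payload together.
 */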

static bool iwl_dbg_tlv_ver_support(const struct iwl_ucode_tlv *tlv)
{
	const struct iwl_fw_ini_header *hdr = (const void *)&tlv->data[0];
	u32 type = le32_to_cpu(tlv->type);
	u32 tlv_idx = type - IWL_UCODE_TLV_DEBUG_BASE;
	u32 ver = le32_to_cpu(hdr->version);

	if (ver < dbg_ver_table[tlv_idx].min_ver ||
	    ver > dbg_ver_table[tlv_idx].max_ver)
		return false;

	return true;
}

static int iwl_dbg_tlv_alloc_debug_info(struct iwl_trans *trans,
					const struct iwl_ucode_tlv *tlv)
{
	const struct iwl_fw_ini_debug_info_tlv *debug_info = (const void *)tlv->data;

	if (le32_to_cpu(tlv->length) != sizeof(*debug_info))
		return -EINVAL;

	IWL_DEBUG_FW(trans, "WRT: Loading debug cfg: %s\n",
		     debug_info->debug_cfg_name);

	return iwl_dbg_tlv_add(tlv, &trans->dbg.debug_info_tlv_list);
}

static int iwl_dbg_tlv_alloc_buf_alloc(struct iwl_trans *trans,
				       const struct iwl_ucode_tlv *tlv)
{
	const struct iwl_fw_ini_allocation_tlv *alloc = (const void *)tlv->data;
	u32 buf_location;
	u32 alloc_id;

	if (le32_to_cpu(tlv->length) != sizeof(*alloc))
		return -EINVAL;

	buf_location = le32_to_cpu(alloc->buf_location);
	alloc_id = le32_to_cpu(alloc->alloc_id);

	if (buf_location == IWL_FW_INI_LOCATION_INVALID ||
	    buf_location >= IWL_FW_INI_LOCATION_NUM)
		goto err;

	if (alloc_id == IWL_FW_INI_ALLOCATION_INVALID ||
	    alloc_id >= IWL_FW_INI_ALLOCATION_NUM)
		goto err;

	if (buf_location == IWL_FW_INI_LOCATION_NPK_PATH &&
	    alloc_id != IWL_FW_INI_ALLOCATION_ID_DBGC1)
		goto err;

	if (buf_location == IWL_FW_INI_LOCATION_SRAM_PATH &&
	    alloc_id != IWL_FW_INI_ALLOCATION_ID_DBGC1)
		goto err;

	trans->dbg.fw_mon_cfg[alloc_id] = *alloc;

	return 0;
err:
	IWL_ERR(trans,
		"WRT: Invalid allocation id %u and/or location id %u for allocation TLV\n",
		alloc_id, buf_location);
	return -EINVAL;
}

static int iwl_dbg_tlv_alloc_hcmd(struct iwl_trans *trans,
				  const struct iwl_ucode_tlv *tlv)
{
	const struct iwl_fw_ini_hcmd_tlv *hcmd = (const void *)tlv->data;
	u32 tp = le32_to_cpu(hcmd->time_point);

	if (le32_to_cpu(tlv->length) <= sizeof(*hcmd))
		return -EINVAL;

	/* Host commands cannot be sent in the early time point since the FW
	 * is not yet ready
	 */
	if (tp == IWL_FW_INI_TIME_POINT_INVALID ||
	    tp >= IWL_FW_INI_TIME_POINT_NUM ||
	    tp == IWL_FW_INI_TIME_POINT_EARLY) {
		IWL_ERR(trans,
			"WRT: Invalid time point %u for host command TLV\n",
			tp);
		return -EINVAL;
	}

	return iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].hcmd_list);
}

static int iwl_dbg_tlv_alloc_region(struct iwl_trans *trans,
				    const struct iwl_ucode_tlv *tlv)
{
	const struct iwl_fw_ini_region_tlv *reg = (const void *)tlv->data;
	struct iwl_ucode_tlv **active_reg;
	u32 id = le32_to_cpu(reg->id);
	u32 type = le32_to_cpu(reg->type);
	u32 tlv_len = sizeof(*tlv) + le32_to_cpu(tlv->length);

	/*
	 * The higher part of the ID in version 2 is irrelevant for
	 * us, so mask it out.
	 */
	if (le32_to_cpu(reg->hdr.version) == 2)
		id &= IWL_FW_INI_REGION_V2_MASK;

	if (le32_to_cpu(tlv->length) < sizeof(*reg))
		return -EINVAL;

	/* for safe use of a string from FW, limit it to IWL_FW_INI_MAX_NAME */
	IWL_DEBUG_FW(trans, "WRT: parsing region: %.*s\n",
		     IWL_FW_INI_MAX_NAME, reg->name);

	if (id >= IWL_FW_INI_MAX_REGION_ID) {
		IWL_ERR(trans, "WRT: Invalid region id %u\n", id);
		return -EINVAL;
	}

	if (type <= IWL_FW_INI_REGION_INVALID ||
	    type >= IWL_FW_INI_REGION_NUM) {
		IWL_ERR(trans, "WRT: Invalid region type %u\n", type);
		return -EINVAL;
	}

	if (type == IWL_FW_INI_REGION_PCI_IOSF_CONFIG &&
	    !trans->ops->read_config32) {
		IWL_ERR(trans, "WRT: Unsupported region type %u\n", type);
		return -EOPNOTSUPP;
	}

	active_reg = &trans->dbg.active_regions[id];
	if (*active_reg) {
		IWL_WARN(trans, "WRT: Overriding region id %u\n", id);

		kfree(*active_reg);
	}

	*active_reg = kmemdup(tlv, tlv_len, GFP_KERNEL);
	if (!*active_reg)
		return -ENOMEM;

	IWL_DEBUG_FW(trans, "WRT: Enabling region id %u type %u\n", id, type);

	return 0;
}

static int iwl_dbg_tlv_alloc_trigger(struct iwl_trans *trans,
				     const struct iwl_ucode_tlv *tlv)
{
	const struct iwl_fw_ini_trigger_tlv *trig = (const void *)tlv->data;
	struct iwl_fw_ini_trigger_tlv *dup_trig;
	u32 tp = le32_to_cpu(trig->time_point);
	struct iwl_ucode_tlv *dup = NULL;
	int ret;

	if (le32_to_cpu(tlv->length) < sizeof(*trig))
		return -EINVAL;

	if (tp <= IWL_FW_INI_TIME_POINT_INVALID ||
	    tp >= IWL_FW_INI_TIME_POINT_NUM) {
		IWL_ERR(trans,
			"WRT: Invalid time point %u for trigger TLV\n",
			tp);
		return -EINVAL;
	}

	if (!le32_to_cpu(trig->occurrences)) {
		dup = kmemdup(tlv, sizeof(*tlv) + le32_to_cpu(tlv->length),
				GFP_KERNEL);
		if (!dup)
			return -ENOMEM;
		dup_trig = (void *)dup->data;
		dup_trig->occurrences = cpu_to_le32(-1);
		tlv = dup;
	}

	ret = iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].trig_list);
	kfree(dup);

	return ret;
}
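
/*
 * Note (illustrative): occurrences == 0 in a trigger TLV means "fire
 * without limit".  Instead of special-casing zero at every decrement
 * site, the code above clones the TLV and stores the equivalent
 *
 *	dup_trig->occurrences = cpu_to_le32(-1);	// 0xffffffff
 *
 * iwl_dbg_tlv_add() copies the clone into its own list node, so the
 * temporary buffer is freed right away regardless of the outcome.
 */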

static int (*dbg_tlv_alloc[])(struct iwl_trans *trans,
			      const struct iwl_ucode_tlv *tlv) = {
	[IWL_DBG_TLV_TYPE_DEBUG_INFO]	= iwl_dbg_tlv_alloc_debug_info,
	[IWL_DBG_TLV_TYPE_BUF_ALLOC]	= iwl_dbg_tlv_alloc_buf_alloc,
	[IWL_DBG_TLV_TYPE_HCMD]		= iwl_dbg_tlv_alloc_hcmd,
	[IWL_DBG_TLV_TYPE_REGION]	= iwl_dbg_tlv_alloc_region,
	[IWL_DBG_TLV_TYPE_TRIGGER]	= iwl_dbg_tlv_alloc_trigger,
};

void iwl_dbg_tlv_alloc(struct iwl_trans *trans, const struct iwl_ucode_tlv *tlv,
		       bool ext)
{
	const struct iwl_fw_ini_header *hdr = (const void *)&tlv->data[0];
	u32 type = le32_to_cpu(tlv->type);
	u32 tlv_idx = type - IWL_UCODE_TLV_DEBUG_BASE;
	u32 domain = le32_to_cpu(hdr->domain);
	enum iwl_ini_cfg_state *cfg_state = ext ?
		&trans->dbg.external_ini_cfg : &trans->dbg.internal_ini_cfg;
	int ret;

	if (domain != IWL_FW_INI_DOMAIN_ALWAYS_ON &&
	    !(domain & trans->dbg.domains_bitmap)) {
		IWL_DEBUG_FW(trans,
			     "WRT: Skipping TLV with disabled domain 0x%0x (0x%0x)\n",
			     domain, trans->dbg.domains_bitmap);
		return;
	}

	if (tlv_idx >= ARRAY_SIZE(dbg_tlv_alloc) || !dbg_tlv_alloc[tlv_idx]) {
		IWL_ERR(trans, "WRT: Unsupported TLV type 0x%x\n", type);
		goto out_err;
	}

	if (!iwl_dbg_tlv_ver_support(tlv)) {
		IWL_ERR(trans, "WRT: Unsupported TLV 0x%x version %u\n", type,
			le32_to_cpu(hdr->version));
		goto out_err;
	}

	ret = dbg_tlv_alloc[tlv_idx](trans, tlv);
	if (ret) {
		IWL_ERR(trans,
			"WRT: Failed to allocate TLV 0x%x, ret %d, (ext=%d)\n",
			type, ret, ext);
		goto out_err;
	}

	if (*cfg_state == IWL_INI_CFG_STATE_NOT_LOADED)
		*cfg_state = IWL_INI_CFG_STATE_LOADED;

	return;

out_err:
	*cfg_state = IWL_INI_CFG_STATE_CORRUPTED;
}
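
/*
 * Caller sketch (illustrative; the real call sites are in iwl-drv.c and
 * iwl_dbg_tlv_parse_bin() below): the firmware parser forwards every
 * debug TLV here and does not itself act on the result, roughly:
 *
 *	if (iwlwifi_mod_params.enable_ini)
 *		iwl_dbg_tlv_alloc(trans, tlv, false);
 *
 * ext is true only for TLVs read from the external iwl-debug-yoyo.bin
 * file, which keeps the two configuration states separate.
 */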

void iwl_dbg_tlv_del_timers(struct iwl_trans *trans)
{
	struct list_head *timer_list = &trans->dbg.periodic_trig_list;
	struct iwl_dbg_tlv_timer_node *node, *tmp;

	list_for_each_entry_safe(node, tmp, timer_list, list) {
		del_timer(&node->timer);
		list_del(&node->list);
		kfree(node);
	}
}
IWL_EXPORT_SYMBOL(iwl_dbg_tlv_del_timers);

static void iwl_dbg_tlv_fragments_free(struct iwl_trans *trans,
				       enum iwl_fw_ini_allocation_id alloc_id)
{
	struct iwl_fw_mon *fw_mon;
	int i;

	if (alloc_id <= IWL_FW_INI_ALLOCATION_INVALID ||
	    alloc_id >= IWL_FW_INI_ALLOCATION_NUM)
		return;

	fw_mon = &trans->dbg.fw_mon_ini[alloc_id];

	for (i = 0; i < fw_mon->num_frags; i++) {
		struct iwl_dram_data *frag = &fw_mon->frags[i];

		dma_free_coherent(trans->dev, frag->size, frag->block,
				  frag->physical);

		frag->physical = 0;
		frag->block = NULL;
		frag->size = 0;
	}

	kfree(fw_mon->frags);
	fw_mon->frags = NULL;
	fw_mon->num_frags = 0;
}

void iwl_dbg_tlv_free(struct iwl_trans *trans)
{
	struct iwl_dbg_tlv_node *tlv_node, *tlv_node_tmp;
	int i;

	iwl_dbg_tlv_del_timers(trans);

	for (i = 0; i < ARRAY_SIZE(trans->dbg.active_regions); i++) {
		struct iwl_ucode_tlv **active_reg =
			&trans->dbg.active_regions[i];

		kfree(*active_reg);
		*active_reg = NULL;
	}

	list_for_each_entry_safe(tlv_node, tlv_node_tmp,
				 &trans->dbg.debug_info_tlv_list, list) {
		list_del(&tlv_node->list);
		kfree(tlv_node);
	}

	for (i = 0; i < ARRAY_SIZE(trans->dbg.time_point); i++) {
		struct iwl_dbg_tlv_time_point_data *tp =
			&trans->dbg.time_point[i];

		list_for_each_entry_safe(tlv_node, tlv_node_tmp, &tp->trig_list,
					 list) {
			list_del(&tlv_node->list);
			kfree(tlv_node);
		}

		list_for_each_entry_safe(tlv_node, tlv_node_tmp, &tp->hcmd_list,
					 list) {
			list_del(&tlv_node->list);
			kfree(tlv_node);
		}

		list_for_each_entry_safe(tlv_node, tlv_node_tmp,
					 &tp->active_trig_list, list) {
			list_del(&tlv_node->list);
			kfree(tlv_node);
		}
	}

	for (i = 0; i < ARRAY_SIZE(trans->dbg.fw_mon_ini); i++)
		iwl_dbg_tlv_fragments_free(trans, i);
}

static int iwl_dbg_tlv_parse_bin(struct iwl_trans *trans, const u8 *data,
				 size_t len)
{
	const struct iwl_ucode_tlv *tlv;
	u32 tlv_len;

	while (len >= sizeof(*tlv)) {
		len -= sizeof(*tlv);
		tlv = (void *)data;

		tlv_len = le32_to_cpu(tlv->length);

		if (len < tlv_len) {
			IWL_ERR(trans, "invalid TLV len: %zd/%u\n",
				len, tlv_len);
			return -EINVAL;
		}
		len -= ALIGN(tlv_len, 4);
		data += sizeof(*tlv) + ALIGN(tlv_len, 4);

		iwl_dbg_tlv_alloc(trans, tlv, true);
	}

	return 0;
}
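
/*
 * Walk arithmetic (illustrative): every iteration consumes the TLV
 * header plus the payload rounded up to a dword boundary.  For a
 * 6-byte payload:
 *
 *	len  -= sizeof(*tlv);			(header)
 *	len  -= ALIGN(6, 4);			(8: payload + 2 pad bytes)
 *	data += sizeof(*tlv) + ALIGN(6, 4);
 *
 * so data always lands on the next TLV header and the loop stops once
 * fewer than sizeof(*tlv) bytes remain.
 */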

void iwl_dbg_tlv_load_bin(struct device *dev, struct iwl_trans *trans)
{
	const struct firmware *fw;
	const char *yoyo_bin = "iwl-debug-yoyo.bin";
	int res;

	if (!iwlwifi_mod_params.enable_ini ||
	    trans->trans_cfg->device_family <= IWL_DEVICE_FAMILY_9000)
		return;

	res = firmware_request_nowarn(&fw, yoyo_bin, dev);
	IWL_DEBUG_FW(trans, "%s %s\n", res ? "didn't load" : "loaded", yoyo_bin);

	if (res)
		return;

	iwl_dbg_tlv_parse_bin(trans, fw->data, fw->size);

	release_firmware(fw);
}

void iwl_dbg_tlv_init(struct iwl_trans *trans)
{
	int i;

	INIT_LIST_HEAD(&trans->dbg.debug_info_tlv_list);
	INIT_LIST_HEAD(&trans->dbg.periodic_trig_list);

	for (i = 0; i < ARRAY_SIZE(trans->dbg.time_point); i++) {
		struct iwl_dbg_tlv_time_point_data *tp =
			&trans->dbg.time_point[i];

		INIT_LIST_HEAD(&tp->trig_list);
		INIT_LIST_HEAD(&tp->hcmd_list);
		INIT_LIST_HEAD(&tp->active_trig_list);
	}
}

static int iwl_dbg_tlv_alloc_fragment(struct iwl_fw_runtime *fwrt,
				      struct iwl_dram_data *frag, u32 pages)
{
	void *block = NULL;
	dma_addr_t physical;

	if (!frag || frag->size || !pages)
		return -EIO;

	/*
	 * We try to allocate as many pages as we can, starting with
	 * the requested amount and going down until we can allocate
	 * something.  Because of DIV_ROUND_UP(), pages will never go
	 * down to 0 and stop the loop, so stop when pages reaches 1,
	 * which is too small anyway.
	 */
	while (pages > 1) {
		block = dma_alloc_coherent(fwrt->dev, pages * PAGE_SIZE,
					   &physical,
					   GFP_KERNEL | __GFP_NOWARN);
		if (block)
			break;

		IWL_WARN(fwrt, "WRT: Failed to allocate fragment size %lu\n",
			 pages * PAGE_SIZE);

		pages = DIV_ROUND_UP(pages, 2);
	}

	if (!block)
		return -ENOMEM;

	frag->physical = physical;
	frag->block = block;
	frag->size = pages * PAGE_SIZE;

	return pages;
}
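
/*
 * Back-off arithmetic (illustrative): DIV_ROUND_UP(pages, 2) decays a
 * failing request of e.g. 33 pages as 33 -> 17 -> 9 -> 5 -> 3 -> 2 -> 1,
 * and 1 is a fixed point (DIV_ROUND_UP(1, 2) == 1), which is why the
 * loop guard above is pages > 1 rather than pages > 0.
 */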

static int iwl_dbg_tlv_alloc_fragments(struct iwl_fw_runtime *fwrt,
				       enum iwl_fw_ini_allocation_id alloc_id)
{
	struct iwl_fw_mon *fw_mon;
	struct iwl_fw_ini_allocation_tlv *fw_mon_cfg;
	u32 num_frags, remain_pages, frag_pages;
	int i;

	if (alloc_id < IWL_FW_INI_ALLOCATION_INVALID ||
	    alloc_id >= IWL_FW_INI_ALLOCATION_NUM)
		return -EIO;

	fw_mon_cfg = &fwrt->trans->dbg.fw_mon_cfg[alloc_id];
	fw_mon = &fwrt->trans->dbg.fw_mon_ini[alloc_id];

	if (fw_mon->num_frags ||
	    fw_mon_cfg->buf_location !=
	    cpu_to_le32(IWL_FW_INI_LOCATION_DRAM_PATH))
		return 0;

	num_frags = le32_to_cpu(fw_mon_cfg->max_frags_num);
	if (!fw_has_capa(&fwrt->fw->ucode_capa,
			 IWL_UCODE_TLV_CAPA_DBG_BUF_ALLOC_CMD_SUPP)) {
		if (alloc_id != IWL_FW_INI_ALLOCATION_ID_DBGC1)
			return -EIO;
		num_frags = 1;
	}

	remain_pages = DIV_ROUND_UP(le32_to_cpu(fw_mon_cfg->req_size),
				    PAGE_SIZE);
	num_frags = min_t(u32, num_frags, BUF_ALLOC_MAX_NUM_FRAGS);
	num_frags = min_t(u32, num_frags, remain_pages);
	frag_pages = DIV_ROUND_UP(remain_pages, num_frags);

	fw_mon->frags = kcalloc(num_frags, sizeof(*fw_mon->frags), GFP_KERNEL);
	if (!fw_mon->frags)
		return -ENOMEM;

	for (i = 0; i < num_frags; i++) {
		int pages = min_t(u32, frag_pages, remain_pages);

		IWL_DEBUG_FW(fwrt,
			     "WRT: Allocating DRAM buffer (alloc_id=%u, fragment=%u, size=0x%lx)\n",
			     alloc_id, i, pages * PAGE_SIZE);

		pages = iwl_dbg_tlv_alloc_fragment(fwrt, &fw_mon->frags[i],
						   pages);
		if (pages < 0) {
			u32 alloc_size = le32_to_cpu(fw_mon_cfg->req_size) -
				(remain_pages * PAGE_SIZE);

			if (alloc_size < le32_to_cpu(fw_mon_cfg->min_size)) {
				iwl_dbg_tlv_fragments_free(fwrt->trans,
							   alloc_id);
				return pages;
			}
			break;
		}

		remain_pages -= pages;
		fw_mon->num_frags++;
	}

	return 0;
}
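
/*
 * Sizing sketch (illustrative, hypothetical numbers): with 4 KiB pages,
 * req_size = 1 MiB and both fragment limits allowing 2 fragments:
 *
 *	remain_pages = DIV_ROUND_UP(1 MiB, 4 KiB) = 256
 *	num_frags    = 2
 *	frag_pages   = DIV_ROUND_UP(256, 2) = 128
 *
 * so the loop requests two 128-page fragments.  If an allocation fails
 * part-way, the buffer built so far is kept as long as it still reaches
 * min_size; otherwise all fragments are freed and the error returned.
 */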

static int iwl_dbg_tlv_apply_buffer(struct iwl_fw_runtime *fwrt,
				    enum iwl_fw_ini_allocation_id alloc_id)
{
	struct iwl_fw_mon *fw_mon;
	u32 remain_frags, num_commands;
	int i, fw_mon_idx = 0;

	if (!fw_has_capa(&fwrt->fw->ucode_capa,
			 IWL_UCODE_TLV_CAPA_DBG_BUF_ALLOC_CMD_SUPP))
		return 0;

	if (alloc_id < IWL_FW_INI_ALLOCATION_INVALID ||
	    alloc_id >= IWL_FW_INI_ALLOCATION_NUM)
		return -EIO;

	if (le32_to_cpu(fwrt->trans->dbg.fw_mon_cfg[alloc_id].buf_location) !=
	    IWL_FW_INI_LOCATION_DRAM_PATH)
		return 0;

	fw_mon = &fwrt->trans->dbg.fw_mon_ini[alloc_id];

	/* the first fragment of DBGC1 is given to the FW via register
	 * or context info
	 */
	if (alloc_id == IWL_FW_INI_ALLOCATION_ID_DBGC1)
		fw_mon_idx++;

	remain_frags = fw_mon->num_frags - fw_mon_idx;
	if (!remain_frags)
		return 0;

	num_commands = DIV_ROUND_UP(remain_frags, BUF_ALLOC_MAX_NUM_FRAGS);

	IWL_DEBUG_FW(fwrt, "WRT: Applying DRAM destination (alloc_id=%u)\n",
		     alloc_id);

	for (i = 0; i < num_commands; i++) {
		u32 num_frags = min_t(u32, remain_frags,
				      BUF_ALLOC_MAX_NUM_FRAGS);
		struct iwl_buf_alloc_cmd data = {
			.alloc_id = cpu_to_le32(alloc_id),
			.num_frags = cpu_to_le32(num_frags),
			.buf_location =
				cpu_to_le32(IWL_FW_INI_LOCATION_DRAM_PATH),
		};
		struct iwl_host_cmd hcmd = {
			.id = WIDE_ID(DEBUG_GROUP, BUFFER_ALLOCATION),
			.data[0] = &data,
			.len[0] = sizeof(data),
			.flags = CMD_SEND_IN_RFKILL,
		};
		int ret, j;

		for (j = 0; j < num_frags; j++) {
			struct iwl_buf_alloc_frag *frag = &data.frags[j];
			struct iwl_dram_data *fw_mon_frag =
				&fw_mon->frags[fw_mon_idx++];

			frag->addr = cpu_to_le64(fw_mon_frag->physical);
			frag->size = cpu_to_le32(fw_mon_frag->size);
		}
		ret = iwl_trans_send_cmd(fwrt->trans, &hcmd);
		if (ret)
			return ret;

		remain_frags -= num_frags;
	}

	return 0;
}
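
/*
 * Batching sketch (illustrative): each BUFFER_ALLOCATION command can
 * describe at most BUF_ALLOC_MAX_NUM_FRAGS fragments, so e.g. five
 * remaining fragments with a limit of two per command gives
 *
 *	num_commands = DIV_ROUND_UP(5, 2) = 3	(2 + 2 + 1 fragments)
 *
 * and the final command simply carries the remainder, since num_frags
 * is recomputed with min_t() on every iteration.
 */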

static void iwl_dbg_tlv_apply_buffers(struct iwl_fw_runtime *fwrt)
{
	int ret, i;

	for (i = 0; i < IWL_FW_INI_ALLOCATION_NUM; i++) {
		ret = iwl_dbg_tlv_apply_buffer(fwrt, i);
		if (ret)
			IWL_WARN(fwrt,
				 "WRT: Failed to apply DRAM buffer for allocation id %d, ret=%d\n",
				 i, ret);
	}
}

static void iwl_dbg_tlv_send_hcmds(struct iwl_fw_runtime *fwrt,
				   struct list_head *hcmd_list)
{
	struct iwl_dbg_tlv_node *node;

	list_for_each_entry(node, hcmd_list, list) {
		struct iwl_fw_ini_hcmd_tlv *hcmd = (void *)node->tlv.data;
		struct iwl_fw_ini_hcmd *hcmd_data = &hcmd->hcmd;
		u16 hcmd_len = le32_to_cpu(node->tlv.length) - sizeof(*hcmd);
		struct iwl_host_cmd cmd = {
			.id = WIDE_ID(hcmd_data->group, hcmd_data->id),
			.len = { hcmd_len, },
			.data = { hcmd_data->data, },
		};

		iwl_trans_send_cmd(fwrt->trans, &cmd);
	}
}

static void iwl_dbg_tlv_periodic_trig_handler(struct timer_list *t)
{
	struct iwl_dbg_tlv_timer_node *timer_node =
		from_timer(timer_node, t, timer);
	struct iwl_fwrt_dump_data dump_data = {
		.trig = (void *)timer_node->tlv->data,
	};
	int ret;

	ret = iwl_fw_dbg_ini_collect(timer_node->fwrt, &dump_data, false);
	if (!ret || ret == -EBUSY) {
		u32 occur = le32_to_cpu(dump_data.trig->occurrences);
		u32 collect_interval = le32_to_cpu(dump_data.trig->data[0]);

		if (!occur)
			return;

		mod_timer(t, jiffies + msecs_to_jiffies(collect_interval));
	}
}

static void iwl_dbg_tlv_set_periodic_trigs(struct iwl_fw_runtime *fwrt)
{
	struct iwl_dbg_tlv_node *node;
	struct list_head *trig_list =
		&fwrt->trans->dbg.time_point[IWL_FW_INI_TIME_POINT_PERIODIC].active_trig_list;

	list_for_each_entry(node, trig_list, list) {
		struct iwl_fw_ini_trigger_tlv *trig = (void *)node->tlv.data;
		struct iwl_dbg_tlv_timer_node *timer_node;
		u32 occur = le32_to_cpu(trig->occurrences), collect_interval;
		u32 min_interval = 100;

		if (!occur)
			continue;

		/* make sure there is at least one dword of data for the
		 * interval value
		 */
		if (le32_to_cpu(node->tlv.length) <
		    sizeof(*trig) + sizeof(__le32)) {
			IWL_ERR(fwrt,
				"WRT: Periodic trigger interval data was not given\n");
			continue;
		}

		if (le32_to_cpu(trig->data[0]) < min_interval) {
			IWL_WARN(fwrt,
				 "WRT: Override min interval from %u to %u msec\n",
				 le32_to_cpu(trig->data[0]), min_interval);
			trig->data[0] = cpu_to_le32(min_interval);
		}

		collect_interval = le32_to_cpu(trig->data[0]);

		timer_node = kzalloc(sizeof(*timer_node), GFP_KERNEL);
		if (!timer_node) {
			IWL_ERR(fwrt,
				"WRT: Failed to allocate periodic trigger\n");
			continue;
		}

		timer_node->fwrt = fwrt;
		timer_node->tlv = &node->tlv;
		timer_setup(&timer_node->timer,
			    iwl_dbg_tlv_periodic_trig_handler, 0);

		list_add_tail(&timer_node->list,
			      &fwrt->trans->dbg.periodic_trig_list);

		IWL_DEBUG_FW(fwrt, "WRT: Enabling periodic trigger\n");

		mod_timer(&timer_node->timer,
			  jiffies + msecs_to_jiffies(collect_interval));
	}
}

static bool is_trig_data_contained(const struct iwl_ucode_tlv *new,
				   const struct iwl_ucode_tlv *old)
{
	const struct iwl_fw_ini_trigger_tlv *new_trig = (const void *)new->data;
	const struct iwl_fw_ini_trigger_tlv *old_trig = (const void *)old->data;
	const __le32 *new_data = new_trig->data, *old_data = old_trig->data;
	u32 new_dwords_num = iwl_tlv_array_len(new, new_trig, data);
	u32 old_dwords_num = iwl_tlv_array_len(old, old_trig, data);
	int i, j;

	for (i = 0; i < new_dwords_num; i++) {
		bool match = false;

		for (j = 0; j < old_dwords_num; j++) {
			if (new_data[i] == old_data[j]) {
				match = true;
				break;
			}
		}
		if (!match)
			return false;
	}

	return true;
}
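
/*
 * Semantics note (illustrative): the containment test is set-like, not
 * positional.  New data {A, B} is contained in old data {B, C, A}
 * because every new dword occurs somewhere in the old array, while
 * {A, D} is not contained since D has no match; an empty new array is
 * trivially contained.
 */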

static int iwl_dbg_tlv_override_trig_node(struct iwl_fw_runtime *fwrt,
					  struct iwl_ucode_tlv *trig_tlv,
					  struct iwl_dbg_tlv_node *node)
{
	struct iwl_ucode_tlv *node_tlv = &node->tlv;
	struct iwl_fw_ini_trigger_tlv *node_trig = (void *)node_tlv->data;
	struct iwl_fw_ini_trigger_tlv *trig = (void *)trig_tlv->data;
	u32 policy = le32_to_cpu(trig->apply_policy);
	u32 size = le32_to_cpu(trig_tlv->length);
	u32 trig_data_len = size - sizeof(*trig);
	u32 offset = 0;

	if (!(policy & IWL_FW_INI_APPLY_POLICY_OVERRIDE_DATA)) {
		u32 data_len = le32_to_cpu(node_tlv->length) -
			sizeof(*node_trig);

		IWL_DEBUG_FW(fwrt,
			     "WRT: Appending trigger data (time point %u)\n",
			     le32_to_cpu(trig->time_point));

		offset += data_len;
		size += data_len;
	} else {
		IWL_DEBUG_FW(fwrt,
			     "WRT: Overriding trigger data (time point %u)\n",
			     le32_to_cpu(trig->time_point));
	}

	if (size != le32_to_cpu(node_tlv->length)) {
		struct list_head *prev = node->list.prev;
		struct iwl_dbg_tlv_node *tmp;

		list_del(&node->list);

		tmp = krealloc(node, sizeof(*node) + size, GFP_KERNEL);
		if (!tmp) {
			IWL_WARN(fwrt,
				 "WRT: No memory to override trigger (time point %u)\n",
				 le32_to_cpu(trig->time_point));

			list_add(&node->list, prev);

			return -ENOMEM;
		}

		list_add(&tmp->list, prev);
		node_tlv = &tmp->tlv;
		node_trig = (void *)node_tlv->data;
	}

	/* offset is in bytes, but data is a __le32 array */
	memcpy((u8 *)node_trig->data + offset, trig->data, trig_data_len);
	node_tlv->length = cpu_to_le32(size);

	if (policy & IWL_FW_INI_APPLY_POLICY_OVERRIDE_CFG) {
		IWL_DEBUG_FW(fwrt,
			     "WRT: Overriding trigger configuration (time point %u)\n",
			     le32_to_cpu(trig->time_point));

		/* the first 11 dwords are configuration related */
		memcpy(node_trig, trig, sizeof(__le32) * 11);
	}

	if (policy & IWL_FW_INI_APPLY_POLICY_OVERRIDE_REGIONS) {
		IWL_DEBUG_FW(fwrt,
			     "WRT: Overriding trigger regions (time point %u)\n",
			     le32_to_cpu(trig->time_point));

		node_trig->regions_mask = trig->regions_mask;
	} else {
		IWL_DEBUG_FW(fwrt,
			     "WRT: Appending trigger regions (time point %u)\n",
			     le32_to_cpu(trig->time_point));

		node_trig->regions_mask |= trig->regions_mask;
	}

	return 0;
}
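
/*
 * Resize arithmetic (illustrative): when appending (OVERRIDE_DATA not
 * set), the node must hold both data arrays.  With 8 bytes of existing
 * data and 4 bytes of incoming data:
 *
 *	offset = 8				(write after old data)
 *	size   = sizeof(*trig) + 4 + 8		(header + both payloads)
 *
 * so the node is krealloc()ed and the incoming dwords land behind the
 * old ones.  Overriding keeps offset = 0 and size equal to the incoming
 * TLV's length, reallocating only if that differs from the node's
 * current size.
 */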

static int
iwl_dbg_tlv_add_active_trigger(struct iwl_fw_runtime *fwrt,
			       struct list_head *trig_list,
			       struct iwl_ucode_tlv *trig_tlv)
{
	struct iwl_fw_ini_trigger_tlv *trig = (void *)trig_tlv->data;
	struct iwl_dbg_tlv_node *node, *match = NULL;
	u32 policy = le32_to_cpu(trig->apply_policy);

	list_for_each_entry(node, trig_list, list) {
		if (!(policy & IWL_FW_INI_APPLY_POLICY_MATCH_TIME_POINT))
			break;

		if (!(policy & IWL_FW_INI_APPLY_POLICY_MATCH_DATA) ||
		    is_trig_data_contained(trig_tlv, &node->tlv)) {
			match = node;
			break;
		}
	}

	if (!match) {
		IWL_DEBUG_FW(fwrt, "WRT: Enabling trigger (time point %u)\n",
			     le32_to_cpu(trig->time_point));
		return iwl_dbg_tlv_add(trig_tlv, trig_list);
	}

	return iwl_dbg_tlv_override_trig_node(fwrt, trig_tlv, match);
}

static void
iwl_dbg_tlv_gen_active_trig_list(struct iwl_fw_runtime *fwrt,
				 struct iwl_dbg_tlv_time_point_data *tp)
{
	struct iwl_dbg_tlv_node *node;
	struct list_head *trig_list = &tp->trig_list;
	struct list_head *active_trig_list = &tp->active_trig_list;

	list_for_each_entry(node, trig_list, list) {
		struct iwl_ucode_tlv *tlv = &node->tlv;

		iwl_dbg_tlv_add_active_trigger(fwrt, active_trig_list, tlv);
	}
}

static bool iwl_dbg_tlv_check_fw_pkt(struct iwl_fw_runtime *fwrt,
				     struct iwl_fwrt_dump_data *dump_data,
				     union iwl_dbg_tlv_tp_data *tp_data,
				     u32 trig_data)
{
	struct iwl_rx_packet *pkt = tp_data->fw_pkt;
	struct iwl_cmd_header *wanted_hdr = (void *)&trig_data;

	if (pkt && (pkt->hdr.cmd == wanted_hdr->cmd &&
		    pkt->hdr.group_id == wanted_hdr->group_id)) {
		struct iwl_rx_packet *fw_pkt =
			kmemdup(pkt,
				sizeof(*pkt) + iwl_rx_packet_payload_len(pkt),
				GFP_ATOMIC);

		if (!fw_pkt)
			return false;

		dump_data->fw_pkt = fw_pkt;

		return true;
	}

	return false;
}
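
/*
 * Packing note (illustrative): trig_data here is one trigger data dword
 * reinterpreted as a struct iwl_cmd_header, so a trigger waiting for
 * e.g. group 0x2 command 0x3 would carry a dword whose first bytes are
 * cmd = 0x3 and group_id = 0x2, compared byte for byte against
 * pkt->hdr of each incoming firmware response or notification.
 */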

static int
iwl_dbg_tlv_tp_trigger(struct iwl_fw_runtime *fwrt, bool sync,
		       struct list_head *active_trig_list,
		       union iwl_dbg_tlv_tp_data *tp_data,
		       bool (*data_check)(struct iwl_fw_runtime *fwrt,
					  struct iwl_fwrt_dump_data *dump_data,
					  union iwl_dbg_tlv_tp_data *tp_data,
					  u32 trig_data))
{
	struct iwl_dbg_tlv_node *node;

	list_for_each_entry(node, active_trig_list, list) {
		struct iwl_fwrt_dump_data dump_data = {
			.trig = (void *)node->tlv.data,
		};
		u32 num_data = iwl_tlv_array_len(&node->tlv, dump_data.trig,
						 data);
		int ret, i;

		if (!num_data) {
			ret = iwl_fw_dbg_ini_collect(fwrt, &dump_data, sync);
			if (ret)
				return ret;
		}

		for (i = 0; i < num_data; i++) {
			if (!data_check ||
			    data_check(fwrt, &dump_data, tp_data,
				       le32_to_cpu(dump_data.trig->data[i]))) {
				ret = iwl_fw_dbg_ini_collect(fwrt, &dump_data, sync);
				if (ret)
					return ret;

				break;
			}
		}
	}

	return 0;
}

static void iwl_dbg_tlv_init_cfg(struct iwl_fw_runtime *fwrt)
{
	enum iwl_fw_ini_buffer_location *ini_dest = &fwrt->trans->dbg.ini_dest;
	int ret, i;
	u32 failed_alloc = 0;

	if (*ini_dest != IWL_FW_INI_LOCATION_INVALID)
		return;

	IWL_DEBUG_FW(fwrt,
		     "WRT: Generating active triggers list, domain 0x%x\n",
		     fwrt->trans->dbg.domains_bitmap);

	for (i = 0; i < ARRAY_SIZE(fwrt->trans->dbg.time_point); i++) {
		struct iwl_dbg_tlv_time_point_data *tp =
			&fwrt->trans->dbg.time_point[i];

		iwl_dbg_tlv_gen_active_trig_list(fwrt, tp);
	}

	*ini_dest = IWL_FW_INI_LOCATION_INVALID;
	for (i = 0; i < IWL_FW_INI_ALLOCATION_NUM; i++) {
		struct iwl_fw_ini_allocation_tlv *fw_mon_cfg =
			&fwrt->trans->dbg.fw_mon_cfg[i];
		u32 dest = le32_to_cpu(fw_mon_cfg->buf_location);

		if (dest == IWL_FW_INI_LOCATION_INVALID)
			continue;

		if (*ini_dest == IWL_FW_INI_LOCATION_INVALID)
			*ini_dest = dest;

		if (dest != *ini_dest)
			continue;

		ret = iwl_dbg_tlv_alloc_fragments(fwrt, i);

		if (ret) {
			IWL_WARN(fwrt,
				 "WRT: Failed to allocate DRAM buffer for allocation id %d, ret=%d\n",
				 i, ret);
			failed_alloc |= BIT(i);
		}
	}

	if (!failed_alloc)
		return;

	for (i = 0; i < ARRAY_SIZE(fwrt->trans->dbg.active_regions) && failed_alloc; i++) {
		struct iwl_fw_ini_region_tlv *reg;
		struct iwl_ucode_tlv **active_reg =
			&fwrt->trans->dbg.active_regions[i];
		u32 reg_type;

		if (!*active_reg)
			continue;

		reg = (void *)(*active_reg)->data;
		reg_type = le32_to_cpu(reg->type);

		if (reg_type != IWL_FW_INI_REGION_DRAM_BUFFER ||
		    !(BIT(le32_to_cpu(reg->dram_alloc_id)) & failed_alloc))
			continue;

		IWL_DEBUG_FW(fwrt,
			     "WRT: removing allocation id %d from region id %d\n",
			     le32_to_cpu(reg->dram_alloc_id), i);

		failed_alloc &= ~BIT(le32_to_cpu(reg->dram_alloc_id));
		fwrt->trans->dbg.unsupported_region_msk |= BIT(i);

		kfree(*active_reg);
		*active_reg = NULL;
	}
}

void _iwl_dbg_tlv_time_point(struct iwl_fw_runtime *fwrt,
			     enum iwl_fw_ini_time_point tp_id,
			     union iwl_dbg_tlv_tp_data *tp_data,
			     bool sync)
{
	struct list_head *hcmd_list, *trig_list;

	if (!iwl_trans_dbg_ini_valid(fwrt->trans) ||
	    tp_id == IWL_FW_INI_TIME_POINT_INVALID ||
	    tp_id >= IWL_FW_INI_TIME_POINT_NUM)
		return;

	hcmd_list = &fwrt->trans->dbg.time_point[tp_id].hcmd_list;
	trig_list = &fwrt->trans->dbg.time_point[tp_id].active_trig_list;

	switch (tp_id) {
	case IWL_FW_INI_TIME_POINT_EARLY:
		iwl_dbg_tlv_init_cfg(fwrt);
		iwl_dbg_tlv_tp_trigger(fwrt, sync, trig_list, tp_data, NULL);
		break;
	case IWL_FW_INI_TIME_POINT_AFTER_ALIVE:
		iwl_dbg_tlv_apply_buffers(fwrt);
		iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list);
		iwl_dbg_tlv_tp_trigger(fwrt, sync, trig_list, tp_data, NULL);
		break;
	case IWL_FW_INI_TIME_POINT_PERIODIC:
		iwl_dbg_tlv_set_periodic_trigs(fwrt);
		iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list);
		break;
	case IWL_FW_INI_TIME_POINT_FW_RSP_OR_NOTIF:
	case IWL_FW_INI_TIME_POINT_MISSED_BEACONS:
	case IWL_FW_INI_TIME_POINT_FW_DHC_NOTIFICATION:
		iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list);
		iwl_dbg_tlv_tp_trigger(fwrt, sync, trig_list, tp_data,
				       iwl_dbg_tlv_check_fw_pkt);
		break;
	default:
		iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list);
		iwl_dbg_tlv_tp_trigger(fwrt, sync, trig_list, tp_data, NULL);
		break;
	}
}
IWL_EXPORT_SYMBOL(_iwl_dbg_tlv_time_point);