xref: /openbmc/linux/drivers/net/wireless/intel/iwlwifi/fw/dbg.c (revision 05cf4fe738242183f1237f1b3a28b4479348c0a1)
1 /******************************************************************************
2  *
3  * This file is provided under a dual BSD/GPLv2 license.  When using or
4  * redistributing this file, you may do so under either license.
5  *
6  * GPL LICENSE SUMMARY
7  *
8  * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
9  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10  * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
11  * Copyright(c) 2018        Intel Corporation
12  *
13  * This program is free software; you can redistribute it and/or modify
14  * it under the terms of version 2 of the GNU General Public License as
15  * published by the Free Software Foundation.
16  *
17  * This program is distributed in the hope that it will be useful, but
18  * WITHOUT ANY WARRANTY; without even the implied warranty of
19  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
20  * General Public License for more details.
21  *
22  * The full GNU General Public License is included in this distribution
23  * in the file called COPYING.
24  *
25  * Contact Information:
26  *  Intel Linux Wireless <linuxwifi@intel.com>
27  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
28  *
29  * BSD LICENSE
30  *
31  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
32  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
33  * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
34  * Copyright(c) 2018        Intel Corporation
35  * All rights reserved.
36  *
37  * Redistribution and use in source and binary forms, with or without
38  * modification, are permitted provided that the following conditions
39  * are met:
40  *
41  *  * Redistributions of source code must retain the above copyright
42  *    notice, this list of conditions and the following disclaimer.
43  *  * Redistributions in binary form must reproduce the above copyright
44  *    notice, this list of conditions and the following disclaimer in
45  *    the documentation and/or other materials provided with the
46  *    distribution.
47  *  * Neither the name Intel Corporation nor the names of its
48  *    contributors may be used to endorse or promote products derived
49  *    from this software without specific prior written permission.
50  *
51  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
52  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
53  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
54  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
55  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
56  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
57  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
58  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
59  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
60  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
61  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
62  *
63  *****************************************************************************/
64 #include <linux/devcoredump.h>
65 #include "iwl-drv.h"
66 #include "runtime.h"
67 #include "dbg.h"
68 #include "debugfs.h"
69 #include "iwl-io.h"
70 #include "iwl-prph.h"
71 #include "iwl-csr.h"
72 
73 /**
74  * struct iwl_fw_dump_ptrs - set of pointers needed for the fw-error-dump
75  *
76  * @fwrt_ptr: pointer to the buffer coming from fwrt
77  * @trans_ptr: pointer to struct %iwl_trans_dump_data which contains the
78  *	transport's data.
79  * @trans_len: length of the valid data in trans_ptr
80  * @fwrt_len: length of the valid data in fwrt_ptr
81  */
82 struct iwl_fw_dump_ptrs {
83 	struct iwl_trans_dump_data *trans_ptr;
84 	void *fwrt_ptr;
85 	u32 fwrt_len;
86 };
87 
88 #define RADIO_REG_MAX_READ 0x2ad
89 static void iwl_read_radio_regs(struct iwl_fw_runtime *fwrt,
90 				struct iwl_fw_error_dump_data **dump_data)
91 {
92 	u8 *pos = (void *)(*dump_data)->data;
93 	unsigned long flags;
94 	int i;
95 
96 	IWL_DEBUG_INFO(fwrt, "WRT radio registers dump\n");
97 
98 	if (!iwl_trans_grab_nic_access(fwrt->trans, &flags))
99 		return;
100 
101 	(*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RADIO_REG);
102 	(*dump_data)->len = cpu_to_le32(RADIO_REG_MAX_READ);
103 
104 	for (i = 0; i < RADIO_REG_MAX_READ; i++) {
105 		u32 rd_cmd = RADIO_RSP_RD_CMD;
106 
107 		rd_cmd |= i << RADIO_RSP_ADDR_POS;
108 		iwl_write_prph_no_grab(fwrt->trans, RSP_RADIO_CMD, rd_cmd);
109 		*pos = (u8)iwl_read_prph_no_grab(fwrt->trans, RSP_RADIO_RDDAT);
110 
111 		pos++;
112 	}
113 
114 	*dump_data = iwl_fw_error_next_data(*dump_data);
115 
116 	iwl_trans_release_nic_access(fwrt->trans, &flags);
117 }
118 
119 static void iwl_fwrt_dump_rxf(struct iwl_fw_runtime *fwrt,
120 			      struct iwl_fw_error_dump_data **dump_data,
121 			      int size, u32 offset, int fifo_num)
122 {
123 	struct iwl_fw_error_dump_fifo *fifo_hdr;
124 	u32 *fifo_data;
125 	u32 fifo_len;
126 	int i;
127 
128 	fifo_hdr = (void *)(*dump_data)->data;
129 	fifo_data = (void *)fifo_hdr->data;
130 	fifo_len = size;
131 
132 	/* No need to try to read the data if the length is 0 */
133 	if (fifo_len == 0)
134 		return;
135 
136 	/* Add a TLV for the RXF */
137 	(*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RXF);
138 	(*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr));
139 
140 	fifo_hdr->fifo_num = cpu_to_le32(fifo_num);
141 	fifo_hdr->available_bytes =
142 		cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
143 						RXF_RD_D_SPACE + offset));
144 	fifo_hdr->wr_ptr =
145 		cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
146 						RXF_RD_WR_PTR + offset));
147 	fifo_hdr->rd_ptr =
148 		cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
149 						RXF_RD_RD_PTR + offset));
150 	fifo_hdr->fence_ptr =
151 		cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
152 						RXF_RD_FENCE_PTR + offset));
153 	fifo_hdr->fence_mode =
154 		cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
155 						RXF_SET_FENCE_MODE + offset));
156 
157 	/* Lock fence */
158 	iwl_trans_write_prph(fwrt->trans, RXF_SET_FENCE_MODE + offset, 0x1);
159 	/* Set fence pointer to the same place like WR pointer */
160 	iwl_trans_write_prph(fwrt->trans, RXF_LD_WR2FENCE + offset, 0x1);
161 	/* Set fence offset */
162 	iwl_trans_write_prph(fwrt->trans,
163 			     RXF_LD_FENCE_OFFSET_ADDR + offset, 0x0);
164 
165 	/* Read FIFO */
166 	fifo_len /= sizeof(u32); /* Size in DWORDS */
167 	for (i = 0; i < fifo_len; i++)
168 		fifo_data[i] = iwl_trans_read_prph(fwrt->trans,
169 						 RXF_FIFO_RD_FENCE_INC +
170 						 offset);
171 	*dump_data = iwl_fw_error_next_data(*dump_data);
172 }
173 
174 static void iwl_fwrt_dump_txf(struct iwl_fw_runtime *fwrt,
175 			      struct iwl_fw_error_dump_data **dump_data,
176 			      int size, u32 offset, int fifo_num)
177 {
178 	struct iwl_fw_error_dump_fifo *fifo_hdr;
179 	u32 *fifo_data;
180 	u32 fifo_len;
181 	int i;
182 
183 	fifo_hdr = (void *)(*dump_data)->data;
184 	fifo_data = (void *)fifo_hdr->data;
185 	fifo_len = size;
186 
187 	/* No need to try to read the data if the length is 0 */
188 	if (fifo_len == 0)
189 		return;
190 
191 	/* Add a TLV for the FIFO */
192 	(*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXF);
193 	(*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr));
194 
195 	fifo_hdr->fifo_num = cpu_to_le32(fifo_num);
196 	fifo_hdr->available_bytes =
197 		cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
198 						TXF_FIFO_ITEM_CNT + offset));
199 	fifo_hdr->wr_ptr =
200 		cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
201 						TXF_WR_PTR + offset));
202 	fifo_hdr->rd_ptr =
203 		cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
204 						TXF_RD_PTR + offset));
205 	fifo_hdr->fence_ptr =
206 		cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
207 						TXF_FENCE_PTR + offset));
208 	fifo_hdr->fence_mode =
209 		cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
210 						TXF_LOCK_FENCE + offset));
211 
212 	/* Set the TXF_READ_MODIFY_ADDR to TXF_WR_PTR */
213 	iwl_trans_write_prph(fwrt->trans, TXF_READ_MODIFY_ADDR + offset,
214 			     TXF_WR_PTR + offset);
215 
216 	/* Dummy-read to advance the read pointer to the head */
217 	iwl_trans_read_prph(fwrt->trans, TXF_READ_MODIFY_DATA + offset);
218 
219 	/* Read FIFO */
220 	fifo_len /= sizeof(u32); /* Size in DWORDS */
221 	for (i = 0; i < fifo_len; i++)
222 		fifo_data[i] = iwl_trans_read_prph(fwrt->trans,
223 						  TXF_READ_MODIFY_DATA +
224 						  offset);
225 	*dump_data = iwl_fw_error_next_data(*dump_data);
226 }
227 
228 static void iwl_fw_dump_fifos(struct iwl_fw_runtime *fwrt,
229 			      struct iwl_fw_error_dump_data **dump_data)
230 {
231 	struct iwl_fw_error_dump_fifo *fifo_hdr;
232 	struct iwl_fwrt_shared_mem_cfg *cfg = &fwrt->smem_cfg;
233 	u32 *fifo_data;
234 	u32 fifo_len;
235 	unsigned long flags;
236 	int i, j;
237 
238 	IWL_DEBUG_INFO(fwrt, "WRT FIFO dump\n");
239 
240 	if (!iwl_trans_grab_nic_access(fwrt->trans, &flags))
241 		return;
242 
243 	if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_RXF)) {
244 		/* Pull RXF1 */
245 		iwl_fwrt_dump_rxf(fwrt, dump_data,
246 				  cfg->lmac[0].rxfifo1_size, 0, 0);
247 		/* Pull RXF2 */
248 		iwl_fwrt_dump_rxf(fwrt, dump_data, cfg->rxfifo2_size,
249 				  RXF_DIFF_FROM_PREV, 1);
250 		/* Pull LMAC2 RXF1 */
251 		if (fwrt->smem_cfg.num_lmacs > 1)
252 			iwl_fwrt_dump_rxf(fwrt, dump_data,
253 					  cfg->lmac[1].rxfifo1_size,
254 					  LMAC2_PRPH_OFFSET, 2);
255 	}
256 
257 	if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_TXF)) {
258 		/* Pull TXF data from LMAC1 */
259 		for (i = 0; i < fwrt->smem_cfg.num_txfifo_entries; i++) {
260 			/* Mark the number of TXF we're pulling now */
261 			iwl_trans_write_prph(fwrt->trans, TXF_LARC_NUM, i);
262 			iwl_fwrt_dump_txf(fwrt, dump_data,
263 					  cfg->lmac[0].txfifo_size[i], 0, i);
264 		}
265 
266 		/* Pull TXF data from LMAC2 */
267 		if (fwrt->smem_cfg.num_lmacs > 1) {
268 			for (i = 0; i < fwrt->smem_cfg.num_txfifo_entries;
269 			     i++) {
270 				/* Mark the number of TXF we're pulling now */
271 				iwl_trans_write_prph(fwrt->trans,
272 						     TXF_LARC_NUM +
273 						     LMAC2_PRPH_OFFSET, i);
274 				iwl_fwrt_dump_txf(fwrt, dump_data,
275 						  cfg->lmac[1].txfifo_size[i],
276 						  LMAC2_PRPH_OFFSET,
277 						  i + cfg->num_txfifo_entries);
278 			}
279 		}
280 	}
281 
282 	if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_INTERNAL_TXF) &&
283 	    fw_has_capa(&fwrt->fw->ucode_capa,
284 			IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
285 		/* Pull UMAC internal TXF data from all TXFs */
286 		for (i = 0;
287 		     i < ARRAY_SIZE(fwrt->smem_cfg.internal_txfifo_size);
288 		     i++) {
289 			fifo_hdr = (void *)(*dump_data)->data;
290 			fifo_data = (void *)fifo_hdr->data;
291 			fifo_len = fwrt->smem_cfg.internal_txfifo_size[i];
292 
293 			/* No need to try to read the data if the length is 0 */
294 			if (fifo_len == 0)
295 				continue;
296 
297 			/* Add a TLV for the internal FIFOs */
298 			(*dump_data)->type =
299 				cpu_to_le32(IWL_FW_ERROR_DUMP_INTERNAL_TXF);
300 			(*dump_data)->len =
301 				cpu_to_le32(fifo_len + sizeof(*fifo_hdr));
302 
303 			fifo_hdr->fifo_num = cpu_to_le32(i);
304 
305 			/* Mark the number of TXF we're pulling now */
306 			iwl_trans_write_prph(fwrt->trans, TXF_CPU2_NUM, i +
307 				fwrt->smem_cfg.num_txfifo_entries);
308 
309 			fifo_hdr->available_bytes =
310 				cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
311 								TXF_CPU2_FIFO_ITEM_CNT));
312 			fifo_hdr->wr_ptr =
313 				cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
314 								TXF_CPU2_WR_PTR));
315 			fifo_hdr->rd_ptr =
316 				cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
317 								TXF_CPU2_RD_PTR));
318 			fifo_hdr->fence_ptr =
319 				cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
320 								TXF_CPU2_FENCE_PTR));
321 			fifo_hdr->fence_mode =
322 				cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
323 								TXF_CPU2_LOCK_FENCE));
324 
325 			/* Set TXF_CPU2_READ_MODIFY_ADDR to TXF_CPU2_WR_PTR */
326 			iwl_trans_write_prph(fwrt->trans,
327 					     TXF_CPU2_READ_MODIFY_ADDR,
328 					     TXF_CPU2_WR_PTR);
329 
330 			/* Dummy-read to advance the read pointer to head */
331 			iwl_trans_read_prph(fwrt->trans,
332 					    TXF_CPU2_READ_MODIFY_DATA);
333 
334 			/* Read FIFO */
335 			fifo_len /= sizeof(u32); /* Size in DWORDS */
336 			for (j = 0; j < fifo_len; j++)
337 				fifo_data[j] =
338 					iwl_trans_read_prph(fwrt->trans,
339 							    TXF_CPU2_READ_MODIFY_DATA);
340 			*dump_data = iwl_fw_error_next_data(*dump_data);
341 		}
342 	}
343 
344 	iwl_trans_release_nic_access(fwrt->trans, &flags);
345 }
346 
347 #define IWL8260_ICCM_OFFSET		0x44000 /* Only for B-step */
348 #define IWL8260_ICCM_LEN		0xC000 /* Only for B-step */
349 
350 struct iwl_prph_range {
351 	u32 start, end;
352 };
353 
354 static const struct iwl_prph_range iwl_prph_dump_addr_comm[] = {
355 	{ .start = 0x00a00000, .end = 0x00a00000 },
356 	{ .start = 0x00a0000c, .end = 0x00a00024 },
357 	{ .start = 0x00a0002c, .end = 0x00a0003c },
358 	{ .start = 0x00a00410, .end = 0x00a00418 },
359 	{ .start = 0x00a00420, .end = 0x00a00420 },
360 	{ .start = 0x00a00428, .end = 0x00a00428 },
361 	{ .start = 0x00a00430, .end = 0x00a0043c },
362 	{ .start = 0x00a00444, .end = 0x00a00444 },
363 	{ .start = 0x00a004c0, .end = 0x00a004cc },
364 	{ .start = 0x00a004d8, .end = 0x00a004d8 },
365 	{ .start = 0x00a004e0, .end = 0x00a004f0 },
366 	{ .start = 0x00a00840, .end = 0x00a00840 },
367 	{ .start = 0x00a00850, .end = 0x00a00858 },
368 	{ .start = 0x00a01004, .end = 0x00a01008 },
369 	{ .start = 0x00a01010, .end = 0x00a01010 },
370 	{ .start = 0x00a01018, .end = 0x00a01018 },
371 	{ .start = 0x00a01024, .end = 0x00a01024 },
372 	{ .start = 0x00a0102c, .end = 0x00a01034 },
373 	{ .start = 0x00a0103c, .end = 0x00a01040 },
374 	{ .start = 0x00a01048, .end = 0x00a01094 },
375 	{ .start = 0x00a01c00, .end = 0x00a01c20 },
376 	{ .start = 0x00a01c58, .end = 0x00a01c58 },
377 	{ .start = 0x00a01c7c, .end = 0x00a01c7c },
378 	{ .start = 0x00a01c28, .end = 0x00a01c54 },
379 	{ .start = 0x00a01c5c, .end = 0x00a01c5c },
380 	{ .start = 0x00a01c60, .end = 0x00a01cdc },
381 	{ .start = 0x00a01ce0, .end = 0x00a01d0c },
382 	{ .start = 0x00a01d18, .end = 0x00a01d20 },
383 	{ .start = 0x00a01d2c, .end = 0x00a01d30 },
384 	{ .start = 0x00a01d40, .end = 0x00a01d5c },
385 	{ .start = 0x00a01d80, .end = 0x00a01d80 },
386 	{ .start = 0x00a01d98, .end = 0x00a01d9c },
387 	{ .start = 0x00a01da8, .end = 0x00a01da8 },
388 	{ .start = 0x00a01db8, .end = 0x00a01df4 },
389 	{ .start = 0x00a01dc0, .end = 0x00a01dfc },
390 	{ .start = 0x00a01e00, .end = 0x00a01e2c },
391 	{ .start = 0x00a01e40, .end = 0x00a01e60 },
392 	{ .start = 0x00a01e68, .end = 0x00a01e6c },
393 	{ .start = 0x00a01e74, .end = 0x00a01e74 },
394 	{ .start = 0x00a01e84, .end = 0x00a01e90 },
395 	{ .start = 0x00a01e9c, .end = 0x00a01ec4 },
396 	{ .start = 0x00a01ed0, .end = 0x00a01ee0 },
397 	{ .start = 0x00a01f00, .end = 0x00a01f1c },
398 	{ .start = 0x00a01f44, .end = 0x00a01ffc },
399 	{ .start = 0x00a02000, .end = 0x00a02048 },
400 	{ .start = 0x00a02068, .end = 0x00a020f0 },
401 	{ .start = 0x00a02100, .end = 0x00a02118 },
402 	{ .start = 0x00a02140, .end = 0x00a0214c },
403 	{ .start = 0x00a02168, .end = 0x00a0218c },
404 	{ .start = 0x00a021c0, .end = 0x00a021c0 },
405 	{ .start = 0x00a02400, .end = 0x00a02410 },
406 	{ .start = 0x00a02418, .end = 0x00a02420 },
407 	{ .start = 0x00a02428, .end = 0x00a0242c },
408 	{ .start = 0x00a02434, .end = 0x00a02434 },
409 	{ .start = 0x00a02440, .end = 0x00a02460 },
410 	{ .start = 0x00a02468, .end = 0x00a024b0 },
411 	{ .start = 0x00a024c8, .end = 0x00a024cc },
412 	{ .start = 0x00a02500, .end = 0x00a02504 },
413 	{ .start = 0x00a0250c, .end = 0x00a02510 },
414 	{ .start = 0x00a02540, .end = 0x00a02554 },
415 	{ .start = 0x00a02580, .end = 0x00a025f4 },
416 	{ .start = 0x00a02600, .end = 0x00a0260c },
417 	{ .start = 0x00a02648, .end = 0x00a02650 },
418 	{ .start = 0x00a02680, .end = 0x00a02680 },
419 	{ .start = 0x00a026c0, .end = 0x00a026d0 },
420 	{ .start = 0x00a02700, .end = 0x00a0270c },
421 	{ .start = 0x00a02804, .end = 0x00a02804 },
422 	{ .start = 0x00a02818, .end = 0x00a0281c },
423 	{ .start = 0x00a02c00, .end = 0x00a02db4 },
424 	{ .start = 0x00a02df4, .end = 0x00a02fb0 },
425 	{ .start = 0x00a03000, .end = 0x00a03014 },
426 	{ .start = 0x00a0301c, .end = 0x00a0302c },
427 	{ .start = 0x00a03034, .end = 0x00a03038 },
428 	{ .start = 0x00a03040, .end = 0x00a03048 },
429 	{ .start = 0x00a03060, .end = 0x00a03068 },
430 	{ .start = 0x00a03070, .end = 0x00a03074 },
431 	{ .start = 0x00a0307c, .end = 0x00a0307c },
432 	{ .start = 0x00a03080, .end = 0x00a03084 },
433 	{ .start = 0x00a0308c, .end = 0x00a03090 },
434 	{ .start = 0x00a03098, .end = 0x00a03098 },
435 	{ .start = 0x00a030a0, .end = 0x00a030a0 },
436 	{ .start = 0x00a030a8, .end = 0x00a030b4 },
437 	{ .start = 0x00a030bc, .end = 0x00a030bc },
438 	{ .start = 0x00a030c0, .end = 0x00a0312c },
439 	{ .start = 0x00a03c00, .end = 0x00a03c5c },
440 	{ .start = 0x00a04400, .end = 0x00a04454 },
441 	{ .start = 0x00a04460, .end = 0x00a04474 },
442 	{ .start = 0x00a044c0, .end = 0x00a044ec },
443 	{ .start = 0x00a04500, .end = 0x00a04504 },
444 	{ .start = 0x00a04510, .end = 0x00a04538 },
445 	{ .start = 0x00a04540, .end = 0x00a04548 },
446 	{ .start = 0x00a04560, .end = 0x00a0457c },
447 	{ .start = 0x00a04590, .end = 0x00a04598 },
448 	{ .start = 0x00a045c0, .end = 0x00a045f4 },
449 };
450 
451 static const struct iwl_prph_range iwl_prph_dump_addr_9000[] = {
452 	{ .start = 0x00a05c00, .end = 0x00a05c18 },
453 	{ .start = 0x00a05400, .end = 0x00a056e8 },
454 	{ .start = 0x00a08000, .end = 0x00a098bc },
455 	{ .start = 0x00a02400, .end = 0x00a02758 },
456 };
457 
458 static void iwl_read_prph_block(struct iwl_trans *trans, u32 start,
459 				u32 len_bytes, __le32 *data)
460 {
461 	u32 i;
462 
463 	for (i = 0; i < len_bytes; i += 4)
464 		*data++ = cpu_to_le32(iwl_read_prph_no_grab(trans, start + i));
465 }
466 
467 static void iwl_dump_prph(struct iwl_trans *trans,
468 			  struct iwl_fw_error_dump_data **data,
469 			  const struct iwl_prph_range *iwl_prph_dump_addr,
470 			  u32 range_len)
471 {
472 	struct iwl_fw_error_dump_prph *prph;
473 	unsigned long flags;
474 	u32 i;
475 
476 	IWL_DEBUG_INFO(trans, "WRT PRPH dump\n");
477 
478 	if (!iwl_trans_grab_nic_access(trans, &flags))
479 		return;
480 
481 	for (i = 0; i < range_len; i++) {
482 		/* The range includes both boundaries */
483 		int num_bytes_in_chunk = iwl_prph_dump_addr[i].end -
484 			 iwl_prph_dump_addr[i].start + 4;
485 
486 		(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PRPH);
487 		(*data)->len = cpu_to_le32(sizeof(*prph) +
488 					num_bytes_in_chunk);
489 		prph = (void *)(*data)->data;
490 		prph->prph_start = cpu_to_le32(iwl_prph_dump_addr[i].start);
491 
492 		iwl_read_prph_block(trans, iwl_prph_dump_addr[i].start,
493 				    /* our range is inclusive, hence + 4 */
494 				    iwl_prph_dump_addr[i].end -
495 				    iwl_prph_dump_addr[i].start + 4,
496 				    (void *)prph->data);
497 
498 		*data = iwl_fw_error_next_data(*data);
499 	}
500 
501 	iwl_trans_release_nic_access(trans, &flags);
502 }
503 
504 /*
505  * alloc_sgtable - allocates scallerlist table in the given size,
506  * fills it with pages and returns it
507  * @size: the size (in bytes) of the table
508 */
509 static struct scatterlist *alloc_sgtable(int size)
510 {
511 	int alloc_size, nents, i;
512 	struct page *new_page;
513 	struct scatterlist *iter;
514 	struct scatterlist *table;
515 
516 	nents = DIV_ROUND_UP(size, PAGE_SIZE);
517 	table = kcalloc(nents, sizeof(*table), GFP_KERNEL);
518 	if (!table)
519 		return NULL;
520 	sg_init_table(table, nents);
521 	iter = table;
522 	for_each_sg(table, iter, sg_nents(table), i) {
523 		new_page = alloc_page(GFP_KERNEL);
524 		if (!new_page) {
525 			/* release all previous allocated pages in the table */
526 			iter = table;
527 			for_each_sg(table, iter, sg_nents(table), i) {
528 				new_page = sg_page(iter);
529 				if (new_page)
530 					__free_page(new_page);
531 			}
532 			return NULL;
533 		}
534 		alloc_size = min_t(int, size, PAGE_SIZE);
535 		size -= PAGE_SIZE;
536 		sg_set_page(iter, new_page, alloc_size, 0);
537 	}
538 	return table;
539 }
540 
541 static int iwl_fw_get_prph_len(struct iwl_fw_runtime *fwrt)
542 {
543 	u32 prph_len = 0;
544 	int i;
545 
546 	for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr_comm);
547 	     i++) {
548 		/* The range includes both boundaries */
549 		int num_bytes_in_chunk =
550 			iwl_prph_dump_addr_comm[i].end -
551 			iwl_prph_dump_addr_comm[i].start + 4;
552 
553 		prph_len += sizeof(struct iwl_fw_error_dump_data) +
554 			sizeof(struct iwl_fw_error_dump_prph) +
555 			num_bytes_in_chunk;
556 	}
557 
558 	if (fwrt->trans->cfg->mq_rx_supported) {
559 		for (i = 0; i <
560 			ARRAY_SIZE(iwl_prph_dump_addr_9000); i++) {
561 			/* The range includes both boundaries */
562 			int num_bytes_in_chunk =
563 				iwl_prph_dump_addr_9000[i].end -
564 				iwl_prph_dump_addr_9000[i].start + 4;
565 
566 			prph_len += sizeof(struct iwl_fw_error_dump_data) +
567 				sizeof(struct iwl_fw_error_dump_prph) +
568 				num_bytes_in_chunk;
569 		}
570 	}
571 	return prph_len;
572 }
573 
574 static void iwl_fw_dump_mem(struct iwl_fw_runtime *fwrt,
575 			    struct iwl_fw_error_dump_data **dump_data,
576 			    u32 len, u32 ofs, u32 type)
577 {
578 	struct iwl_fw_error_dump_mem *dump_mem;
579 
580 	if (!len)
581 		return;
582 
583 	(*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
584 	(*dump_data)->len = cpu_to_le32(len + sizeof(*dump_mem));
585 	dump_mem = (void *)(*dump_data)->data;
586 	dump_mem->type = cpu_to_le32(type);
587 	dump_mem->offset = cpu_to_le32(ofs);
588 	iwl_trans_read_mem_bytes(fwrt->trans, ofs, dump_mem->data, len);
589 	*dump_data = iwl_fw_error_next_data(*dump_data);
590 
591 	IWL_DEBUG_INFO(fwrt, "WRT memory dump. Type=%u\n", dump_mem->type);
592 }
593 
594 #define ADD_LEN(len, item_len, const_len) \
595 	do {size_t item = item_len; len += (!!item) * const_len + item; } \
596 	while (0)
597 
598 static int iwl_fw_fifo_len(struct iwl_fw_runtime *fwrt,
599 			   struct iwl_fwrt_shared_mem_cfg *mem_cfg)
600 {
601 	size_t hdr_len = sizeof(struct iwl_fw_error_dump_data) +
602 			 sizeof(struct iwl_fw_error_dump_fifo);
603 	u32 fifo_len = 0;
604 	int i;
605 
606 	if (!(fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_RXF)))
607 		goto dump_txf;
608 
609 	/* Count RXF2 size */
610 	ADD_LEN(fifo_len, mem_cfg->rxfifo2_size, hdr_len);
611 
612 	/* Count RXF1 sizes */
613 	for (i = 0; i < mem_cfg->num_lmacs; i++)
614 		ADD_LEN(fifo_len, mem_cfg->lmac[i].rxfifo1_size, hdr_len);
615 
616 dump_txf:
617 	if (!(fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_TXF)))
618 		goto dump_internal_txf;
619 
620 	/* Count TXF sizes */
621 	for (i = 0; i < mem_cfg->num_lmacs; i++) {
622 		int j;
623 
624 		for (j = 0; j < mem_cfg->num_txfifo_entries; j++)
625 			ADD_LEN(fifo_len, mem_cfg->lmac[i].txfifo_size[j],
626 				hdr_len);
627 	}
628 
629 dump_internal_txf:
630 	if (!((fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_INTERNAL_TXF)) &&
631 	      fw_has_capa(&fwrt->fw->ucode_capa,
632 			  IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)))
633 		goto out;
634 
635 	for (i = 0; i < ARRAY_SIZE(mem_cfg->internal_txfifo_size); i++)
636 		ADD_LEN(fifo_len, mem_cfg->internal_txfifo_size[i], hdr_len);
637 
638 out:
639 	return fifo_len;
640 }
641 
642 static struct iwl_fw_error_dump_file *
643 _iwl_fw_error_dump(struct iwl_fw_runtime *fwrt,
644 		   struct iwl_fw_dump_ptrs *fw_error_dump)
645 {
646 	struct iwl_fw_error_dump_file *dump_file;
647 	struct iwl_fw_error_dump_data *dump_data;
648 	struct iwl_fw_error_dump_info *dump_info;
649 	struct iwl_fw_error_dump_smem_cfg *dump_smem_cfg;
650 	struct iwl_fw_error_dump_trigger_desc *dump_trig;
651 	u32 sram_len, sram_ofs;
652 	const struct iwl_fw_dbg_mem_seg_tlv *fw_mem = fwrt->fw->dbg.mem_tlv;
653 	struct iwl_fwrt_shared_mem_cfg *mem_cfg = &fwrt->smem_cfg;
654 	u32 file_len, fifo_len = 0, prph_len = 0, radio_len = 0;
655 	u32 smem_len = fwrt->fw->dbg.n_mem_tlv ? 0 : fwrt->trans->cfg->smem_len;
656 	u32 sram2_len = fwrt->fw->dbg.n_mem_tlv ?
657 				0 : fwrt->trans->cfg->dccm2_len;
658 	bool monitor_dump_only = false;
659 	int i;
660 
661 	if (fwrt->dump.trig &&
662 	    fwrt->dump.trig->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY)
663 		monitor_dump_only = true;
664 
665 	/* SRAM - include stack CCM if driver knows the values for it */
666 	if (!fwrt->trans->cfg->dccm_offset || !fwrt->trans->cfg->dccm_len) {
667 		const struct fw_img *img;
668 
669 		img = &fwrt->fw->img[fwrt->cur_fw_img];
670 		sram_ofs = img->sec[IWL_UCODE_SECTION_DATA].offset;
671 		sram_len = img->sec[IWL_UCODE_SECTION_DATA].len;
672 	} else {
673 		sram_ofs = fwrt->trans->cfg->dccm_offset;
674 		sram_len = fwrt->trans->cfg->dccm_len;
675 	}
676 
677 	/* reading RXF/TXF sizes */
678 	if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status)) {
679 		fifo_len = iwl_fw_fifo_len(fwrt, mem_cfg);
680 
681 		/* Make room for PRPH registers */
682 		if (!fwrt->trans->cfg->gen2 &&
683 		    fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_PRPH))
684 			prph_len += iwl_fw_get_prph_len(fwrt);
685 
686 		if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000 &&
687 		    fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_RADIO_REG))
688 			radio_len = sizeof(*dump_data) + RADIO_REG_MAX_READ;
689 	}
690 
691 	file_len = sizeof(*dump_file) + fifo_len + prph_len + radio_len;
692 
693 	if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_DEV_FW_INFO))
694 		file_len += sizeof(*dump_data) + sizeof(*dump_info);
695 	if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM_CFG))
696 		file_len += sizeof(*dump_data) + sizeof(*dump_smem_cfg);
697 
698 	if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)) {
699 		size_t hdr_len = sizeof(*dump_data) +
700 				 sizeof(struct iwl_fw_error_dump_mem);
701 
702 		/* Dump SRAM only if no mem_tlvs */
703 		if (!fwrt->fw->dbg.n_mem_tlv)
704 			ADD_LEN(file_len, sram_len, hdr_len);
705 
706 		/* Make room for all mem types that exist */
707 		ADD_LEN(file_len, smem_len, hdr_len);
708 		ADD_LEN(file_len, sram2_len, hdr_len);
709 
710 		for (i = 0; i < fwrt->fw->dbg.n_mem_tlv; i++)
711 			ADD_LEN(file_len, le32_to_cpu(fw_mem[i].len), hdr_len);
712 	}
713 
714 	/* Make room for fw's virtual image pages, if it exists */
715 	if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING) &&
716 	    !fwrt->trans->cfg->gen2 &&
717 	    fwrt->fw->img[fwrt->cur_fw_img].paging_mem_size &&
718 	    fwrt->fw_paging_db[0].fw_paging_block)
719 		file_len += fwrt->num_of_paging_blk *
720 			(sizeof(*dump_data) +
721 			 sizeof(struct iwl_fw_error_dump_paging) +
722 			 PAGING_BLOCK_SIZE);
723 
724 	if (iwl_fw_dbg_is_d3_debug_enabled(fwrt) && fwrt->dump.d3_debug_data) {
725 		file_len += sizeof(*dump_data) +
726 			fwrt->trans->cfg->d3_debug_data_length * 2;
727 	}
728 
729 	/* If we only want a monitor dump, reset the file length */
730 	if (monitor_dump_only) {
731 		file_len = sizeof(*dump_file) + sizeof(*dump_data) * 2 +
732 			   sizeof(*dump_info) + sizeof(*dump_smem_cfg);
733 	}
734 
735 	if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_ERROR_INFO) &&
736 	    fwrt->dump.desc)
737 		file_len += sizeof(*dump_data) + sizeof(*dump_trig) +
738 			    fwrt->dump.desc->len;
739 
740 	dump_file = vzalloc(file_len);
741 	if (!dump_file)
742 		return NULL;
743 
744 	fw_error_dump->fwrt_ptr = dump_file;
745 
746 	dump_file->barker = cpu_to_le32(IWL_FW_ERROR_DUMP_BARKER);
747 	dump_data = (void *)dump_file->data;
748 
749 	if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_DEV_FW_INFO)) {
750 		dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_DEV_FW_INFO);
751 		dump_data->len = cpu_to_le32(sizeof(*dump_info));
752 		dump_info = (void *)dump_data->data;
753 		dump_info->device_family =
754 			fwrt->trans->cfg->device_family ==
755 			IWL_DEVICE_FAMILY_7000 ?
756 				cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_7) :
757 				cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_8);
758 		dump_info->hw_step =
759 			cpu_to_le32(CSR_HW_REV_STEP(fwrt->trans->hw_rev));
760 		memcpy(dump_info->fw_human_readable, fwrt->fw->human_readable,
761 		       sizeof(dump_info->fw_human_readable));
762 		strncpy(dump_info->dev_human_readable, fwrt->trans->cfg->name,
763 			sizeof(dump_info->dev_human_readable) - 1);
764 		strncpy(dump_info->bus_human_readable, fwrt->dev->bus->name,
765 			sizeof(dump_info->bus_human_readable) - 1);
766 
767 		dump_data = iwl_fw_error_next_data(dump_data);
768 	}
769 
770 	if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM_CFG)) {
771 		/* Dump shared memory configuration */
772 		dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_CFG);
773 		dump_data->len = cpu_to_le32(sizeof(*dump_smem_cfg));
774 		dump_smem_cfg = (void *)dump_data->data;
775 		dump_smem_cfg->num_lmacs = cpu_to_le32(mem_cfg->num_lmacs);
776 		dump_smem_cfg->num_txfifo_entries =
777 			cpu_to_le32(mem_cfg->num_txfifo_entries);
778 		for (i = 0; i < MAX_NUM_LMAC; i++) {
779 			int j;
780 			u32 *txf_size = mem_cfg->lmac[i].txfifo_size;
781 
782 			for (j = 0; j < TX_FIFO_MAX_NUM; j++)
783 				dump_smem_cfg->lmac[i].txfifo_size[j] =
784 					cpu_to_le32(txf_size[j]);
785 			dump_smem_cfg->lmac[i].rxfifo1_size =
786 				cpu_to_le32(mem_cfg->lmac[i].rxfifo1_size);
787 		}
788 		dump_smem_cfg->rxfifo2_size =
789 			cpu_to_le32(mem_cfg->rxfifo2_size);
790 		dump_smem_cfg->internal_txfifo_addr =
791 			cpu_to_le32(mem_cfg->internal_txfifo_addr);
792 		for (i = 0; i < TX_FIFO_INTERNAL_MAX_NUM; i++) {
793 			dump_smem_cfg->internal_txfifo_size[i] =
794 				cpu_to_le32(mem_cfg->internal_txfifo_size[i]);
795 		}
796 
797 		dump_data = iwl_fw_error_next_data(dump_data);
798 	}
799 
800 	/* We only dump the FIFOs if the FW is in error state */
801 	if (fifo_len) {
802 		iwl_fw_dump_fifos(fwrt, &dump_data);
803 		if (radio_len)
804 			iwl_read_radio_regs(fwrt, &dump_data);
805 	}
806 
807 	if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_ERROR_INFO) &&
808 	    fwrt->dump.desc) {
809 		dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_ERROR_INFO);
810 		dump_data->len = cpu_to_le32(sizeof(*dump_trig) +
811 					     fwrt->dump.desc->len);
812 		dump_trig = (void *)dump_data->data;
813 		memcpy(dump_trig, &fwrt->dump.desc->trig_desc,
814 		       sizeof(*dump_trig) + fwrt->dump.desc->len);
815 
816 		dump_data = iwl_fw_error_next_data(dump_data);
817 	}
818 
819 	/* In case we only want monitor dump, skip to dump trasport data */
820 	if (monitor_dump_only)
821 		goto out;
822 
823 	if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)) {
824 		const struct iwl_fw_dbg_mem_seg_tlv *fw_dbg_mem =
825 			fwrt->fw->dbg.mem_tlv;
826 
827 		if (!fwrt->fw->dbg.n_mem_tlv)
828 			iwl_fw_dump_mem(fwrt, &dump_data, sram_len, sram_ofs,
829 					IWL_FW_ERROR_DUMP_MEM_SRAM);
830 
831 		for (i = 0; i < fwrt->fw->dbg.n_mem_tlv; i++) {
832 			u32 len = le32_to_cpu(fw_dbg_mem[i].len);
833 			u32 ofs = le32_to_cpu(fw_dbg_mem[i].ofs);
834 
835 			iwl_fw_dump_mem(fwrt, &dump_data, len, ofs,
836 					le32_to_cpu(fw_dbg_mem[i].data_type));
837 		}
838 
839 		iwl_fw_dump_mem(fwrt, &dump_data, smem_len,
840 				fwrt->trans->cfg->smem_offset,
841 				IWL_FW_ERROR_DUMP_MEM_SMEM);
842 
843 		iwl_fw_dump_mem(fwrt, &dump_data, sram2_len,
844 				fwrt->trans->cfg->dccm2_offset,
845 				IWL_FW_ERROR_DUMP_MEM_SRAM);
846 	}
847 
848 	if (iwl_fw_dbg_is_d3_debug_enabled(fwrt) && fwrt->dump.d3_debug_data) {
849 		u32 addr = fwrt->trans->cfg->d3_debug_data_base_addr;
850 		size_t data_size = fwrt->trans->cfg->d3_debug_data_length;
851 
852 		dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_D3_DEBUG_DATA);
853 		dump_data->len = cpu_to_le32(data_size * 2);
854 
855 		memcpy(dump_data->data, fwrt->dump.d3_debug_data, data_size);
856 
857 		kfree(fwrt->dump.d3_debug_data);
858 		fwrt->dump.d3_debug_data = NULL;
859 
860 		iwl_trans_read_mem_bytes(fwrt->trans, addr,
861 					 dump_data->data + data_size,
862 					 data_size);
863 
864 		dump_data = iwl_fw_error_next_data(dump_data);
865 	}
866 
867 	/* Dump fw's virtual image */
868 	if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING) &&
869 	    !fwrt->trans->cfg->gen2 &&
870 	    fwrt->fw->img[fwrt->cur_fw_img].paging_mem_size &&
871 	    fwrt->fw_paging_db[0].fw_paging_block) {
872 		IWL_DEBUG_INFO(fwrt, "WRT paging dump\n");
873 		for (i = 1; i < fwrt->num_of_paging_blk + 1; i++) {
874 			struct iwl_fw_error_dump_paging *paging;
875 			struct page *pages =
876 				fwrt->fw_paging_db[i].fw_paging_block;
877 			dma_addr_t addr = fwrt->fw_paging_db[i].fw_paging_phys;
878 
879 			dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING);
880 			dump_data->len = cpu_to_le32(sizeof(*paging) +
881 						     PAGING_BLOCK_SIZE);
882 			paging = (void *)dump_data->data;
883 			paging->index = cpu_to_le32(i);
884 			dma_sync_single_for_cpu(fwrt->trans->dev, addr,
885 						PAGING_BLOCK_SIZE,
886 						DMA_BIDIRECTIONAL);
887 			memcpy(paging->data, page_address(pages),
888 			       PAGING_BLOCK_SIZE);
889 			dump_data = iwl_fw_error_next_data(dump_data);
890 		}
891 	}
892 
893 	if (prph_len) {
894 		iwl_dump_prph(fwrt->trans, &dump_data,
895 			      iwl_prph_dump_addr_comm,
896 			      ARRAY_SIZE(iwl_prph_dump_addr_comm));
897 
898 		if (fwrt->trans->cfg->mq_rx_supported)
899 			iwl_dump_prph(fwrt->trans, &dump_data,
900 				      iwl_prph_dump_addr_9000,
901 				      ARRAY_SIZE(iwl_prph_dump_addr_9000));
902 	}
903 
904 out:
905 	dump_file->file_len = cpu_to_le32(file_len);
906 	return dump_file;
907 }
908 
909 void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
910 {
911 	struct iwl_fw_dump_ptrs *fw_error_dump;
912 	struct iwl_fw_error_dump_file *dump_file;
913 	struct scatterlist *sg_dump_data;
914 	u32 file_len;
915 
916 	IWL_DEBUG_INFO(fwrt, "WRT dump start\n");
917 
918 	/* there's no point in fw dump if the bus is dead */
919 	if (test_bit(STATUS_TRANS_DEAD, &fwrt->trans->status)) {
920 		IWL_ERR(fwrt, "Skip fw error dump since bus is dead\n");
921 		goto out;
922 	}
923 
924 	fw_error_dump = kzalloc(sizeof(*fw_error_dump), GFP_KERNEL);
925 	if (!fw_error_dump)
926 		goto out;
927 
928 	dump_file = _iwl_fw_error_dump(fwrt, fw_error_dump);
929 	if (!dump_file) {
930 		kfree(fw_error_dump);
931 		goto out;
932 	}
933 
934 	fw_error_dump->trans_ptr = iwl_trans_dump_data(fwrt->trans,
935 						       fwrt->dump.trig);
936 	file_len = le32_to_cpu(dump_file->file_len);
937 	fw_error_dump->fwrt_len = file_len;
938 	if (fw_error_dump->trans_ptr) {
939 		file_len += fw_error_dump->trans_ptr->len;
940 		dump_file->file_len = cpu_to_le32(file_len);
941 	}
942 
943 	sg_dump_data = alloc_sgtable(file_len);
944 	if (sg_dump_data) {
945 		sg_pcopy_from_buffer(sg_dump_data,
946 				     sg_nents(sg_dump_data),
947 				     fw_error_dump->fwrt_ptr,
948 				     fw_error_dump->fwrt_len, 0);
949 		if (fw_error_dump->trans_ptr)
950 			sg_pcopy_from_buffer(sg_dump_data,
951 					     sg_nents(sg_dump_data),
952 					     fw_error_dump->trans_ptr->data,
953 					     fw_error_dump->trans_ptr->len,
954 					     fw_error_dump->fwrt_len);
955 		dev_coredumpsg(fwrt->trans->dev, sg_dump_data, file_len,
956 			       GFP_KERNEL);
957 	}
958 	vfree(fw_error_dump->fwrt_ptr);
959 	vfree(fw_error_dump->trans_ptr);
960 	kfree(fw_error_dump);
961 
962 out:
963 	iwl_fw_free_dump_desc(fwrt);
964 	clear_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status);
965 	IWL_DEBUG_INFO(fwrt, "WRT dump done\n");
966 }
967 IWL_EXPORT_SYMBOL(iwl_fw_error_dump);
968 
969 const struct iwl_fw_dump_desc iwl_dump_desc_assert = {
970 	.trig_desc = {
971 		.type = cpu_to_le32(FW_DBG_TRIGGER_FW_ASSERT),
972 	},
973 };
974 IWL_EXPORT_SYMBOL(iwl_dump_desc_assert);
975 
976 void iwl_fw_alive_error_dump(struct iwl_fw_runtime *fwrt)
977 {
978 	struct iwl_fw_dump_desc *iwl_dump_desc_no_alive =
979 		kmalloc(sizeof(*iwl_dump_desc_no_alive), GFP_KERNEL);
980 
981 	if (!iwl_dump_desc_no_alive)
982 		return;
983 
984 	iwl_dump_desc_no_alive->trig_desc.type =
985 		cpu_to_le32(FW_DBG_TRIGGER_NO_ALIVE);
986 	iwl_dump_desc_no_alive->len = 0;
987 
988 	if (WARN_ON(fwrt->dump.desc))
989 		iwl_fw_free_dump_desc(fwrt);
990 
991 	IWL_WARN(fwrt, "Collecting data: trigger %d fired.\n",
992 		 FW_DBG_TRIGGER_NO_ALIVE);
993 
994 	fwrt->dump.desc = iwl_dump_desc_no_alive;
995 	iwl_fw_error_dump(fwrt);
996 	clear_bit(IWL_FWRT_STATUS_WAIT_ALIVE, &fwrt->status);
997 }
998 IWL_EXPORT_SYMBOL(iwl_fw_alive_error_dump);
999 
1000 int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt,
1001 			    const struct iwl_fw_dump_desc *desc, void *trigger,
1002 			    unsigned int delay)
1003 {
1004 	/*
1005 	 * If the loading of the FW completed successfully, the next step is to
1006 	 * get the SMEM config data. Thus, if fwrt->smem_cfg.num_lmacs is non
1007 	 * zero, the FW was already loaded successully. If the state is "NO_FW"
1008 	 * in such a case - exit, since FW may be dead. Otherwise, we
1009 	 * can try to collect the data, since FW might just not be fully
1010 	 * loaded (no "ALIVE" yet), and the debug data is accessible.
1011 	 *
1012 	 * Corner case: got the FW alive but crashed before getting the SMEM
1013 	 *	config. In such a case, due to HW access problems, we might
1014 	 *	collect garbage.
1015 	 */
1016 	if (fwrt->trans->state == IWL_TRANS_NO_FW &&
1017 	    fwrt->smem_cfg.num_lmacs)
1018 		return -EIO;
1019 
1020 	if (test_and_set_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status) ||
1021 	    test_bit(IWL_FWRT_STATUS_WAIT_ALIVE, &fwrt->status))
1022 		return -EBUSY;
1023 
1024 	if (WARN_ON(fwrt->dump.desc))
1025 		iwl_fw_free_dump_desc(fwrt);
1026 
1027 	IWL_WARN(fwrt, "Collecting data: trigger %d fired.\n",
1028 		 le32_to_cpu(desc->trig_desc.type));
1029 
1030 	fwrt->dump.desc = desc;
1031 	fwrt->dump.trig = trigger;
1032 
1033 	schedule_delayed_work(&fwrt->dump.wk, delay);
1034 
1035 	return 0;
1036 }
1037 IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect_desc);
1038 
1039 int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt,
1040 		       enum iwl_fw_dbg_trigger trig,
1041 		       const char *str, size_t len,
1042 		       struct iwl_fw_dbg_trigger_tlv *trigger)
1043 {
1044 	struct iwl_fw_dump_desc *desc;
1045 	unsigned int delay = 0;
1046 
1047 	if (trigger) {
1048 		u16 occurrences = le16_to_cpu(trigger->occurrences) - 1;
1049 
1050 		if (!le16_to_cpu(trigger->occurrences))
1051 			return 0;
1052 
1053 		if (trigger->flags & IWL_FW_DBG_FORCE_RESTART) {
1054 			IWL_WARN(fwrt, "Force restart: trigger %d fired.\n",
1055 				 trig);
1056 			iwl_force_nmi(fwrt->trans);
1057 			return 0;
1058 		}
1059 
1060 		trigger->occurrences = cpu_to_le16(occurrences);
1061 		delay = le16_to_cpu(trigger->trig_dis_ms);
1062 	}
1063 
1064 	desc = kzalloc(sizeof(*desc) + len, GFP_ATOMIC);
1065 	if (!desc)
1066 		return -ENOMEM;
1067 
1068 
1069 	desc->len = len;
1070 	desc->trig_desc.type = cpu_to_le32(trig);
1071 	memcpy(desc->trig_desc.data, str, len);
1072 
1073 	return iwl_fw_dbg_collect_desc(fwrt, desc, trigger, delay);
1074 }
1075 IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect);
1076 
1077 int iwl_fw_dbg_collect_trig(struct iwl_fw_runtime *fwrt,
1078 			    struct iwl_fw_dbg_trigger_tlv *trigger,
1079 			    const char *fmt, ...)
1080 {
1081 	int ret, len = 0;
1082 	char buf[64];
1083 
1084 	if (fmt) {
1085 		va_list ap;
1086 
1087 		buf[sizeof(buf) - 1] = '\0';
1088 
1089 		va_start(ap, fmt);
1090 		vsnprintf(buf, sizeof(buf), fmt, ap);
1091 		va_end(ap);
1092 
1093 		/* check for truncation */
1094 		if (WARN_ON_ONCE(buf[sizeof(buf) - 1]))
1095 			buf[sizeof(buf) - 1] = '\0';
1096 
1097 		len = strlen(buf) + 1;
1098 	}
1099 
1100 	ret = iwl_fw_dbg_collect(fwrt, le32_to_cpu(trigger->id), buf, len,
1101 				 trigger);
1102 
1103 	if (ret)
1104 		return ret;
1105 
1106 	return 0;
1107 }
1108 IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect_trig);
1109 
1110 int iwl_fw_start_dbg_conf(struct iwl_fw_runtime *fwrt, u8 conf_id)
1111 {
1112 	u8 *ptr;
1113 	int ret;
1114 	int i;
1115 
1116 	if (WARN_ONCE(conf_id >= ARRAY_SIZE(fwrt->fw->dbg.conf_tlv),
1117 		      "Invalid configuration %d\n", conf_id))
1118 		return -EINVAL;
1119 
1120 	/* EARLY START - firmware's configuration is hard coded */
1121 	if ((!fwrt->fw->dbg.conf_tlv[conf_id] ||
1122 	     !fwrt->fw->dbg.conf_tlv[conf_id]->num_of_hcmds) &&
1123 	    conf_id == FW_DBG_START_FROM_ALIVE)
1124 		return 0;
1125 
1126 	if (!fwrt->fw->dbg.conf_tlv[conf_id])
1127 		return -EINVAL;
1128 
1129 	if (fwrt->dump.conf != FW_DBG_INVALID)
1130 		IWL_WARN(fwrt, "FW already configured (%d) - re-configuring\n",
1131 			 fwrt->dump.conf);
1132 
1133 	/* Send all HCMDs for configuring the FW debug */
1134 	ptr = (void *)&fwrt->fw->dbg.conf_tlv[conf_id]->hcmd;
1135 	for (i = 0; i < fwrt->fw->dbg.conf_tlv[conf_id]->num_of_hcmds; i++) {
1136 		struct iwl_fw_dbg_conf_hcmd *cmd = (void *)ptr;
1137 		struct iwl_host_cmd hcmd = {
1138 			.id = cmd->id,
1139 			.len = { le16_to_cpu(cmd->len), },
1140 			.data = { cmd->data, },
1141 		};
1142 
1143 		ret = iwl_trans_send_cmd(fwrt->trans, &hcmd);
1144 		if (ret)
1145 			return ret;
1146 
1147 		ptr += sizeof(*cmd);
1148 		ptr += le16_to_cpu(cmd->len);
1149 	}
1150 
1151 	fwrt->dump.conf = conf_id;
1152 
1153 	return 0;
1154 }
1155 IWL_EXPORT_SYMBOL(iwl_fw_start_dbg_conf);
1156 
1157 /* this function assumes dump_start was called beforehand and dump_end will be
1158  * called afterwards
1159  */
1160 void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt)
1161 {
1162 	struct iwl_fw_dbg_params params = {0};
1163 
1164 	if (!test_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status))
1165 		return;
1166 
1167 	if (fwrt->ops && fwrt->ops->fw_running &&
1168 	    !fwrt->ops->fw_running(fwrt->ops_ctx)) {
1169 		IWL_ERR(fwrt, "Firmware not running - cannot dump error\n");
1170 		iwl_fw_free_dump_desc(fwrt);
1171 		clear_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status);
1172 		return;
1173 	}
1174 
1175 	iwl_fw_dbg_stop_recording(fwrt, &params);
1176 
1177 	iwl_fw_error_dump(fwrt);
1178 
1179 	/* start recording again if the firmware is not crashed */
1180 	if (!test_bit(STATUS_FW_ERROR, &fwrt->trans->status) &&
1181 	    fwrt->fw->dbg.dest_tlv) {
1182 		/* wait before we collect the data till the DBGC stop */
1183 		udelay(500);
1184 		iwl_fw_dbg_restart_recording(fwrt, &params);
1185 	}
1186 }
1187 IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect_sync);
1188 
1189 void iwl_fw_error_dump_wk(struct work_struct *work)
1190 {
1191 	struct iwl_fw_runtime *fwrt =
1192 		container_of(work, struct iwl_fw_runtime, dump.wk.work);
1193 
1194 	if (fwrt->ops && fwrt->ops->dump_start &&
1195 	    fwrt->ops->dump_start(fwrt->ops_ctx))
1196 		return;
1197 
1198 	iwl_fw_dbg_collect_sync(fwrt);
1199 
1200 	if (fwrt->ops && fwrt->ops->dump_end)
1201 		fwrt->ops->dump_end(fwrt->ops_ctx);
1202 }
1203 
1204 void iwl_fw_dbg_read_d3_debug_data(struct iwl_fw_runtime *fwrt)
1205 {
1206 	const struct iwl_cfg *cfg = fwrt->trans->cfg;
1207 
1208 	if (!iwl_fw_dbg_is_d3_debug_enabled(fwrt))
1209 		return;
1210 
1211 	if (!fwrt->dump.d3_debug_data) {
1212 		fwrt->dump.d3_debug_data = kmalloc(cfg->d3_debug_data_length,
1213 						   GFP_KERNEL);
1214 		if (!fwrt->dump.d3_debug_data) {
1215 			IWL_ERR(fwrt,
1216 				"failed to allocate memory for D3 debug data\n");
1217 			return;
1218 		}
1219 	}
1220 
1221 	/* if the buffer holds previous debug data it is overwritten */
1222 	iwl_trans_read_mem_bytes(fwrt->trans, cfg->d3_debug_data_base_addr,
1223 				 fwrt->dump.d3_debug_data,
1224 				 cfg->d3_debug_data_length);
1225 }
1226 IWL_EXPORT_SYMBOL(iwl_fw_dbg_read_d3_debug_data);
1227