1 /******************************************************************************
2  *
3  * This file is provided under a dual BSD/GPLv2 license.  When using or
4  * redistributing this file, you may do so under either license.
5  *
6  * GPL LICENSE SUMMARY
7  *
8  * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
9  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10  * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
11  * Copyright(c) 2018        Intel Corporation
12  *
13  * This program is free software; you can redistribute it and/or modify
14  * it under the terms of version 2 of the GNU General Public License as
15  * published by the Free Software Foundation.
16  *
17  * This program is distributed in the hope that it will be useful, but
18  * WITHOUT ANY WARRANTY; without even the implied warranty of
19  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
20  * General Public License for more details.
21  *
22  * You should have received a copy of the GNU General Public License
23  * along with this program;
24  *
25  * The full GNU General Public License is included in this distribution
26  * in the file called COPYING.
27  *
28  * Contact Information:
29  *  Intel Linux Wireless <linuxwifi@intel.com>
30  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31  *
32  * BSD LICENSE
33  *
34  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
35  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
36  * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
37  * Copyright(c) 2018        Intel Corporation
38  * All rights reserved.
39  *
40  * Redistribution and use in source and binary forms, with or without
41  * modification, are permitted provided that the following conditions
42  * are met:
43  *
44  *  * Redistributions of source code must retain the above copyright
45  *    notice, this list of conditions and the following disclaimer.
46  *  * Redistributions in binary form must reproduce the above copyright
47  *    notice, this list of conditions and the following disclaimer in
48  *    the documentation and/or other materials provided with the
49  *    distribution.
50  *  * Neither the name Intel Corporation nor the names of its
51  *    contributors may be used to endorse or promote products derived
52  *    from this software without specific prior written permission.
53  *
54  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
55  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
56  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
57  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
58  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
59  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
60  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
61  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
62  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
63  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
64  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
65  *
66  *****************************************************************************/
67 #include <linux/devcoredump.h>
68 #include "iwl-drv.h"
69 #include "runtime.h"
70 #include "dbg.h"
71 #include "debugfs.h"
72 #include "iwl-io.h"
73 #include "iwl-prph.h"
74 #include "iwl-csr.h"
75 
76 /**
77  * struct iwl_fw_dump_ptrs - set of pointers needed for the fw-error-dump
78  *
79  * @fwrt_ptr: pointer to the buffer coming from fwrt
80  * @trans_ptr: pointer to struct %iwl_trans_dump_data which contains the
81  *	transport's data.
83  * @fwrt_len: length of the valid data in fwrt_ptr
84  */
85 struct iwl_fw_dump_ptrs {
86 	struct iwl_trans_dump_data *trans_ptr;
87 	void *fwrt_ptr;
88 	u32 fwrt_len;
89 };
90 
91 #define RADIO_REG_MAX_READ 0x2ad
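/*
 * Dump the radio registers: read RADIO_REG_MAX_READ bytes one by one
 * through the RSP_RADIO_CMD/RSP_RADIO_RDDAT periphery registers and
 * append them to the dump as a RADIO_REG TLV. Grabs NIC access itself.
 */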
92 static void iwl_read_radio_regs(struct iwl_fw_runtime *fwrt,
93 				struct iwl_fw_error_dump_data **dump_data)
94 {
95 	u8 *pos = (void *)(*dump_data)->data;
96 	unsigned long flags;
97 	int i;
98 
99 	IWL_DEBUG_INFO(fwrt, "WRT radio registers dump\n");
100 
101 	if (!iwl_trans_grab_nic_access(fwrt->trans, &flags))
102 		return;
103 
104 	(*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RADIO_REG);
105 	(*dump_data)->len = cpu_to_le32(RADIO_REG_MAX_READ);
106 
107 	for (i = 0; i < RADIO_REG_MAX_READ; i++) {
108 		u32 rd_cmd = RADIO_RSP_RD_CMD;
109 
110 		rd_cmd |= i << RADIO_RSP_ADDR_POS;
111 		iwl_write_prph_no_grab(fwrt->trans, RSP_RADIO_CMD, rd_cmd);
112 		*pos = (u8)iwl_read_prph_no_grab(fwrt->trans, RSP_RADIO_RDDAT);
113 
114 		pos++;
115 	}
116 
117 	*dump_data = iwl_fw_error_next_data(*dump_data);
118 
119 	iwl_trans_release_nic_access(fwrt->trans, &flags);
120 }
121 
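/*
 * Append one RXF TLV to the dump: record the FIFO status registers
 * (available bytes, read/write/fence pointers, fence mode), lock the
 * fence and read the FIFO contents word by word through the fence
 * increment register. The caller must hold NIC access.
 */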
122 static void iwl_fwrt_dump_rxf(struct iwl_fw_runtime *fwrt,
123 			      struct iwl_fw_error_dump_data **dump_data,
124 			      int size, u32 offset, int fifo_num)
125 {
126 	struct iwl_fw_error_dump_fifo *fifo_hdr;
127 	u32 *fifo_data;
128 	u32 fifo_len;
129 	int i;
130 
131 	fifo_hdr = (void *)(*dump_data)->data;
132 	fifo_data = (void *)fifo_hdr->data;
133 	fifo_len = size;
134 
135 	/* No need to try to read the data if the length is 0 */
136 	if (fifo_len == 0)
137 		return;
138 
139 	/* Add a TLV for the RXF */
140 	(*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RXF);
141 	(*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr));
142 
143 	fifo_hdr->fifo_num = cpu_to_le32(fifo_num);
144 	fifo_hdr->available_bytes =
145 		cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
146 						RXF_RD_D_SPACE + offset));
147 	fifo_hdr->wr_ptr =
148 		cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
149 						RXF_RD_WR_PTR + offset));
150 	fifo_hdr->rd_ptr =
151 		cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
152 						RXF_RD_RD_PTR + offset));
153 	fifo_hdr->fence_ptr =
154 		cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
155 						RXF_RD_FENCE_PTR + offset));
156 	fifo_hdr->fence_mode =
157 		cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
158 						RXF_SET_FENCE_MODE + offset));
159 
160 	/* Lock fence */
161 	iwl_trans_write_prph(fwrt->trans, RXF_SET_FENCE_MODE + offset, 0x1);
	/* Set the fence pointer to the same place as the WR pointer */
163 	iwl_trans_write_prph(fwrt->trans, RXF_LD_WR2FENCE + offset, 0x1);
164 	/* Set fence offset */
165 	iwl_trans_write_prph(fwrt->trans,
166 			     RXF_LD_FENCE_OFFSET_ADDR + offset, 0x0);
167 
168 	/* Read FIFO */
169 	fifo_len /= sizeof(u32); /* Size in DWORDS */
170 	for (i = 0; i < fifo_len; i++)
171 		fifo_data[i] = iwl_trans_read_prph(fwrt->trans,
172 						 RXF_FIFO_RD_FENCE_INC +
173 						 offset);
174 	*dump_data = iwl_fw_error_next_data(*dump_data);
175 }
176 
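/*
 * Append one TXF TLV to the dump: record the FIFO status registers,
 * point TXF_READ_MODIFY_ADDR at the write pointer and drain the FIFO
 * contents through TXF_READ_MODIFY_DATA. The caller must hold NIC
 * access.
 */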
177 static void iwl_fwrt_dump_txf(struct iwl_fw_runtime *fwrt,
178 			      struct iwl_fw_error_dump_data **dump_data,
179 			      int size, u32 offset, int fifo_num)
180 {
181 	struct iwl_fw_error_dump_fifo *fifo_hdr;
182 	u32 *fifo_data;
183 	u32 fifo_len;
184 	int i;
185 
186 	fifo_hdr = (void *)(*dump_data)->data;
187 	fifo_data = (void *)fifo_hdr->data;
188 	fifo_len = size;
189 
190 	/* No need to try to read the data if the length is 0 */
191 	if (fifo_len == 0)
192 		return;
193 
194 	/* Add a TLV for the FIFO */
195 	(*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXF);
196 	(*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr));
197 
198 	fifo_hdr->fifo_num = cpu_to_le32(fifo_num);
199 	fifo_hdr->available_bytes =
200 		cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
201 						TXF_FIFO_ITEM_CNT + offset));
202 	fifo_hdr->wr_ptr =
203 		cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
204 						TXF_WR_PTR + offset));
205 	fifo_hdr->rd_ptr =
206 		cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
207 						TXF_RD_PTR + offset));
208 	fifo_hdr->fence_ptr =
209 		cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
210 						TXF_FENCE_PTR + offset));
211 	fifo_hdr->fence_mode =
212 		cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
213 						TXF_LOCK_FENCE + offset));
214 
215 	/* Set the TXF_READ_MODIFY_ADDR to TXF_WR_PTR */
216 	iwl_trans_write_prph(fwrt->trans, TXF_READ_MODIFY_ADDR + offset,
217 			     TXF_WR_PTR + offset);
218 
219 	/* Dummy-read to advance the read pointer to the head */
220 	iwl_trans_read_prph(fwrt->trans, TXF_READ_MODIFY_DATA + offset);
221 
222 	/* Read FIFO */
223 	fifo_len /= sizeof(u32); /* Size in DWORDS */
224 	for (i = 0; i < fifo_len; i++)
225 		fifo_data[i] = iwl_trans_read_prph(fwrt->trans,
226 						  TXF_READ_MODIFY_DATA +
227 						  offset);
228 	*dump_data = iwl_fw_error_next_data(*dump_data);
229 }
230 
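/*
 * Dump the FIFOs selected by dbg_dump_mask: RXF1/RXF2 (plus LMAC2's
 * RXF1 where present), the per-LMAC TX FIFOs and, when the firmware
 * advertises the extended shared memory config, the UMAC internal
 * TX FIFOs.
 */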
231 static void iwl_fw_dump_fifos(struct iwl_fw_runtime *fwrt,
232 			      struct iwl_fw_error_dump_data **dump_data)
233 {
234 	struct iwl_fw_error_dump_fifo *fifo_hdr;
235 	struct iwl_fwrt_shared_mem_cfg *cfg = &fwrt->smem_cfg;
236 	u32 *fifo_data;
237 	u32 fifo_len;
238 	unsigned long flags;
239 	int i, j;
240 
241 	IWL_DEBUG_INFO(fwrt, "WRT FIFO dump\n");
242 
243 	if (!iwl_trans_grab_nic_access(fwrt->trans, &flags))
244 		return;
245 
246 	if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_RXF)) {
247 		/* Pull RXF1 */
248 		iwl_fwrt_dump_rxf(fwrt, dump_data,
249 				  cfg->lmac[0].rxfifo1_size, 0, 0);
250 		/* Pull RXF2 */
251 		iwl_fwrt_dump_rxf(fwrt, dump_data, cfg->rxfifo2_size,
252 				  RXF_DIFF_FROM_PREV, 1);
253 		/* Pull LMAC2 RXF1 */
254 		if (fwrt->smem_cfg.num_lmacs > 1)
255 			iwl_fwrt_dump_rxf(fwrt, dump_data,
256 					  cfg->lmac[1].rxfifo1_size,
257 					  LMAC2_PRPH_OFFSET, 2);
258 	}
259 
260 	if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_TXF)) {
261 		/* Pull TXF data from LMAC1 */
262 		for (i = 0; i < fwrt->smem_cfg.num_txfifo_entries; i++) {
263 			/* Mark the number of TXF we're pulling now */
264 			iwl_trans_write_prph(fwrt->trans, TXF_LARC_NUM, i);
265 			iwl_fwrt_dump_txf(fwrt, dump_data,
266 					  cfg->lmac[0].txfifo_size[i], 0, i);
267 		}
268 
269 		/* Pull TXF data from LMAC2 */
270 		if (fwrt->smem_cfg.num_lmacs > 1) {
271 			for (i = 0; i < fwrt->smem_cfg.num_txfifo_entries;
272 			     i++) {
273 				/* Mark the number of TXF we're pulling now */
274 				iwl_trans_write_prph(fwrt->trans,
275 						     TXF_LARC_NUM +
276 						     LMAC2_PRPH_OFFSET, i);
277 				iwl_fwrt_dump_txf(fwrt, dump_data,
278 						  cfg->lmac[1].txfifo_size[i],
279 						  LMAC2_PRPH_OFFSET,
280 						  i + cfg->num_txfifo_entries);
281 			}
282 		}
283 	}
284 
285 	if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_INTERNAL_TXF) &&
286 	    fw_has_capa(&fwrt->fw->ucode_capa,
287 			IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
288 		/* Pull UMAC internal TXF data from all TXFs */
289 		for (i = 0;
290 		     i < ARRAY_SIZE(fwrt->smem_cfg.internal_txfifo_size);
291 		     i++) {
292 			fifo_hdr = (void *)(*dump_data)->data;
293 			fifo_data = (void *)fifo_hdr->data;
294 			fifo_len = fwrt->smem_cfg.internal_txfifo_size[i];
295 
296 			/* No need to try to read the data if the length is 0 */
297 			if (fifo_len == 0)
298 				continue;
299 
300 			/* Add a TLV for the internal FIFOs */
301 			(*dump_data)->type =
302 				cpu_to_le32(IWL_FW_ERROR_DUMP_INTERNAL_TXF);
303 			(*dump_data)->len =
304 				cpu_to_le32(fifo_len + sizeof(*fifo_hdr));
305 
306 			fifo_hdr->fifo_num = cpu_to_le32(i);
307 
308 			/* Mark the number of TXF we're pulling now */
309 			iwl_trans_write_prph(fwrt->trans, TXF_CPU2_NUM, i +
310 				fwrt->smem_cfg.num_txfifo_entries);
311 
312 			fifo_hdr->available_bytes =
313 				cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
314 								TXF_CPU2_FIFO_ITEM_CNT));
315 			fifo_hdr->wr_ptr =
316 				cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
317 								TXF_CPU2_WR_PTR));
318 			fifo_hdr->rd_ptr =
319 				cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
320 								TXF_CPU2_RD_PTR));
321 			fifo_hdr->fence_ptr =
322 				cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
323 								TXF_CPU2_FENCE_PTR));
324 			fifo_hdr->fence_mode =
325 				cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
326 								TXF_CPU2_LOCK_FENCE));
327 
328 			/* Set TXF_CPU2_READ_MODIFY_ADDR to TXF_CPU2_WR_PTR */
329 			iwl_trans_write_prph(fwrt->trans,
330 					     TXF_CPU2_READ_MODIFY_ADDR,
331 					     TXF_CPU2_WR_PTR);
332 
333 			/* Dummy-read to advance the read pointer to head */
334 			iwl_trans_read_prph(fwrt->trans,
335 					    TXF_CPU2_READ_MODIFY_DATA);
336 
337 			/* Read FIFO */
338 			fifo_len /= sizeof(u32); /* Size in DWORDS */
339 			for (j = 0; j < fifo_len; j++)
340 				fifo_data[j] =
341 					iwl_trans_read_prph(fwrt->trans,
342 							    TXF_CPU2_READ_MODIFY_DATA);
343 			*dump_data = iwl_fw_error_next_data(*dump_data);
344 		}
345 	}
346 
347 	iwl_trans_release_nic_access(fwrt->trans, &flags);
348 }
349 
350 #define IWL8260_ICCM_OFFSET		0x44000 /* Only for B-step */
351 #define IWL8260_ICCM_LEN		0xC000 /* Only for B-step */
352 
353 struct iwl_prph_range {
354 	u32 start, end;
355 };
356 
357 static const struct iwl_prph_range iwl_prph_dump_addr_comm[] = {
358 	{ .start = 0x00a00000, .end = 0x00a00000 },
359 	{ .start = 0x00a0000c, .end = 0x00a00024 },
360 	{ .start = 0x00a0002c, .end = 0x00a0003c },
361 	{ .start = 0x00a00410, .end = 0x00a00418 },
362 	{ .start = 0x00a00420, .end = 0x00a00420 },
363 	{ .start = 0x00a00428, .end = 0x00a00428 },
364 	{ .start = 0x00a00430, .end = 0x00a0043c },
365 	{ .start = 0x00a00444, .end = 0x00a00444 },
366 	{ .start = 0x00a004c0, .end = 0x00a004cc },
367 	{ .start = 0x00a004d8, .end = 0x00a004d8 },
368 	{ .start = 0x00a004e0, .end = 0x00a004f0 },
369 	{ .start = 0x00a00840, .end = 0x00a00840 },
370 	{ .start = 0x00a00850, .end = 0x00a00858 },
371 	{ .start = 0x00a01004, .end = 0x00a01008 },
372 	{ .start = 0x00a01010, .end = 0x00a01010 },
373 	{ .start = 0x00a01018, .end = 0x00a01018 },
374 	{ .start = 0x00a01024, .end = 0x00a01024 },
375 	{ .start = 0x00a0102c, .end = 0x00a01034 },
376 	{ .start = 0x00a0103c, .end = 0x00a01040 },
377 	{ .start = 0x00a01048, .end = 0x00a01094 },
378 	{ .start = 0x00a01c00, .end = 0x00a01c20 },
379 	{ .start = 0x00a01c58, .end = 0x00a01c58 },
380 	{ .start = 0x00a01c7c, .end = 0x00a01c7c },
381 	{ .start = 0x00a01c28, .end = 0x00a01c54 },
382 	{ .start = 0x00a01c5c, .end = 0x00a01c5c },
383 	{ .start = 0x00a01c60, .end = 0x00a01cdc },
384 	{ .start = 0x00a01ce0, .end = 0x00a01d0c },
385 	{ .start = 0x00a01d18, .end = 0x00a01d20 },
386 	{ .start = 0x00a01d2c, .end = 0x00a01d30 },
387 	{ .start = 0x00a01d40, .end = 0x00a01d5c },
388 	{ .start = 0x00a01d80, .end = 0x00a01d80 },
389 	{ .start = 0x00a01d98, .end = 0x00a01d9c },
390 	{ .start = 0x00a01da8, .end = 0x00a01da8 },
391 	{ .start = 0x00a01db8, .end = 0x00a01df4 },
392 	{ .start = 0x00a01dc0, .end = 0x00a01dfc },
393 	{ .start = 0x00a01e00, .end = 0x00a01e2c },
394 	{ .start = 0x00a01e40, .end = 0x00a01e60 },
395 	{ .start = 0x00a01e68, .end = 0x00a01e6c },
396 	{ .start = 0x00a01e74, .end = 0x00a01e74 },
397 	{ .start = 0x00a01e84, .end = 0x00a01e90 },
398 	{ .start = 0x00a01e9c, .end = 0x00a01ec4 },
399 	{ .start = 0x00a01ed0, .end = 0x00a01ee0 },
400 	{ .start = 0x00a01f00, .end = 0x00a01f1c },
401 	{ .start = 0x00a01f44, .end = 0x00a01ffc },
402 	{ .start = 0x00a02000, .end = 0x00a02048 },
403 	{ .start = 0x00a02068, .end = 0x00a020f0 },
404 	{ .start = 0x00a02100, .end = 0x00a02118 },
405 	{ .start = 0x00a02140, .end = 0x00a0214c },
406 	{ .start = 0x00a02168, .end = 0x00a0218c },
407 	{ .start = 0x00a021c0, .end = 0x00a021c0 },
408 	{ .start = 0x00a02400, .end = 0x00a02410 },
409 	{ .start = 0x00a02418, .end = 0x00a02420 },
410 	{ .start = 0x00a02428, .end = 0x00a0242c },
411 	{ .start = 0x00a02434, .end = 0x00a02434 },
412 	{ .start = 0x00a02440, .end = 0x00a02460 },
413 	{ .start = 0x00a02468, .end = 0x00a024b0 },
414 	{ .start = 0x00a024c8, .end = 0x00a024cc },
415 	{ .start = 0x00a02500, .end = 0x00a02504 },
416 	{ .start = 0x00a0250c, .end = 0x00a02510 },
417 	{ .start = 0x00a02540, .end = 0x00a02554 },
418 	{ .start = 0x00a02580, .end = 0x00a025f4 },
419 	{ .start = 0x00a02600, .end = 0x00a0260c },
420 	{ .start = 0x00a02648, .end = 0x00a02650 },
421 	{ .start = 0x00a02680, .end = 0x00a02680 },
422 	{ .start = 0x00a026c0, .end = 0x00a026d0 },
423 	{ .start = 0x00a02700, .end = 0x00a0270c },
424 	{ .start = 0x00a02804, .end = 0x00a02804 },
425 	{ .start = 0x00a02818, .end = 0x00a0281c },
426 	{ .start = 0x00a02c00, .end = 0x00a02db4 },
427 	{ .start = 0x00a02df4, .end = 0x00a02fb0 },
428 	{ .start = 0x00a03000, .end = 0x00a03014 },
429 	{ .start = 0x00a0301c, .end = 0x00a0302c },
430 	{ .start = 0x00a03034, .end = 0x00a03038 },
431 	{ .start = 0x00a03040, .end = 0x00a03048 },
432 	{ .start = 0x00a03060, .end = 0x00a03068 },
433 	{ .start = 0x00a03070, .end = 0x00a03074 },
434 	{ .start = 0x00a0307c, .end = 0x00a0307c },
435 	{ .start = 0x00a03080, .end = 0x00a03084 },
436 	{ .start = 0x00a0308c, .end = 0x00a03090 },
437 	{ .start = 0x00a03098, .end = 0x00a03098 },
438 	{ .start = 0x00a030a0, .end = 0x00a030a0 },
439 	{ .start = 0x00a030a8, .end = 0x00a030b4 },
440 	{ .start = 0x00a030bc, .end = 0x00a030bc },
441 	{ .start = 0x00a030c0, .end = 0x00a0312c },
442 	{ .start = 0x00a03c00, .end = 0x00a03c5c },
443 	{ .start = 0x00a04400, .end = 0x00a04454 },
444 	{ .start = 0x00a04460, .end = 0x00a04474 },
445 	{ .start = 0x00a044c0, .end = 0x00a044ec },
446 	{ .start = 0x00a04500, .end = 0x00a04504 },
447 	{ .start = 0x00a04510, .end = 0x00a04538 },
448 	{ .start = 0x00a04540, .end = 0x00a04548 },
449 	{ .start = 0x00a04560, .end = 0x00a0457c },
450 	{ .start = 0x00a04590, .end = 0x00a04598 },
451 	{ .start = 0x00a045c0, .end = 0x00a045f4 },
452 };
453 
454 static const struct iwl_prph_range iwl_prph_dump_addr_9000[] = {
455 	{ .start = 0x00a05c00, .end = 0x00a05c18 },
456 	{ .start = 0x00a05400, .end = 0x00a056e8 },
457 	{ .start = 0x00a08000, .end = 0x00a098bc },
458 	{ .start = 0x00a02400, .end = 0x00a02758 },
459 };
460 
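/*
 * Read len_bytes of periphery registers starting at start into data,
 * one dword at a time. The caller must already hold NIC access.
 */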
461 static void _iwl_read_prph_block(struct iwl_trans *trans, u32 start,
462 				 u32 len_bytes, __le32 *data)
463 {
464 	u32 i;
465 
466 	for (i = 0; i < len_bytes; i += 4)
467 		*data++ = cpu_to_le32(iwl_read_prph_no_grab(trans, start + i));
468 }
469 
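/*
 * Like _iwl_read_prph_block(), but grabs and releases NIC access
 * around the read. Returns false if NIC access couldn't be acquired.
 */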
470 static bool iwl_read_prph_block(struct iwl_trans *trans, u32 start,
471 				u32 len_bytes, __le32 *data)
472 {
473 	unsigned long flags;
474 	bool success = false;
475 
476 	if (iwl_trans_grab_nic_access(trans, &flags)) {
477 		success = true;
478 		_iwl_read_prph_block(trans, start, len_bytes, data);
479 		iwl_trans_release_nic_access(trans, &flags);
480 	}
481 
482 	return success;
483 }
484 
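/*
 * Append one PRPH TLV per address range in iwl_prph_dump_addr; each
 * range is inclusive on both ends. Grabs NIC access for the duration
 * of the dump.
 */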
485 static void iwl_dump_prph(struct iwl_trans *trans,
486 			  struct iwl_fw_error_dump_data **data,
487 			  const struct iwl_prph_range *iwl_prph_dump_addr,
488 			  u32 range_len)
489 {
490 	struct iwl_fw_error_dump_prph *prph;
491 	unsigned long flags;
492 	u32 i;
493 
494 	IWL_DEBUG_INFO(trans, "WRT PRPH dump\n");
495 
496 	if (!iwl_trans_grab_nic_access(trans, &flags))
497 		return;
498 
499 	for (i = 0; i < range_len; i++) {
500 		/* The range includes both boundaries */
501 		int num_bytes_in_chunk = iwl_prph_dump_addr[i].end -
502 			 iwl_prph_dump_addr[i].start + 4;
503 
504 		(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PRPH);
505 		(*data)->len = cpu_to_le32(sizeof(*prph) +
506 					num_bytes_in_chunk);
507 		prph = (void *)(*data)->data;
508 		prph->prph_start = cpu_to_le32(iwl_prph_dump_addr[i].start);
509 
510 		_iwl_read_prph_block(trans, iwl_prph_dump_addr[i].start,
511 				     /* our range is inclusive, hence + 4 */
512 				     iwl_prph_dump_addr[i].end -
513 				     iwl_prph_dump_addr[i].start + 4,
514 				     (void *)prph->data);
515 
516 		*data = iwl_fw_error_next_data(*data);
517 	}
518 
519 	iwl_trans_release_nic_access(trans, &flags);
520 }
521 
522 /*
523  * alloc_sgtable - allocates scallerlist table in the given size,
524  * fills it with pages and returns it
525  * @size: the size (in bytes) of the table
526 */
527 static struct scatterlist *alloc_sgtable(int size)
528 {
529 	int alloc_size, nents, i;
530 	struct page *new_page;
531 	struct scatterlist *iter;
532 	struct scatterlist *table;
533 
534 	nents = DIV_ROUND_UP(size, PAGE_SIZE);
535 	table = kcalloc(nents, sizeof(*table), GFP_KERNEL);
536 	if (!table)
537 		return NULL;
538 	sg_init_table(table, nents);
539 	iter = table;
540 	for_each_sg(table, iter, sg_nents(table), i) {
541 		new_page = alloc_page(GFP_KERNEL);
542 		if (!new_page) {
543 			/* release all previous allocated pages in the table */
544 			iter = table;
545 			for_each_sg(table, iter, sg_nents(table), i) {
546 				new_page = sg_page(iter);
547 				if (new_page)
548 					__free_page(new_page);
549 			}
			kfree(table);
			return NULL;
551 		}
552 		alloc_size = min_t(int, size, PAGE_SIZE);
553 		size -= PAGE_SIZE;
554 		sg_set_page(iter, new_page, alloc_size, 0);
555 	}
556 	return table;
557 }
558 
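/*
 * Collect the firmware error dump: compute the total file length from
 * the TLVs enabled in dbg_dump_mask, fill in the device info, shared
 * memory config, FIFO, memory, paging and PRPH TLVs, append the
 * transport's dump data and hand everything to devcoredump.
 */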
559 void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
560 {
561 	struct iwl_fw_error_dump_file *dump_file;
562 	struct iwl_fw_error_dump_data *dump_data;
563 	struct iwl_fw_error_dump_info *dump_info;
564 	struct iwl_fw_error_dump_mem *dump_mem;
565 	struct iwl_fw_error_dump_smem_cfg *dump_smem_cfg;
566 	struct iwl_fw_error_dump_trigger_desc *dump_trig;
567 	struct iwl_fw_dump_ptrs *fw_error_dump;
568 	struct scatterlist *sg_dump_data;
569 	u32 sram_len, sram_ofs;
570 	const struct iwl_fw_dbg_mem_seg_tlv *fw_dbg_mem = fwrt->fw->dbg_mem_tlv;
571 	struct iwl_fwrt_shared_mem_cfg *mem_cfg = &fwrt->smem_cfg;
572 	u32 file_len, fifo_data_len = 0, prph_len = 0, radio_len = 0;
573 	u32 smem_len = fwrt->fw->n_dbg_mem_tlv ? 0 : fwrt->trans->cfg->smem_len;
574 	u32 sram2_len = fwrt->fw->n_dbg_mem_tlv ?
575 				0 : fwrt->trans->cfg->dccm2_len;
576 	bool monitor_dump_only = false;
577 	int i;
578 
579 	IWL_DEBUG_INFO(fwrt, "WRT dump start\n");
580 
581 	/* there's no point in fw dump if the bus is dead */
582 	if (test_bit(STATUS_TRANS_DEAD, &fwrt->trans->status)) {
583 		IWL_ERR(fwrt, "Skip fw error dump since bus is dead\n");
584 		goto out;
585 	}
586 
587 	if (fwrt->dump.trig &&
588 	    fwrt->dump.trig->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY)
589 		monitor_dump_only = true;
590 
591 	fw_error_dump = kzalloc(sizeof(*fw_error_dump), GFP_KERNEL);
592 	if (!fw_error_dump)
593 		goto out;
594 
	/* SRAM - include stack CCM if the driver knows the values for it */
596 	if (!fwrt->trans->cfg->dccm_offset || !fwrt->trans->cfg->dccm_len) {
597 		const struct fw_img *img;
598 
599 		img = &fwrt->fw->img[fwrt->cur_fw_img];
600 		sram_ofs = img->sec[IWL_UCODE_SECTION_DATA].offset;
601 		sram_len = img->sec[IWL_UCODE_SECTION_DATA].len;
602 	} else {
603 		sram_ofs = fwrt->trans->cfg->dccm_offset;
604 		sram_len = fwrt->trans->cfg->dccm_len;
605 	}
606 
607 	/* reading RXF/TXF sizes */
608 	if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status)) {
609 		fifo_data_len = 0;
610 
611 		if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_RXF)) {
612 
613 			/* Count RXF2 size */
614 			if (mem_cfg->rxfifo2_size) {
615 				/* Add header info */
616 				fifo_data_len +=
617 					mem_cfg->rxfifo2_size +
618 					sizeof(*dump_data) +
619 					sizeof(struct iwl_fw_error_dump_fifo);
620 			}
621 
622 			/* Count RXF1 sizes */
623 			for (i = 0; i < mem_cfg->num_lmacs; i++) {
624 				if (!mem_cfg->lmac[i].rxfifo1_size)
625 					continue;
626 
627 				/* Add header info */
628 				fifo_data_len +=
629 					mem_cfg->lmac[i].rxfifo1_size +
630 					sizeof(*dump_data) +
631 					sizeof(struct iwl_fw_error_dump_fifo);
632 			}
633 		}
634 
635 		if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_TXF)) {
636 			size_t fifo_const_len = sizeof(*dump_data) +
637 				sizeof(struct iwl_fw_error_dump_fifo);
638 
639 			/* Count TXF sizes */
640 			for (i = 0; i < mem_cfg->num_lmacs; i++) {
641 				int j;
642 
643 				for (j = 0; j < mem_cfg->num_txfifo_entries;
644 				     j++) {
645 					if (!mem_cfg->lmac[i].txfifo_size[j])
646 						continue;
647 
648 					/* Add header info */
649 					fifo_data_len +=
650 						fifo_const_len +
651 						mem_cfg->lmac[i].txfifo_size[j];
652 				}
653 			}
654 		}
655 
656 		if ((fwrt->fw->dbg_dump_mask &
657 		    BIT(IWL_FW_ERROR_DUMP_INTERNAL_TXF)) &&
658 		    fw_has_capa(&fwrt->fw->ucode_capa,
659 				IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
660 			for (i = 0;
661 			     i < ARRAY_SIZE(mem_cfg->internal_txfifo_size);
662 			     i++) {
663 				if (!mem_cfg->internal_txfifo_size[i])
664 					continue;
665 
666 				/* Add header info */
667 				fifo_data_len +=
668 					mem_cfg->internal_txfifo_size[i] +
669 					sizeof(*dump_data) +
670 					sizeof(struct iwl_fw_error_dump_fifo);
671 			}
672 		}
673 
674 		/* Make room for PRPH registers */
675 		if (!fwrt->trans->cfg->gen2 &&
676 		    fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PRPH)) {
677 			for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr_comm);
678 			     i++) {
679 				/* The range includes both boundaries */
680 				int num_bytes_in_chunk =
681 					iwl_prph_dump_addr_comm[i].end -
682 					iwl_prph_dump_addr_comm[i].start + 4;
683 
684 				prph_len += sizeof(*dump_data) +
685 					sizeof(struct iwl_fw_error_dump_prph) +
686 					num_bytes_in_chunk;
687 			}
688 		}
689 
690 		if (!fwrt->trans->cfg->gen2 &&
691 		    fwrt->trans->cfg->mq_rx_supported &&
692 		    fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PRPH)) {
693 			for (i = 0; i <
694 				ARRAY_SIZE(iwl_prph_dump_addr_9000); i++) {
695 				/* The range includes both boundaries */
696 				int num_bytes_in_chunk =
697 					iwl_prph_dump_addr_9000[i].end -
698 					iwl_prph_dump_addr_9000[i].start + 4;
699 
700 				prph_len += sizeof(*dump_data) +
701 					sizeof(struct iwl_fw_error_dump_prph) +
702 					num_bytes_in_chunk;
703 			}
704 		}
705 
706 		if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000 &&
707 		    fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_RADIO_REG))
708 			radio_len = sizeof(*dump_data) + RADIO_REG_MAX_READ;
709 	}
710 
711 	file_len = sizeof(*dump_file) +
712 		   fifo_data_len +
713 		   prph_len +
714 		   radio_len;
715 
716 	if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_DEV_FW_INFO))
717 		file_len += sizeof(*dump_data) + sizeof(*dump_info);
718 	if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM_CFG))
719 		file_len += sizeof(*dump_data) + sizeof(*dump_smem_cfg);
720 
721 	if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)) {
722 		/* Make room for the SMEM, if it exists */
723 		if (smem_len)
724 			file_len += sizeof(*dump_data) + sizeof(*dump_mem) +
725 				smem_len;
726 
727 		/* Make room for the secondary SRAM, if it exists */
728 		if (sram2_len)
729 			file_len += sizeof(*dump_data) + sizeof(*dump_mem) +
730 				sram2_len;
731 
732 		/* Make room for MEM segments */
733 		for (i = 0; i < fwrt->fw->n_dbg_mem_tlv; i++) {
734 			file_len += sizeof(*dump_data) + sizeof(*dump_mem) +
735 				    le32_to_cpu(fw_dbg_mem[i].len);
736 		}
737 	}
738 
739 	/* Make room for fw's virtual image pages, if it exists */
740 	if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING) &&
741 	    !fwrt->trans->cfg->gen2 &&
742 	    fwrt->fw->img[fwrt->cur_fw_img].paging_mem_size &&
743 	    fwrt->fw_paging_db[0].fw_paging_block)
744 		file_len += fwrt->num_of_paging_blk *
745 			(sizeof(*dump_data) +
746 			 sizeof(struct iwl_fw_error_dump_paging) +
747 			 PAGING_BLOCK_SIZE);
748 
749 	/* If we only want a monitor dump, reset the file length */
750 	if (monitor_dump_only) {
751 		file_len = sizeof(*dump_file) + sizeof(*dump_data) * 2 +
752 			   sizeof(*dump_info) + sizeof(*dump_smem_cfg);
753 	}
754 
755 	if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_ERROR_INFO) &&
756 	    fwrt->dump.desc)
757 		file_len += sizeof(*dump_data) + sizeof(*dump_trig) +
758 			    fwrt->dump.desc->len;
759 
760 	if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM) &&
761 	    !fwrt->fw->n_dbg_mem_tlv)
762 		file_len += sizeof(*dump_data) + sram_len + sizeof(*dump_mem);
763 
764 	dump_file = vzalloc(file_len);
765 	if (!dump_file) {
766 		kfree(fw_error_dump);
767 		goto out;
768 	}
769 
770 	fw_error_dump->fwrt_ptr = dump_file;
771 
772 	dump_file->barker = cpu_to_le32(IWL_FW_ERROR_DUMP_BARKER);
773 	dump_data = (void *)dump_file->data;
774 
775 	if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_DEV_FW_INFO)) {
776 		dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_DEV_FW_INFO);
777 		dump_data->len = cpu_to_le32(sizeof(*dump_info));
778 		dump_info = (void *)dump_data->data;
779 		dump_info->device_family =
780 			fwrt->trans->cfg->device_family ==
781 			IWL_DEVICE_FAMILY_7000 ?
782 				cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_7) :
783 				cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_8);
784 		dump_info->hw_step =
785 			cpu_to_le32(CSR_HW_REV_STEP(fwrt->trans->hw_rev));
786 		memcpy(dump_info->fw_human_readable, fwrt->fw->human_readable,
787 		       sizeof(dump_info->fw_human_readable));
788 		strncpy(dump_info->dev_human_readable, fwrt->trans->cfg->name,
789 			sizeof(dump_info->dev_human_readable) - 1);
790 		strncpy(dump_info->bus_human_readable, fwrt->dev->bus->name,
791 			sizeof(dump_info->bus_human_readable) - 1);
792 
793 		dump_data = iwl_fw_error_next_data(dump_data);
794 	}
795 
796 	if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM_CFG)) {
797 		/* Dump shared memory configuration */
798 		dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_CFG);
799 		dump_data->len = cpu_to_le32(sizeof(*dump_smem_cfg));
800 		dump_smem_cfg = (void *)dump_data->data;
801 		dump_smem_cfg->num_lmacs = cpu_to_le32(mem_cfg->num_lmacs);
802 		dump_smem_cfg->num_txfifo_entries =
803 			cpu_to_le32(mem_cfg->num_txfifo_entries);
804 		for (i = 0; i < MAX_NUM_LMAC; i++) {
805 			int j;
806 			u32 *txf_size = mem_cfg->lmac[i].txfifo_size;
807 
808 			for (j = 0; j < TX_FIFO_MAX_NUM; j++)
809 				dump_smem_cfg->lmac[i].txfifo_size[j] =
810 					cpu_to_le32(txf_size[j]);
811 			dump_smem_cfg->lmac[i].rxfifo1_size =
812 				cpu_to_le32(mem_cfg->lmac[i].rxfifo1_size);
813 		}
814 		dump_smem_cfg->rxfifo2_size =
815 			cpu_to_le32(mem_cfg->rxfifo2_size);
816 		dump_smem_cfg->internal_txfifo_addr =
817 			cpu_to_le32(mem_cfg->internal_txfifo_addr);
818 		for (i = 0; i < TX_FIFO_INTERNAL_MAX_NUM; i++) {
819 			dump_smem_cfg->internal_txfifo_size[i] =
820 				cpu_to_le32(mem_cfg->internal_txfifo_size[i]);
821 		}
822 
823 		dump_data = iwl_fw_error_next_data(dump_data);
824 	}
825 
826 	/* We only dump the FIFOs if the FW is in error state */
827 	if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status)) {
828 		iwl_fw_dump_fifos(fwrt, &dump_data);
829 		if (radio_len)
830 			iwl_read_radio_regs(fwrt, &dump_data);
831 	}
832 
833 	if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_ERROR_INFO) &&
834 	    fwrt->dump.desc) {
835 		dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_ERROR_INFO);
836 		dump_data->len = cpu_to_le32(sizeof(*dump_trig) +
837 					     fwrt->dump.desc->len);
838 		dump_trig = (void *)dump_data->data;
839 		memcpy(dump_trig, &fwrt->dump.desc->trig_desc,
840 		       sizeof(*dump_trig) + fwrt->dump.desc->len);
841 
842 		dump_data = iwl_fw_error_next_data(dump_data);
843 	}
844 
	/* If we only want a monitor dump, skip to the transport data */
846 	if (monitor_dump_only)
847 		goto dump_trans_data;
848 
849 	if (!fwrt->fw->n_dbg_mem_tlv &&
850 	    fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)) {
851 		dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
852 		dump_data->len = cpu_to_le32(sram_len + sizeof(*dump_mem));
853 		dump_mem = (void *)dump_data->data;
854 		dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM);
855 		dump_mem->offset = cpu_to_le32(sram_ofs);
856 		iwl_trans_read_mem_bytes(fwrt->trans, sram_ofs, dump_mem->data,
857 					 sram_len);
858 		dump_data = iwl_fw_error_next_data(dump_data);
859 	}
860 
861 	for (i = 0; i < fwrt->fw->n_dbg_mem_tlv; i++) {
862 		u32 len = le32_to_cpu(fw_dbg_mem[i].len);
863 		u32 ofs = le32_to_cpu(fw_dbg_mem[i].ofs);
864 		bool success;
865 
866 		if (!(fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)))
867 			break;
868 
869 		dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
870 		dump_data->len = cpu_to_le32(len + sizeof(*dump_mem));
871 		dump_mem = (void *)dump_data->data;
872 		dump_mem->type = fw_dbg_mem[i].data_type;
873 		dump_mem->offset = cpu_to_le32(ofs);
874 
875 		IWL_DEBUG_INFO(fwrt, "WRT memory dump. Type=%u\n",
876 			       dump_mem->type);
877 
878 		switch (dump_mem->type & cpu_to_le32(FW_DBG_MEM_TYPE_MASK)) {
879 		case cpu_to_le32(FW_DBG_MEM_TYPE_REGULAR):
880 			iwl_trans_read_mem_bytes(fwrt->trans, ofs,
881 						 dump_mem->data,
882 						 len);
883 			success = true;
884 			break;
885 		case cpu_to_le32(FW_DBG_MEM_TYPE_PRPH):
886 			success = iwl_read_prph_block(fwrt->trans, ofs, len,
887 						      (void *)dump_mem->data);
888 			break;
889 		default:
890 			/*
891 			 * shouldn't get here, we ignored this kind
892 			 * of TLV earlier during the TLV parsing?!
893 			 */
894 			WARN_ON(1);
895 			success = false;
896 		}
897 
898 		if (success)
899 			dump_data = iwl_fw_error_next_data(dump_data);
900 	}
901 
902 	if (smem_len && fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)) {
903 		IWL_DEBUG_INFO(fwrt, "WRT SMEM dump\n");
904 		dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
905 		dump_data->len = cpu_to_le32(smem_len + sizeof(*dump_mem));
906 		dump_mem = (void *)dump_data->data;
907 		dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SMEM);
908 		dump_mem->offset = cpu_to_le32(fwrt->trans->cfg->smem_offset);
909 		iwl_trans_read_mem_bytes(fwrt->trans,
910 					 fwrt->trans->cfg->smem_offset,
911 					 dump_mem->data, smem_len);
912 		dump_data = iwl_fw_error_next_data(dump_data);
913 	}
914 
915 	if (sram2_len && fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)) {
916 		IWL_DEBUG_INFO(fwrt, "WRT SRAM dump\n");
917 		dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
918 		dump_data->len = cpu_to_le32(sram2_len + sizeof(*dump_mem));
919 		dump_mem = (void *)dump_data->data;
920 		dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM);
921 		dump_mem->offset = cpu_to_le32(fwrt->trans->cfg->dccm2_offset);
922 		iwl_trans_read_mem_bytes(fwrt->trans,
923 					 fwrt->trans->cfg->dccm2_offset,
924 					 dump_mem->data, sram2_len);
925 		dump_data = iwl_fw_error_next_data(dump_data);
926 	}
927 
928 	/* Dump fw's virtual image */
929 	if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING) &&
930 	    !fwrt->trans->cfg->gen2 &&
931 	    fwrt->fw->img[fwrt->cur_fw_img].paging_mem_size &&
932 	    fwrt->fw_paging_db[0].fw_paging_block) {
933 		IWL_DEBUG_INFO(fwrt, "WRT paging dump\n");
934 		for (i = 1; i < fwrt->num_of_paging_blk + 1; i++) {
935 			struct iwl_fw_error_dump_paging *paging;
936 			struct page *pages =
937 				fwrt->fw_paging_db[i].fw_paging_block;
938 			dma_addr_t addr = fwrt->fw_paging_db[i].fw_paging_phys;
939 
940 			dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING);
941 			dump_data->len = cpu_to_le32(sizeof(*paging) +
942 						     PAGING_BLOCK_SIZE);
943 			paging = (void *)dump_data->data;
944 			paging->index = cpu_to_le32(i);
945 			dma_sync_single_for_cpu(fwrt->trans->dev, addr,
946 						PAGING_BLOCK_SIZE,
947 						DMA_BIDIRECTIONAL);
948 			memcpy(paging->data, page_address(pages),
949 			       PAGING_BLOCK_SIZE);
950 			dump_data = iwl_fw_error_next_data(dump_data);
951 		}
952 	}
953 
954 	if (prph_len) {
955 		iwl_dump_prph(fwrt->trans, &dump_data,
956 			      iwl_prph_dump_addr_comm,
957 			      ARRAY_SIZE(iwl_prph_dump_addr_comm));
958 
959 		if (fwrt->trans->cfg->mq_rx_supported)
960 			iwl_dump_prph(fwrt->trans, &dump_data,
961 				      iwl_prph_dump_addr_9000,
962 				      ARRAY_SIZE(iwl_prph_dump_addr_9000));
963 	}
964 
965 dump_trans_data:
966 	fw_error_dump->trans_ptr = iwl_trans_dump_data(fwrt->trans,
967 						       fwrt->dump.trig);
968 	fw_error_dump->fwrt_len = file_len;
969 	if (fw_error_dump->trans_ptr)
970 		file_len += fw_error_dump->trans_ptr->len;
971 	dump_file->file_len = cpu_to_le32(file_len);
972 
973 	sg_dump_data = alloc_sgtable(file_len);
974 	if (sg_dump_data) {
975 		sg_pcopy_from_buffer(sg_dump_data,
976 				     sg_nents(sg_dump_data),
977 				     fw_error_dump->fwrt_ptr,
978 				     fw_error_dump->fwrt_len, 0);
979 		if (fw_error_dump->trans_ptr)
980 			sg_pcopy_from_buffer(sg_dump_data,
981 					     sg_nents(sg_dump_data),
982 					     fw_error_dump->trans_ptr->data,
983 					     fw_error_dump->trans_ptr->len,
984 					     fw_error_dump->fwrt_len);
985 		dev_coredumpsg(fwrt->trans->dev, sg_dump_data, file_len,
986 			       GFP_KERNEL);
987 	}
988 	vfree(fw_error_dump->fwrt_ptr);
989 	vfree(fw_error_dump->trans_ptr);
990 	kfree(fw_error_dump);
991 
992 out:
993 	iwl_fw_free_dump_desc(fwrt);
994 	clear_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status);
995 	IWL_DEBUG_INFO(fwrt, "WRT dump done\n");
996 }
997 IWL_EXPORT_SYMBOL(iwl_fw_error_dump);
998 
999 const struct iwl_fw_dump_desc iwl_dump_desc_assert = {
1000 	.trig_desc = {
1001 		.type = cpu_to_le32(FW_DBG_TRIGGER_FW_ASSERT),
1002 	},
1003 };
1004 IWL_EXPORT_SYMBOL(iwl_dump_desc_assert);
1005 
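/*
 * Arm the dump worker with the given dump descriptor and optional
 * trigger. Returns -EBUSY if a dump is already being collected and
 * -EIO if the firmware isn't alive.
 */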
1006 int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt,
1007 			    const struct iwl_fw_dump_desc *desc,
1008 			    const struct iwl_fw_dbg_trigger_tlv *trigger)
1009 {
1010 	unsigned int delay = 0;
1011 
1012 	if (trigger)
1013 		delay = msecs_to_jiffies(le32_to_cpu(trigger->stop_delay));
1014 
1015 	/*
1016 	 * If the loading of the FW completed successfully, the next step is to
	 * get the SMEM config data. Thus, if fwrt->smem_cfg.num_lmacs is
	 * non-zero, the FW was already loaded successfully. If the state is
	 * "NO_FW" in such a case - WARN and exit, since FW may be dead.
	 * Otherwise, we
1020 	 * can try to collect the data, since FW might just not be fully
1021 	 * loaded (no "ALIVE" yet), and the debug data is accessible.
1022 	 *
1023 	 * Corner case: got the FW alive but crashed before getting the SMEM
1024 	 *	config. In such a case, due to HW access problems, we might
1025 	 *	collect garbage.
1026 	 */
1027 	if (WARN((fwrt->trans->state == IWL_TRANS_NO_FW) &&
1028 		 fwrt->smem_cfg.num_lmacs,
1029 		 "Can't collect dbg data when FW isn't alive\n"))
1030 		return -EIO;
1031 
1032 	if (test_and_set_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status))
1033 		return -EBUSY;
1034 
1035 	if (WARN_ON(fwrt->dump.desc))
1036 		iwl_fw_free_dump_desc(fwrt);
1037 
1038 	IWL_WARN(fwrt, "Collecting data: trigger %d fired.\n",
1039 		 le32_to_cpu(desc->trig_desc.type));
1040 
1041 	fwrt->dump.desc = desc;
1042 	fwrt->dump.trig = trigger;
1043 
1044 	schedule_delayed_work(&fwrt->dump.wk, delay);
1045 
1046 	return 0;
1047 }
1048 IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect_desc);
1049 
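/*
 * Build a dump descriptor carrying the trigger type and an optional
 * description string, then schedule the collection. If the trigger
 * asks for a forced restart, fire an NMI instead of dumping.
 */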
1050 int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt,
1051 		       enum iwl_fw_dbg_trigger trig,
1052 		       const char *str, size_t len,
1053 		       const struct iwl_fw_dbg_trigger_tlv *trigger)
1054 {
1055 	struct iwl_fw_dump_desc *desc;
1056 
1057 	if (trigger && trigger->flags & IWL_FW_DBG_FORCE_RESTART) {
1058 		IWL_WARN(fwrt, "Force restart: trigger %d fired.\n", trig);
1059 		iwl_force_nmi(fwrt->trans);
1060 		return 0;
1061 	}
1062 
1063 	desc = kzalloc(sizeof(*desc) + len, GFP_ATOMIC);
1064 	if (!desc)
1065 		return -ENOMEM;
1066 
1067 	desc->len = len;
1068 	desc->trig_desc.type = cpu_to_le32(trig);
1069 	memcpy(desc->trig_desc.data, str, len);
1070 
1071 	return iwl_fw_dbg_collect_desc(fwrt, desc, trigger);
1072 }
1073 IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect);
1074 
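/*
 * Collect a dump for a trigger TLV that still has occurrences left:
 * format the optional message, schedule the collection and decrement
 * the remaining occurrence count on success.
 */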
1075 int iwl_fw_dbg_collect_trig(struct iwl_fw_runtime *fwrt,
1076 			    struct iwl_fw_dbg_trigger_tlv *trigger,
1077 			    const char *fmt, ...)
1078 {
1079 	u16 occurrences = le16_to_cpu(trigger->occurrences);
1080 	int ret, len = 0;
1081 	char buf[64];
1082 
1083 	if (!occurrences)
1084 		return 0;
1085 
1086 	if (fmt) {
1087 		va_list ap;
1088 
1089 		buf[sizeof(buf) - 1] = '\0';
1090 
1091 		va_start(ap, fmt);
1092 		vsnprintf(buf, sizeof(buf), fmt, ap);
1093 		va_end(ap);
1094 
1095 		/* check for truncation */
1096 		if (WARN_ON_ONCE(buf[sizeof(buf) - 1]))
1097 			buf[sizeof(buf) - 1] = '\0';
1098 
1099 		len = strlen(buf) + 1;
1100 	}
1101 
1102 	ret = iwl_fw_dbg_collect(fwrt, le32_to_cpu(trigger->id), buf, len,
1103 				 trigger);
1104 
1105 	if (ret)
1106 		return ret;
1107 
1108 	trigger->occurrences = cpu_to_le16(occurrences - 1);
1109 	return 0;
1110 }
1111 IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect_trig);
1112 
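/*
 * Apply debug configuration conf_id by sending the host commands from
 * the matching dbg_conf TLV, after starting a timestamp marker command
 * to help sync the logs.
 */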
1113 int iwl_fw_start_dbg_conf(struct iwl_fw_runtime *fwrt, u8 conf_id)
1114 {
1115 	u8 *ptr;
1116 	int ret;
1117 	int i;
1118 
1119 	if (WARN_ONCE(conf_id >= ARRAY_SIZE(fwrt->fw->dbg_conf_tlv),
1120 		      "Invalid configuration %d\n", conf_id))
1121 		return -EINVAL;
1122 
1123 	/* EARLY START - firmware's configuration is hard coded */
1124 	if ((!fwrt->fw->dbg_conf_tlv[conf_id] ||
1125 	     !fwrt->fw->dbg_conf_tlv[conf_id]->num_of_hcmds) &&
1126 	    conf_id == FW_DBG_START_FROM_ALIVE)
1127 		return 0;
1128 
1129 	if (!fwrt->fw->dbg_conf_tlv[conf_id])
1130 		return -EINVAL;
1131 
1132 	if (fwrt->dump.conf != FW_DBG_INVALID)
1133 		IWL_WARN(fwrt, "FW already configured (%d) - re-configuring\n",
1134 			 fwrt->dump.conf);
1135 
1136 	/* start default config marker cmd for syncing logs */
1137 	iwl_fw_trigger_timestamp(fwrt, 1);
1138 
1139 	/* Send all HCMDs for configuring the FW debug */
1140 	ptr = (void *)&fwrt->fw->dbg_conf_tlv[conf_id]->hcmd;
1141 	for (i = 0; i < fwrt->fw->dbg_conf_tlv[conf_id]->num_of_hcmds; i++) {
1142 		struct iwl_fw_dbg_conf_hcmd *cmd = (void *)ptr;
1143 		struct iwl_host_cmd hcmd = {
1144 			.id = cmd->id,
1145 			.len = { le16_to_cpu(cmd->len), },
1146 			.data = { cmd->data, },
1147 		};
1148 
1149 		ret = iwl_trans_send_cmd(fwrt->trans, &hcmd);
1150 		if (ret)
1151 			return ret;
1152 
1153 		ptr += sizeof(*cmd);
1154 		ptr += le16_to_cpu(cmd->len);
1155 	}
1156 
1157 	fwrt->dump.conf = conf_id;
1158 
1159 	return 0;
1160 }
1161 IWL_EXPORT_SYMBOL(iwl_fw_start_dbg_conf);
1162 
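/*
 * Delayed work handler for dump collection: stop the debug recording,
 * collect the dump and, if the firmware didn't crash, restart the
 * recording using the family-specific registers.
 */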
1163 void iwl_fw_error_dump_wk(struct work_struct *work)
1164 {
1165 	struct iwl_fw_runtime *fwrt =
1166 		container_of(work, struct iwl_fw_runtime, dump.wk.work);
1167 
1168 	if (fwrt->ops && fwrt->ops->dump_start &&
1169 	    fwrt->ops->dump_start(fwrt->ops_ctx))
1170 		return;
1171 
1172 	if (fwrt->ops && fwrt->ops->fw_running &&
1173 	    !fwrt->ops->fw_running(fwrt->ops_ctx)) {
1174 		IWL_ERR(fwrt, "Firmware not running - cannot dump error\n");
1175 		iwl_fw_free_dump_desc(fwrt);
1176 		clear_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status);
1177 		goto out;
1178 	}
1179 
1180 	if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
1181 		/* stop recording */
1182 		iwl_fw_dbg_stop_recording(fwrt);
1183 
1184 		iwl_fw_error_dump(fwrt);
1185 
		/* start recording again if the firmware has not crashed */
1187 		if (!test_bit(STATUS_FW_ERROR, &fwrt->trans->status) &&
1188 		    fwrt->fw->dbg_dest_tlv) {
1189 			iwl_clear_bits_prph(fwrt->trans,
1190 					    MON_BUFF_SAMPLE_CTL, 0x100);
1191 			iwl_clear_bits_prph(fwrt->trans,
1192 					    MON_BUFF_SAMPLE_CTL, 0x1);
1193 			iwl_set_bits_prph(fwrt->trans,
1194 					  MON_BUFF_SAMPLE_CTL, 0x1);
1195 		}
1196 	} else {
1197 		u32 in_sample = iwl_read_prph(fwrt->trans, DBGC_IN_SAMPLE);
1198 		u32 out_ctrl = iwl_read_prph(fwrt->trans, DBGC_OUT_CTRL);
1199 
1200 		iwl_fw_dbg_stop_recording(fwrt);
		/* wait for the DBGC to stop before we collect the data */
1202 		udelay(500);
1203 
1204 		iwl_fw_error_dump(fwrt);
1205 
		/* start recording again if the firmware has not crashed */
1207 		if (!test_bit(STATUS_FW_ERROR, &fwrt->trans->status) &&
1208 		    fwrt->fw->dbg_dest_tlv) {
1209 			iwl_write_prph(fwrt->trans, DBGC_IN_SAMPLE, in_sample);
1210 			iwl_write_prph(fwrt->trans, DBGC_OUT_CTRL, out_ctrl);
1211 		}
1212 	}
1213 out:
1214 	if (fwrt->ops && fwrt->ops->dump_end)
1215 		fwrt->ops->dump_end(fwrt->ops_ctx);
1216 }
1217 
1218