1 /******************************************************************************
2  *
3  * This file is provided under a dual BSD/GPLv2 license.  When using or
4  * redistributing this file, you may do so under either license.
5  *
6  * GPL LICENSE SUMMARY
7  *
8  * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
9  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10  * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
11  * Copyright(c) 2018        Intel Corporation
12  *
13  * This program is free software; you can redistribute it and/or modify
14  * it under the terms of version 2 of the GNU General Public License as
15  * published by the Free Software Foundation.
16  *
17  * This program is distributed in the hope that it will be useful, but
18  * WITHOUT ANY WARRANTY; without even the implied warranty of
19  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
20  * General Public License for more details.
21  *
22  * The full GNU General Public License is included in this distribution
23  * in the file called COPYING.
24  *
25  * Contact Information:
26  *  Intel Linux Wireless <linuxwifi@intel.com>
27  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
28  *
29  * BSD LICENSE
30  *
31  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
32  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
33  * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
34  * Copyright(c) 2018        Intel Corporation
35  * All rights reserved.
36  *
37  * Redistribution and use in source and binary forms, with or without
38  * modification, are permitted provided that the following conditions
39  * are met:
40  *
41  *  * Redistributions of source code must retain the above copyright
42  *    notice, this list of conditions and the following disclaimer.
43  *  * Redistributions in binary form must reproduce the above copyright
44  *    notice, this list of conditions and the following disclaimer in
45  *    the documentation and/or other materials provided with the
46  *    distribution.
47  *  * Neither the name Intel Corporation nor the names of its
48  *    contributors may be used to endorse or promote products derived
49  *    from this software without specific prior written permission.
50  *
51  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
52  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
53  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
54  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
55  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
56  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
57  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
58  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
59  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
60  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
61  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
62  *
63  *****************************************************************************/
64 #include <linux/devcoredump.h>
65 #include "iwl-drv.h"
66 #include "runtime.h"
67 #include "dbg.h"
68 #include "debugfs.h"
69 #include "iwl-io.h"
70 #include "iwl-prph.h"
71 #include "iwl-csr.h"
72 
/**
 * struct iwl_fw_dump_ptrs - set of pointers needed for the fw-error-dump
 *
 * @trans_ptr: pointer to &struct iwl_trans_dump_data, which contains the
 *	transport's data (its length is stored inside the struct itself)
 * @fwrt_ptr: pointer to the buffer coming from fwrt
 * @fwrt_len: length of the valid data in @fwrt_ptr
 */
82 struct iwl_fw_dump_ptrs {
83 	struct iwl_trans_dump_data *trans_ptr;
84 	void *fwrt_ptr;
85 	u32 fwrt_len;
86 };
87 
88 #define RADIO_REG_MAX_READ 0x2ad
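/*
 * Dump the radio registers: issue a read command for each of the first
 * RADIO_REG_MAX_READ radio register addresses and store one byte of response
 * data per register in a RADIO_REG TLV.
 */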
89 static void iwl_read_radio_regs(struct iwl_fw_runtime *fwrt,
90 				struct iwl_fw_error_dump_data **dump_data)
91 {
92 	u8 *pos = (void *)(*dump_data)->data;
93 	unsigned long flags;
94 	int i;
95 
96 	IWL_DEBUG_INFO(fwrt, "WRT radio registers dump\n");
97 
98 	if (!iwl_trans_grab_nic_access(fwrt->trans, &flags))
99 		return;
100 
101 	(*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RADIO_REG);
102 	(*dump_data)->len = cpu_to_le32(RADIO_REG_MAX_READ);
103 
104 	for (i = 0; i < RADIO_REG_MAX_READ; i++) {
105 		u32 rd_cmd = RADIO_RSP_RD_CMD;
106 
107 		rd_cmd |= i << RADIO_RSP_ADDR_POS;
108 		iwl_write_prph_no_grab(fwrt->trans, RSP_RADIO_CMD, rd_cmd);
109 		*pos = (u8)iwl_read_prph_no_grab(fwrt->trans, RSP_RADIO_RDDAT);
110 
111 		pos++;
112 	}
113 
114 	*dump_data = iwl_fw_error_next_data(*dump_data);
115 
116 	iwl_trans_release_nic_access(fwrt->trans, &flags);
117 }
118 
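/*
 * Read one RX FIFO into a FIFO TLV: capture the FIFO status registers in the
 * TLV header, lock the fence, move it to the write pointer and then read the
 * FIFO contents word by word through the fence-increment register.
 */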
119 static void iwl_fwrt_dump_rxf(struct iwl_fw_runtime *fwrt,
120 			      struct iwl_fw_error_dump_data **dump_data,
121 			      int size, u32 offset, int fifo_num)
122 {
123 	struct iwl_fw_error_dump_fifo *fifo_hdr;
124 	u32 *fifo_data;
125 	u32 fifo_len;
126 	int i;
127 
128 	fifo_hdr = (void *)(*dump_data)->data;
129 	fifo_data = (void *)fifo_hdr->data;
130 	fifo_len = size;
131 
132 	/* No need to try to read the data if the length is 0 */
133 	if (fifo_len == 0)
134 		return;
135 
136 	/* Add a TLV for the RXF */
137 	(*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RXF);
138 	(*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr));
139 
140 	fifo_hdr->fifo_num = cpu_to_le32(fifo_num);
141 	fifo_hdr->available_bytes =
142 		cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
143 						RXF_RD_D_SPACE + offset));
144 	fifo_hdr->wr_ptr =
145 		cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
146 						RXF_RD_WR_PTR + offset));
147 	fifo_hdr->rd_ptr =
148 		cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
149 						RXF_RD_RD_PTR + offset));
150 	fifo_hdr->fence_ptr =
151 		cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
152 						RXF_RD_FENCE_PTR + offset));
153 	fifo_hdr->fence_mode =
154 		cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
155 						RXF_SET_FENCE_MODE + offset));
156 
157 	/* Lock fence */
158 	iwl_trans_write_prph(fwrt->trans, RXF_SET_FENCE_MODE + offset, 0x1);
	/* Set the fence pointer to the same place as the WR pointer */
160 	iwl_trans_write_prph(fwrt->trans, RXF_LD_WR2FENCE + offset, 0x1);
161 	/* Set fence offset */
162 	iwl_trans_write_prph(fwrt->trans,
163 			     RXF_LD_FENCE_OFFSET_ADDR + offset, 0x0);
164 
165 	/* Read FIFO */
166 	fifo_len /= sizeof(u32); /* Size in DWORDS */
167 	for (i = 0; i < fifo_len; i++)
168 		fifo_data[i] = iwl_trans_read_prph(fwrt->trans,
169 						 RXF_FIFO_RD_FENCE_INC +
170 						 offset);
171 	*dump_data = iwl_fw_error_next_data(*dump_data);
172 }
173 
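/*
 * Read one TX FIFO into a FIFO TLV: capture the FIFO status registers in the
 * TLV header, point TXF_READ_MODIFY_ADDR at the write pointer and then read
 * the FIFO contents word by word through TXF_READ_MODIFY_DATA.
 */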
174 static void iwl_fwrt_dump_txf(struct iwl_fw_runtime *fwrt,
175 			      struct iwl_fw_error_dump_data **dump_data,
176 			      int size, u32 offset, int fifo_num)
177 {
178 	struct iwl_fw_error_dump_fifo *fifo_hdr;
179 	u32 *fifo_data;
180 	u32 fifo_len;
181 	int i;
182 
183 	fifo_hdr = (void *)(*dump_data)->data;
184 	fifo_data = (void *)fifo_hdr->data;
185 	fifo_len = size;
186 
187 	/* No need to try to read the data if the length is 0 */
188 	if (fifo_len == 0)
189 		return;
190 
191 	/* Add a TLV for the FIFO */
192 	(*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXF);
193 	(*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr));
194 
195 	fifo_hdr->fifo_num = cpu_to_le32(fifo_num);
196 	fifo_hdr->available_bytes =
197 		cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
198 						TXF_FIFO_ITEM_CNT + offset));
199 	fifo_hdr->wr_ptr =
200 		cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
201 						TXF_WR_PTR + offset));
202 	fifo_hdr->rd_ptr =
203 		cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
204 						TXF_RD_PTR + offset));
205 	fifo_hdr->fence_ptr =
206 		cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
207 						TXF_FENCE_PTR + offset));
208 	fifo_hdr->fence_mode =
209 		cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
210 						TXF_LOCK_FENCE + offset));
211 
212 	/* Set the TXF_READ_MODIFY_ADDR to TXF_WR_PTR */
213 	iwl_trans_write_prph(fwrt->trans, TXF_READ_MODIFY_ADDR + offset,
214 			     TXF_WR_PTR + offset);
215 
216 	/* Dummy-read to advance the read pointer to the head */
217 	iwl_trans_read_prph(fwrt->trans, TXF_READ_MODIFY_DATA + offset);
218 
219 	/* Read FIFO */
220 	fifo_len /= sizeof(u32); /* Size in DWORDS */
221 	for (i = 0; i < fifo_len; i++)
222 		fifo_data[i] = iwl_trans_read_prph(fwrt->trans,
223 						  TXF_READ_MODIFY_DATA +
224 						  offset);
225 	*dump_data = iwl_fw_error_next_data(*dump_data);
226 }
227 
228 static void iwl_fw_dump_rxf(struct iwl_fw_runtime *fwrt,
229 			    struct iwl_fw_error_dump_data **dump_data)
230 {
231 	struct iwl_fwrt_shared_mem_cfg *cfg = &fwrt->smem_cfg;
232 	unsigned long flags;
233 
234 	IWL_DEBUG_INFO(fwrt, "WRT RX FIFO dump\n");
235 
236 	if (!iwl_trans_grab_nic_access(fwrt->trans, &flags))
237 		return;
238 
239 	if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_RXF)) {
240 		/* Pull RXF1 */
241 		iwl_fwrt_dump_rxf(fwrt, dump_data,
242 				  cfg->lmac[0].rxfifo1_size, 0, 0);
243 		/* Pull RXF2 */
244 		iwl_fwrt_dump_rxf(fwrt, dump_data, cfg->rxfifo2_size,
245 				  RXF_DIFF_FROM_PREV, 1);
246 		/* Pull LMAC2 RXF1 */
247 		if (fwrt->smem_cfg.num_lmacs > 1)
248 			iwl_fwrt_dump_rxf(fwrt, dump_data,
249 					  cfg->lmac[1].rxfifo1_size,
250 					  LMAC2_PRPH_OFFSET, 2);
251 	}
252 
253 	iwl_trans_release_nic_access(fwrt->trans, &flags);
254 }
255 
256 static void iwl_fw_dump_txf(struct iwl_fw_runtime *fwrt,
257 			    struct iwl_fw_error_dump_data **dump_data)
258 {
259 	struct iwl_fw_error_dump_fifo *fifo_hdr;
260 	struct iwl_fwrt_shared_mem_cfg *cfg = &fwrt->smem_cfg;
261 	u32 *fifo_data;
262 	u32 fifo_len;
263 	unsigned long flags;
264 	int i, j;
265 
266 	IWL_DEBUG_INFO(fwrt, "WRT TX FIFO dump\n");
267 
268 	if (!iwl_trans_grab_nic_access(fwrt->trans, &flags))
269 		return;
270 
271 	if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_TXF)) {
272 		/* Pull TXF data from LMAC1 */
273 		for (i = 0; i < fwrt->smem_cfg.num_txfifo_entries; i++) {
			/* Mark the TXF number we're pulling now */
275 			iwl_trans_write_prph(fwrt->trans, TXF_LARC_NUM, i);
276 			iwl_fwrt_dump_txf(fwrt, dump_data,
277 					  cfg->lmac[0].txfifo_size[i], 0, i);
278 		}
279 
280 		/* Pull TXF data from LMAC2 */
281 		if (fwrt->smem_cfg.num_lmacs > 1) {
282 			for (i = 0; i < fwrt->smem_cfg.num_txfifo_entries;
283 			     i++) {
				/* Mark the TXF number we're pulling now */
285 				iwl_trans_write_prph(fwrt->trans,
286 						     TXF_LARC_NUM +
287 						     LMAC2_PRPH_OFFSET, i);
288 				iwl_fwrt_dump_txf(fwrt, dump_data,
289 						  cfg->lmac[1].txfifo_size[i],
290 						  LMAC2_PRPH_OFFSET,
291 						  i + cfg->num_txfifo_entries);
292 			}
293 		}
294 	}
295 
296 	if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_INTERNAL_TXF) &&
297 	    fw_has_capa(&fwrt->fw->ucode_capa,
298 			IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
299 		/* Pull UMAC internal TXF data from all TXFs */
300 		for (i = 0;
301 		     i < ARRAY_SIZE(fwrt->smem_cfg.internal_txfifo_size);
302 		     i++) {
303 			fifo_hdr = (void *)(*dump_data)->data;
304 			fifo_data = (void *)fifo_hdr->data;
305 			fifo_len = fwrt->smem_cfg.internal_txfifo_size[i];
306 
307 			/* No need to try to read the data if the length is 0 */
308 			if (fifo_len == 0)
309 				continue;
310 
311 			/* Add a TLV for the internal FIFOs */
312 			(*dump_data)->type =
313 				cpu_to_le32(IWL_FW_ERROR_DUMP_INTERNAL_TXF);
314 			(*dump_data)->len =
315 				cpu_to_le32(fifo_len + sizeof(*fifo_hdr));
316 
317 			fifo_hdr->fifo_num = cpu_to_le32(i);
318 
			/* Mark the TXF number we're pulling now */
320 			iwl_trans_write_prph(fwrt->trans, TXF_CPU2_NUM, i +
321 				fwrt->smem_cfg.num_txfifo_entries);
322 
323 			fifo_hdr->available_bytes =
324 				cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
325 								TXF_CPU2_FIFO_ITEM_CNT));
326 			fifo_hdr->wr_ptr =
327 				cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
328 								TXF_CPU2_WR_PTR));
329 			fifo_hdr->rd_ptr =
330 				cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
331 								TXF_CPU2_RD_PTR));
332 			fifo_hdr->fence_ptr =
333 				cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
334 								TXF_CPU2_FENCE_PTR));
335 			fifo_hdr->fence_mode =
336 				cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
337 								TXF_CPU2_LOCK_FENCE));
338 
339 			/* Set TXF_CPU2_READ_MODIFY_ADDR to TXF_CPU2_WR_PTR */
340 			iwl_trans_write_prph(fwrt->trans,
341 					     TXF_CPU2_READ_MODIFY_ADDR,
342 					     TXF_CPU2_WR_PTR);
343 
344 			/* Dummy-read to advance the read pointer to head */
345 			iwl_trans_read_prph(fwrt->trans,
346 					    TXF_CPU2_READ_MODIFY_DATA);
347 
348 			/* Read FIFO */
349 			fifo_len /= sizeof(u32); /* Size in DWORDS */
350 			for (j = 0; j < fifo_len; j++)
351 				fifo_data[j] =
352 					iwl_trans_read_prph(fwrt->trans,
353 							    TXF_CPU2_READ_MODIFY_DATA);
354 			*dump_data = iwl_fw_error_next_data(*dump_data);
355 		}
356 	}
357 
358 	iwl_trans_release_nic_access(fwrt->trans, &flags);
359 }
360 
361 #define IWL8260_ICCM_OFFSET		0x44000 /* Only for B-step */
362 #define IWL8260_ICCM_LEN		0xC000 /* Only for B-step */
363 
364 struct iwl_prph_range {
365 	u32 start, end;
366 };
367 
368 static const struct iwl_prph_range iwl_prph_dump_addr_comm[] = {
369 	{ .start = 0x00a00000, .end = 0x00a00000 },
370 	{ .start = 0x00a0000c, .end = 0x00a00024 },
371 	{ .start = 0x00a0002c, .end = 0x00a0003c },
372 	{ .start = 0x00a00410, .end = 0x00a00418 },
373 	{ .start = 0x00a00420, .end = 0x00a00420 },
374 	{ .start = 0x00a00428, .end = 0x00a00428 },
375 	{ .start = 0x00a00430, .end = 0x00a0043c },
376 	{ .start = 0x00a00444, .end = 0x00a00444 },
377 	{ .start = 0x00a004c0, .end = 0x00a004cc },
378 	{ .start = 0x00a004d8, .end = 0x00a004d8 },
379 	{ .start = 0x00a004e0, .end = 0x00a004f0 },
380 	{ .start = 0x00a00840, .end = 0x00a00840 },
381 	{ .start = 0x00a00850, .end = 0x00a00858 },
382 	{ .start = 0x00a01004, .end = 0x00a01008 },
383 	{ .start = 0x00a01010, .end = 0x00a01010 },
384 	{ .start = 0x00a01018, .end = 0x00a01018 },
385 	{ .start = 0x00a01024, .end = 0x00a01024 },
386 	{ .start = 0x00a0102c, .end = 0x00a01034 },
387 	{ .start = 0x00a0103c, .end = 0x00a01040 },
388 	{ .start = 0x00a01048, .end = 0x00a01094 },
389 	{ .start = 0x00a01c00, .end = 0x00a01c20 },
390 	{ .start = 0x00a01c58, .end = 0x00a01c58 },
391 	{ .start = 0x00a01c7c, .end = 0x00a01c7c },
392 	{ .start = 0x00a01c28, .end = 0x00a01c54 },
393 	{ .start = 0x00a01c5c, .end = 0x00a01c5c },
394 	{ .start = 0x00a01c60, .end = 0x00a01cdc },
395 	{ .start = 0x00a01ce0, .end = 0x00a01d0c },
396 	{ .start = 0x00a01d18, .end = 0x00a01d20 },
397 	{ .start = 0x00a01d2c, .end = 0x00a01d30 },
398 	{ .start = 0x00a01d40, .end = 0x00a01d5c },
399 	{ .start = 0x00a01d80, .end = 0x00a01d80 },
400 	{ .start = 0x00a01d98, .end = 0x00a01d9c },
401 	{ .start = 0x00a01da8, .end = 0x00a01da8 },
402 	{ .start = 0x00a01db8, .end = 0x00a01df4 },
403 	{ .start = 0x00a01dc0, .end = 0x00a01dfc },
404 	{ .start = 0x00a01e00, .end = 0x00a01e2c },
405 	{ .start = 0x00a01e40, .end = 0x00a01e60 },
406 	{ .start = 0x00a01e68, .end = 0x00a01e6c },
407 	{ .start = 0x00a01e74, .end = 0x00a01e74 },
408 	{ .start = 0x00a01e84, .end = 0x00a01e90 },
409 	{ .start = 0x00a01e9c, .end = 0x00a01ec4 },
410 	{ .start = 0x00a01ed0, .end = 0x00a01ee0 },
411 	{ .start = 0x00a01f00, .end = 0x00a01f1c },
412 	{ .start = 0x00a01f44, .end = 0x00a01ffc },
413 	{ .start = 0x00a02000, .end = 0x00a02048 },
414 	{ .start = 0x00a02068, .end = 0x00a020f0 },
415 	{ .start = 0x00a02100, .end = 0x00a02118 },
416 	{ .start = 0x00a02140, .end = 0x00a0214c },
417 	{ .start = 0x00a02168, .end = 0x00a0218c },
418 	{ .start = 0x00a021c0, .end = 0x00a021c0 },
419 	{ .start = 0x00a02400, .end = 0x00a02410 },
420 	{ .start = 0x00a02418, .end = 0x00a02420 },
421 	{ .start = 0x00a02428, .end = 0x00a0242c },
422 	{ .start = 0x00a02434, .end = 0x00a02434 },
423 	{ .start = 0x00a02440, .end = 0x00a02460 },
424 	{ .start = 0x00a02468, .end = 0x00a024b0 },
425 	{ .start = 0x00a024c8, .end = 0x00a024cc },
426 	{ .start = 0x00a02500, .end = 0x00a02504 },
427 	{ .start = 0x00a0250c, .end = 0x00a02510 },
428 	{ .start = 0x00a02540, .end = 0x00a02554 },
429 	{ .start = 0x00a02580, .end = 0x00a025f4 },
430 	{ .start = 0x00a02600, .end = 0x00a0260c },
431 	{ .start = 0x00a02648, .end = 0x00a02650 },
432 	{ .start = 0x00a02680, .end = 0x00a02680 },
433 	{ .start = 0x00a026c0, .end = 0x00a026d0 },
434 	{ .start = 0x00a02700, .end = 0x00a0270c },
435 	{ .start = 0x00a02804, .end = 0x00a02804 },
436 	{ .start = 0x00a02818, .end = 0x00a0281c },
437 	{ .start = 0x00a02c00, .end = 0x00a02db4 },
438 	{ .start = 0x00a02df4, .end = 0x00a02fb0 },
439 	{ .start = 0x00a03000, .end = 0x00a03014 },
440 	{ .start = 0x00a0301c, .end = 0x00a0302c },
441 	{ .start = 0x00a03034, .end = 0x00a03038 },
442 	{ .start = 0x00a03040, .end = 0x00a03048 },
443 	{ .start = 0x00a03060, .end = 0x00a03068 },
444 	{ .start = 0x00a03070, .end = 0x00a03074 },
445 	{ .start = 0x00a0307c, .end = 0x00a0307c },
446 	{ .start = 0x00a03080, .end = 0x00a03084 },
447 	{ .start = 0x00a0308c, .end = 0x00a03090 },
448 	{ .start = 0x00a03098, .end = 0x00a03098 },
449 	{ .start = 0x00a030a0, .end = 0x00a030a0 },
450 	{ .start = 0x00a030a8, .end = 0x00a030b4 },
451 	{ .start = 0x00a030bc, .end = 0x00a030bc },
452 	{ .start = 0x00a030c0, .end = 0x00a0312c },
453 	{ .start = 0x00a03c00, .end = 0x00a03c5c },
454 	{ .start = 0x00a04400, .end = 0x00a04454 },
455 	{ .start = 0x00a04460, .end = 0x00a04474 },
456 	{ .start = 0x00a044c0, .end = 0x00a044ec },
457 	{ .start = 0x00a04500, .end = 0x00a04504 },
458 	{ .start = 0x00a04510, .end = 0x00a04538 },
459 	{ .start = 0x00a04540, .end = 0x00a04548 },
460 	{ .start = 0x00a04560, .end = 0x00a0457c },
461 	{ .start = 0x00a04590, .end = 0x00a04598 },
462 	{ .start = 0x00a045c0, .end = 0x00a045f4 },
463 };
464 
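/*
 * Additional PRPH ranges dumped, on top of the common set, on devices with
 * multi-queue RX support (e.g. the 9000 family).
 */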
465 static const struct iwl_prph_range iwl_prph_dump_addr_9000[] = {
466 	{ .start = 0x00a05c00, .end = 0x00a05c18 },
467 	{ .start = 0x00a05400, .end = 0x00a056e8 },
468 	{ .start = 0x00a08000, .end = 0x00a098bc },
469 	{ .start = 0x00a02400, .end = 0x00a02758 },
470 };
471 
472 static void iwl_read_prph_block(struct iwl_trans *trans, u32 start,
473 				u32 len_bytes, __le32 *data)
474 {
475 	u32 i;
476 
477 	for (i = 0; i < len_bytes; i += 4)
478 		*data++ = cpu_to_le32(iwl_read_prph_no_grab(trans, start + i));
479 }
480 
481 static void iwl_dump_prph(struct iwl_trans *trans,
482 			  struct iwl_fw_error_dump_data **data,
483 			  const struct iwl_prph_range *iwl_prph_dump_addr,
484 			  u32 range_len)
485 {
486 	struct iwl_fw_error_dump_prph *prph;
487 	unsigned long flags;
488 	u32 i;
489 
490 	IWL_DEBUG_INFO(trans, "WRT PRPH dump\n");
491 
492 	if (!iwl_trans_grab_nic_access(trans, &flags))
493 		return;
494 
495 	for (i = 0; i < range_len; i++) {
496 		/* The range includes both boundaries */
497 		int num_bytes_in_chunk = iwl_prph_dump_addr[i].end -
498 			 iwl_prph_dump_addr[i].start + 4;
499 
500 		(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PRPH);
501 		(*data)->len = cpu_to_le32(sizeof(*prph) +
502 					num_bytes_in_chunk);
503 		prph = (void *)(*data)->data;
504 		prph->prph_start = cpu_to_le32(iwl_prph_dump_addr[i].start);
505 
506 		iwl_read_prph_block(trans, iwl_prph_dump_addr[i].start,
507 				    /* our range is inclusive, hence + 4 */
508 				    iwl_prph_dump_addr[i].end -
509 				    iwl_prph_dump_addr[i].start + 4,
510 				    (void *)prph->data);
511 
512 		*data = iwl_fw_error_next_data(*data);
513 	}
514 
515 	iwl_trans_release_nic_access(trans, &flags);
516 }
517 
/*
 * alloc_sgtable - allocates a scatterlist table of the given size,
 * fills it with pages and returns it
 * @size: the size (in bytes) of the table
 */
523 static struct scatterlist *alloc_sgtable(int size)
524 {
525 	int alloc_size, nents, i;
526 	struct page *new_page;
527 	struct scatterlist *iter;
528 	struct scatterlist *table;
529 
530 	nents = DIV_ROUND_UP(size, PAGE_SIZE);
531 	table = kcalloc(nents, sizeof(*table), GFP_KERNEL);
532 	if (!table)
533 		return NULL;
534 	sg_init_table(table, nents);
535 	iter = table;
536 	for_each_sg(table, iter, sg_nents(table), i) {
537 		new_page = alloc_page(GFP_KERNEL);
		if (!new_page) {
			/* release all previously allocated pages in the table */
			iter = table;
			for_each_sg(table, iter, sg_nents(table), i) {
				new_page = sg_page(iter);
				if (new_page)
					__free_page(new_page);
			}
			/* don't leak the table itself on failure */
			kfree(table);
			return NULL;
		}
548 		alloc_size = min_t(int, size, PAGE_SIZE);
549 		size -= PAGE_SIZE;
550 		sg_set_page(iter, new_page, alloc_size, 0);
551 	}
552 	return table;
553 }
554 
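/*
 * Compute the total space needed for the PRPH TLVs: one TLV header plus
 * payload per address range, including the 9000-family ranges when
 * multi-queue RX is supported.
 */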
555 static int iwl_fw_get_prph_len(struct iwl_fw_runtime *fwrt)
556 {
557 	u32 prph_len = 0;
558 	int i;
559 
560 	for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr_comm);
561 	     i++) {
562 		/* The range includes both boundaries */
563 		int num_bytes_in_chunk =
564 			iwl_prph_dump_addr_comm[i].end -
565 			iwl_prph_dump_addr_comm[i].start + 4;
566 
567 		prph_len += sizeof(struct iwl_fw_error_dump_data) +
568 			sizeof(struct iwl_fw_error_dump_prph) +
569 			num_bytes_in_chunk;
570 	}
571 
572 	if (fwrt->trans->cfg->mq_rx_supported) {
573 		for (i = 0; i <
574 			ARRAY_SIZE(iwl_prph_dump_addr_9000); i++) {
575 			/* The range includes both boundaries */
576 			int num_bytes_in_chunk =
577 				iwl_prph_dump_addr_9000[i].end -
578 				iwl_prph_dump_addr_9000[i].start + 4;
579 
580 			prph_len += sizeof(struct iwl_fw_error_dump_data) +
581 				sizeof(struct iwl_fw_error_dump_prph) +
582 				num_bytes_in_chunk;
583 		}
584 	}
585 	return prph_len;
586 }
587 
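/*
 * Read a memory region of the given type into a MEM TLV; does nothing when
 * the length is zero.
 */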
588 static void iwl_fw_dump_mem(struct iwl_fw_runtime *fwrt,
589 			    struct iwl_fw_error_dump_data **dump_data,
590 			    u32 len, u32 ofs, u32 type)
591 {
592 	struct iwl_fw_error_dump_mem *dump_mem;
593 
594 	if (!len)
595 		return;
596 
597 	(*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
598 	(*dump_data)->len = cpu_to_le32(len + sizeof(*dump_mem));
599 	dump_mem = (void *)(*dump_data)->data;
600 	dump_mem->type = cpu_to_le32(type);
601 	dump_mem->offset = cpu_to_le32(ofs);
602 	iwl_trans_read_mem_bytes(fwrt->trans, ofs, dump_mem->data, len);
603 	*dump_data = iwl_fw_error_next_data(*dump_data);
604 
	IWL_DEBUG_INFO(fwrt, "WRT memory dump. Type=%u\n",
		       le32_to_cpu(dump_mem->type));
606 }
607 
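/*
 * Add item_len bytes plus const_len bytes of header to len, but only when
 * item_len is non-zero (an absent item contributes neither data nor header).
 */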
#define ADD_LEN(len, item_len, const_len) \
	do { size_t item = item_len; len += (!!item) * const_len + item; } \
	while (0)
611 
612 static int iwl_fw_rxf_len(struct iwl_fw_runtime *fwrt,
613 			  struct iwl_fwrt_shared_mem_cfg *mem_cfg)
614 {
615 	size_t hdr_len = sizeof(struct iwl_fw_error_dump_data) +
616 			 sizeof(struct iwl_fw_error_dump_fifo);
617 	u32 fifo_len = 0;
618 	int i;
619 
620 	if (!iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_RXF))
621 		return 0;
622 
623 	/* Count RXF2 size */
624 	ADD_LEN(fifo_len, mem_cfg->rxfifo2_size, hdr_len);
625 
626 	/* Count RXF1 sizes */
627 	for (i = 0; i < mem_cfg->num_lmacs; i++)
628 		ADD_LEN(fifo_len, mem_cfg->lmac[i].rxfifo1_size, hdr_len);
629 
630 	return fifo_len;
631 }
632 
633 static int iwl_fw_txf_len(struct iwl_fw_runtime *fwrt,
634 			  struct iwl_fwrt_shared_mem_cfg *mem_cfg)
635 {
636 	size_t hdr_len = sizeof(struct iwl_fw_error_dump_data) +
637 			 sizeof(struct iwl_fw_error_dump_fifo);
638 	u32 fifo_len = 0;
639 	int i;
640 
641 	if (!iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_TXF))
642 		goto dump_internal_txf;
643 
644 	/* Count TXF sizes */
645 	for (i = 0; i < mem_cfg->num_lmacs; i++) {
646 		int j;
647 
648 		for (j = 0; j < mem_cfg->num_txfifo_entries; j++)
649 			ADD_LEN(fifo_len, mem_cfg->lmac[i].txfifo_size[j],
650 				hdr_len);
651 	}
652 
653 dump_internal_txf:
654 	if (!(iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_INTERNAL_TXF) &&
655 	      fw_has_capa(&fwrt->fw->ucode_capa,
656 			  IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)))
657 		goto out;
658 
659 	for (i = 0; i < ARRAY_SIZE(mem_cfg->internal_txfifo_size); i++)
660 		ADD_LEN(fifo_len, mem_cfg->internal_txfifo_size[i], hdr_len);
661 
662 out:
663 	return fifo_len;
664 }
665 
666 static void iwl_dump_paging(struct iwl_fw_runtime *fwrt,
667 			    struct iwl_fw_error_dump_data **data)
668 {
669 	int i;
670 
671 	IWL_DEBUG_INFO(fwrt, "WRT paging dump\n");
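	/* paging block 0 holds the (not paged) CSS section, so start from 1 */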
672 	for (i = 1; i < fwrt->num_of_paging_blk + 1; i++) {
673 		struct iwl_fw_error_dump_paging *paging;
674 		struct page *pages =
675 			fwrt->fw_paging_db[i].fw_paging_block;
676 		dma_addr_t addr = fwrt->fw_paging_db[i].fw_paging_phys;
677 
678 		(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING);
679 		(*data)->len = cpu_to_le32(sizeof(*paging) +
680 					     PAGING_BLOCK_SIZE);
681 		paging =  (void *)(*data)->data;
682 		paging->index = cpu_to_le32(i);
683 		dma_sync_single_for_cpu(fwrt->trans->dev, addr,
684 					PAGING_BLOCK_SIZE,
685 					DMA_BIDIRECTIONAL);
686 		memcpy(paging->data, page_address(pages),
687 		       PAGING_BLOCK_SIZE);
688 		(*data) = iwl_fw_error_next_data(*data);
689 	}
690 }
691 
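/*
 * Build the fw-error-dump file: compute the file length from the enabled dump
 * types, allocate it, and fill in the info, SMEM config, FIFO, memory, paging
 * and PRPH TLVs. The transport's dump data is appended later by the caller.
 */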
692 static struct iwl_fw_error_dump_file *
693 _iwl_fw_error_dump(struct iwl_fw_runtime *fwrt,
694 		   struct iwl_fw_dump_ptrs *fw_error_dump)
695 {
696 	struct iwl_fw_error_dump_file *dump_file;
697 	struct iwl_fw_error_dump_data *dump_data;
698 	struct iwl_fw_error_dump_info *dump_info;
699 	struct iwl_fw_error_dump_smem_cfg *dump_smem_cfg;
700 	struct iwl_fw_error_dump_trigger_desc *dump_trig;
701 	u32 sram_len, sram_ofs;
702 	const struct iwl_fw_dbg_mem_seg_tlv *fw_mem = fwrt->fw->dbg.mem_tlv;
703 	struct iwl_fwrt_shared_mem_cfg *mem_cfg = &fwrt->smem_cfg;
704 	u32 file_len, fifo_len = 0, prph_len = 0, radio_len = 0;
705 	u32 smem_len = fwrt->fw->dbg.n_mem_tlv ? 0 : fwrt->trans->cfg->smem_len;
706 	u32 sram2_len = fwrt->fw->dbg.n_mem_tlv ?
707 				0 : fwrt->trans->cfg->dccm2_len;
708 	int i;
709 
710 	/* SRAM - include stack CCM if driver knows the values for it */
711 	if (!fwrt->trans->cfg->dccm_offset || !fwrt->trans->cfg->dccm_len) {
712 		const struct fw_img *img;
713 
714 		img = &fwrt->fw->img[fwrt->cur_fw_img];
715 		sram_ofs = img->sec[IWL_UCODE_SECTION_DATA].offset;
716 		sram_len = img->sec[IWL_UCODE_SECTION_DATA].len;
717 	} else {
718 		sram_ofs = fwrt->trans->cfg->dccm_offset;
719 		sram_len = fwrt->trans->cfg->dccm_len;
720 	}
721 
722 	/* reading RXF/TXF sizes */
723 	if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status)) {
724 		fifo_len = iwl_fw_rxf_len(fwrt, mem_cfg);
725 		fifo_len += iwl_fw_txf_len(fwrt, mem_cfg);
726 
727 		/* Make room for PRPH registers */
728 		if (!fwrt->trans->cfg->gen2 &&
729 		   iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_PRPH))
730 			prph_len += iwl_fw_get_prph_len(fwrt);
731 
732 		if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000 &&
733 		    iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_RADIO_REG))
734 			radio_len = sizeof(*dump_data) + RADIO_REG_MAX_READ;
735 	}
736 
737 	file_len = sizeof(*dump_file) + fifo_len + prph_len + radio_len;
738 
739 	if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_DEV_FW_INFO))
740 		file_len += sizeof(*dump_data) + sizeof(*dump_info);
741 	if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_MEM_CFG))
742 		file_len += sizeof(*dump_data) + sizeof(*dump_smem_cfg);
743 
744 	if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_MEM)) {
745 		size_t hdr_len = sizeof(*dump_data) +
746 				 sizeof(struct iwl_fw_error_dump_mem);
747 
748 		/* Dump SRAM only if no mem_tlvs */
749 		if (!fwrt->fw->dbg.n_mem_tlv)
750 			ADD_LEN(file_len, sram_len, hdr_len);
751 
752 		/* Make room for all mem types that exist */
753 		ADD_LEN(file_len, smem_len, hdr_len);
754 		ADD_LEN(file_len, sram2_len, hdr_len);
755 
756 		for (i = 0; i < fwrt->fw->dbg.n_mem_tlv; i++)
757 			ADD_LEN(file_len, le32_to_cpu(fw_mem[i].len), hdr_len);
758 	}
759 
	/* Make room for fw's virtual image pages, if they exist */
761 	if (iwl_fw_dbg_is_paging_enabled(fwrt))
762 		file_len += fwrt->num_of_paging_blk *
763 			(sizeof(*dump_data) +
764 			 sizeof(struct iwl_fw_error_dump_paging) +
765 			 PAGING_BLOCK_SIZE);
766 
767 	if (iwl_fw_dbg_is_d3_debug_enabled(fwrt) && fwrt->dump.d3_debug_data) {
768 		file_len += sizeof(*dump_data) +
769 			fwrt->trans->cfg->d3_debug_data_length * 2;
770 	}
771 
772 	/* If we only want a monitor dump, reset the file length */
773 	if (fwrt->dump.monitor_only) {
774 		file_len = sizeof(*dump_file) + sizeof(*dump_data) * 2 +
775 			   sizeof(*dump_info) + sizeof(*dump_smem_cfg);
776 	}
777 
778 	if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_ERROR_INFO) &&
779 	    fwrt->dump.desc)
780 		file_len += sizeof(*dump_data) + sizeof(*dump_trig) +
781 			    fwrt->dump.desc->len;
782 
783 	dump_file = vzalloc(file_len);
784 	if (!dump_file)
785 		return NULL;
786 
787 	fw_error_dump->fwrt_ptr = dump_file;
788 
789 	dump_file->barker = cpu_to_le32(IWL_FW_ERROR_DUMP_BARKER);
790 	dump_data = (void *)dump_file->data;
791 
792 	if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_DEV_FW_INFO)) {
793 		dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_DEV_FW_INFO);
794 		dump_data->len = cpu_to_le32(sizeof(*dump_info));
795 		dump_info = (void *)dump_data->data;
796 		dump_info->device_family =
797 			fwrt->trans->cfg->device_family ==
798 			IWL_DEVICE_FAMILY_7000 ?
799 				cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_7) :
800 				cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_8);
801 		dump_info->hw_step =
802 			cpu_to_le32(CSR_HW_REV_STEP(fwrt->trans->hw_rev));
803 		memcpy(dump_info->fw_human_readable, fwrt->fw->human_readable,
804 		       sizeof(dump_info->fw_human_readable));
805 		strncpy(dump_info->dev_human_readable, fwrt->trans->cfg->name,
806 			sizeof(dump_info->dev_human_readable) - 1);
807 		strncpy(dump_info->bus_human_readable, fwrt->dev->bus->name,
808 			sizeof(dump_info->bus_human_readable) - 1);
809 
810 		dump_data = iwl_fw_error_next_data(dump_data);
811 	}
812 
813 	if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_MEM_CFG)) {
814 		/* Dump shared memory configuration */
815 		dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_CFG);
816 		dump_data->len = cpu_to_le32(sizeof(*dump_smem_cfg));
817 		dump_smem_cfg = (void *)dump_data->data;
818 		dump_smem_cfg->num_lmacs = cpu_to_le32(mem_cfg->num_lmacs);
819 		dump_smem_cfg->num_txfifo_entries =
820 			cpu_to_le32(mem_cfg->num_txfifo_entries);
821 		for (i = 0; i < MAX_NUM_LMAC; i++) {
822 			int j;
823 			u32 *txf_size = mem_cfg->lmac[i].txfifo_size;
824 
825 			for (j = 0; j < TX_FIFO_MAX_NUM; j++)
826 				dump_smem_cfg->lmac[i].txfifo_size[j] =
827 					cpu_to_le32(txf_size[j]);
828 			dump_smem_cfg->lmac[i].rxfifo1_size =
829 				cpu_to_le32(mem_cfg->lmac[i].rxfifo1_size);
830 		}
831 		dump_smem_cfg->rxfifo2_size =
832 			cpu_to_le32(mem_cfg->rxfifo2_size);
833 		dump_smem_cfg->internal_txfifo_addr =
834 			cpu_to_le32(mem_cfg->internal_txfifo_addr);
835 		for (i = 0; i < TX_FIFO_INTERNAL_MAX_NUM; i++) {
836 			dump_smem_cfg->internal_txfifo_size[i] =
837 				cpu_to_le32(mem_cfg->internal_txfifo_size[i]);
838 		}
839 
840 		dump_data = iwl_fw_error_next_data(dump_data);
841 	}
842 
843 	/* We only dump the FIFOs if the FW is in error state */
844 	if (fifo_len) {
845 		iwl_fw_dump_rxf(fwrt, &dump_data);
846 		iwl_fw_dump_txf(fwrt, &dump_data);
847 		if (radio_len)
848 			iwl_read_radio_regs(fwrt, &dump_data);
849 	}
850 
851 	if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_ERROR_INFO) &&
852 	    fwrt->dump.desc) {
853 		dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_ERROR_INFO);
854 		dump_data->len = cpu_to_le32(sizeof(*dump_trig) +
855 					     fwrt->dump.desc->len);
856 		dump_trig = (void *)dump_data->data;
857 		memcpy(dump_trig, &fwrt->dump.desc->trig_desc,
858 		       sizeof(*dump_trig) + fwrt->dump.desc->len);
859 
860 		dump_data = iwl_fw_error_next_data(dump_data);
861 	}
862 
	/* In case we only want a monitor dump, skip to the transport data */
864 	if (fwrt->dump.monitor_only)
865 		goto out;
866 
867 	if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_MEM)) {
868 		const struct iwl_fw_dbg_mem_seg_tlv *fw_dbg_mem =
869 			fwrt->fw->dbg.mem_tlv;
870 
871 		if (!fwrt->fw->dbg.n_mem_tlv)
872 			iwl_fw_dump_mem(fwrt, &dump_data, sram_len, sram_ofs,
873 					IWL_FW_ERROR_DUMP_MEM_SRAM);
874 
875 		for (i = 0; i < fwrt->fw->dbg.n_mem_tlv; i++) {
876 			u32 len = le32_to_cpu(fw_dbg_mem[i].len);
877 			u32 ofs = le32_to_cpu(fw_dbg_mem[i].ofs);
878 
879 			iwl_fw_dump_mem(fwrt, &dump_data, len, ofs,
880 					le32_to_cpu(fw_dbg_mem[i].data_type));
881 		}
882 
883 		iwl_fw_dump_mem(fwrt, &dump_data, smem_len,
884 				fwrt->trans->cfg->smem_offset,
885 				IWL_FW_ERROR_DUMP_MEM_SMEM);
886 
887 		iwl_fw_dump_mem(fwrt, &dump_data, sram2_len,
888 				fwrt->trans->cfg->dccm2_offset,
889 				IWL_FW_ERROR_DUMP_MEM_SRAM);
890 	}
891 
892 	if (iwl_fw_dbg_is_d3_debug_enabled(fwrt) && fwrt->dump.d3_debug_data) {
893 		u32 addr = fwrt->trans->cfg->d3_debug_data_base_addr;
894 		size_t data_size = fwrt->trans->cfg->d3_debug_data_length;
895 
896 		dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_D3_DEBUG_DATA);
897 		dump_data->len = cpu_to_le32(data_size * 2);
898 
899 		memcpy(dump_data->data, fwrt->dump.d3_debug_data, data_size);
900 
901 		kfree(fwrt->dump.d3_debug_data);
902 		fwrt->dump.d3_debug_data = NULL;
903 
904 		iwl_trans_read_mem_bytes(fwrt->trans, addr,
905 					 dump_data->data + data_size,
906 					 data_size);
907 
908 		dump_data = iwl_fw_error_next_data(dump_data);
909 	}
910 
911 	/* Dump fw's virtual image */
912 	if (iwl_fw_dbg_is_paging_enabled(fwrt))
913 		iwl_dump_paging(fwrt, &dump_data);
914 
915 	if (prph_len) {
916 		iwl_dump_prph(fwrt->trans, &dump_data,
917 			      iwl_prph_dump_addr_comm,
918 			      ARRAY_SIZE(iwl_prph_dump_addr_comm));
919 
920 		if (fwrt->trans->cfg->mq_rx_supported)
921 			iwl_dump_prph(fwrt->trans, &dump_data,
922 				      iwl_prph_dump_addr_9000,
923 				      ARRAY_SIZE(iwl_prph_dump_addr_9000));
924 	}
925 
926 out:
927 	dump_file->file_len = cpu_to_le32(file_len);
928 	return dump_file;
929 }
930 
931 void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
932 {
933 	struct iwl_fw_dump_ptrs *fw_error_dump;
934 	struct iwl_fw_error_dump_file *dump_file;
935 	struct scatterlist *sg_dump_data;
936 	u32 file_len;
937 	u32 dump_mask = fwrt->fw->dbg.dump_mask;
938 
939 	IWL_DEBUG_INFO(fwrt, "WRT dump start\n");
940 
941 	/* there's no point in fw dump if the bus is dead */
942 	if (test_bit(STATUS_TRANS_DEAD, &fwrt->trans->status)) {
943 		IWL_ERR(fwrt, "Skip fw error dump since bus is dead\n");
944 		goto out;
945 	}
946 
947 	fw_error_dump = kzalloc(sizeof(*fw_error_dump), GFP_KERNEL);
948 	if (!fw_error_dump)
949 		goto out;
950 
951 	dump_file = _iwl_fw_error_dump(fwrt, fw_error_dump);
952 	if (!dump_file) {
953 		kfree(fw_error_dump);
954 		goto out;
955 	}
956 
957 	if (fwrt->dump.monitor_only)
958 		dump_mask &= IWL_FW_ERROR_DUMP_FW_MONITOR;
959 
960 	fw_error_dump->trans_ptr = iwl_trans_dump_data(fwrt->trans, dump_mask);
961 	file_len = le32_to_cpu(dump_file->file_len);
962 	fw_error_dump->fwrt_len = file_len;
963 	if (fw_error_dump->trans_ptr) {
964 		file_len += fw_error_dump->trans_ptr->len;
965 		dump_file->file_len = cpu_to_le32(file_len);
966 	}
967 
968 	sg_dump_data = alloc_sgtable(file_len);
969 	if (sg_dump_data) {
970 		sg_pcopy_from_buffer(sg_dump_data,
971 				     sg_nents(sg_dump_data),
972 				     fw_error_dump->fwrt_ptr,
973 				     fw_error_dump->fwrt_len, 0);
974 		if (fw_error_dump->trans_ptr)
975 			sg_pcopy_from_buffer(sg_dump_data,
976 					     sg_nents(sg_dump_data),
977 					     fw_error_dump->trans_ptr->data,
978 					     fw_error_dump->trans_ptr->len,
979 					     fw_error_dump->fwrt_len);
980 		dev_coredumpsg(fwrt->trans->dev, sg_dump_data, file_len,
981 			       GFP_KERNEL);
982 	}
983 	vfree(fw_error_dump->fwrt_ptr);
984 	vfree(fw_error_dump->trans_ptr);
985 	kfree(fw_error_dump);
986 
987 out:
988 	iwl_fw_free_dump_desc(fwrt);
989 	clear_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status);
990 	IWL_DEBUG_INFO(fwrt, "WRT dump done\n");
991 }
992 IWL_EXPORT_SYMBOL(iwl_fw_error_dump);
993 
994 const struct iwl_fw_dump_desc iwl_dump_desc_assert = {
995 	.trig_desc = {
996 		.type = cpu_to_le32(FW_DBG_TRIGGER_FW_ASSERT),
997 	},
998 };
999 IWL_EXPORT_SYMBOL(iwl_dump_desc_assert);
1000 
1001 void iwl_fw_assert_error_dump(struct iwl_fw_runtime *fwrt)
1002 {
1003 	IWL_INFO(fwrt, "error dump due to fw assert\n");
1004 	fwrt->dump.desc = &iwl_dump_desc_assert;
1005 	iwl_fw_error_dump(fwrt);
1006 }
1007 IWL_EXPORT_SYMBOL(iwl_fw_assert_error_dump);
1008 
1009 void iwl_fw_alive_error_dump(struct iwl_fw_runtime *fwrt)
1010 {
1011 	struct iwl_fw_dump_desc *iwl_dump_desc_no_alive =
1012 		kmalloc(sizeof(*iwl_dump_desc_no_alive), GFP_KERNEL);
1013 
1014 	if (!iwl_dump_desc_no_alive)
1015 		return;
1016 
1017 	iwl_dump_desc_no_alive->trig_desc.type =
1018 		cpu_to_le32(FW_DBG_TRIGGER_NO_ALIVE);
1019 	iwl_dump_desc_no_alive->len = 0;
1020 
1021 	if (WARN_ON(fwrt->dump.desc))
1022 		iwl_fw_free_dump_desc(fwrt);
1023 
1024 	IWL_WARN(fwrt, "Collecting data: trigger %d fired.\n",
1025 		 FW_DBG_TRIGGER_NO_ALIVE);
1026 
1027 	fwrt->dump.desc = iwl_dump_desc_no_alive;
1028 	iwl_fw_error_dump(fwrt);
1029 	clear_bit(IWL_FWRT_STATUS_WAIT_ALIVE, &fwrt->status);
1030 }
1031 IWL_EXPORT_SYMBOL(iwl_fw_alive_error_dump);
1032 
1033 int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt,
1034 			    const struct iwl_fw_dump_desc *desc,
1035 			    bool monitor_only,
1036 			    unsigned int delay)
1037 {
	/*
	 * If the loading of the FW completed successfully, the next step is to
	 * get the SMEM config data. Thus, if fwrt->smem_cfg.num_lmacs is
	 * non-zero, the FW was already loaded successfully. If the state is
	 * "NO_FW" in such a case - exit, since FW may be dead. Otherwise, we
	 * can try to collect the data, since FW might just not be fully
	 * loaded (no "ALIVE" yet), and the debug data is accessible.
	 *
	 * Corner case: got the FW alive but crashed before getting the SMEM
	 *	config. In such a case, due to HW access problems, we might
	 *	collect garbage.
	 */
1050 	if (fwrt->trans->state == IWL_TRANS_NO_FW &&
1051 	    fwrt->smem_cfg.num_lmacs)
1052 		return -EIO;
1053 
1054 	if (test_and_set_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status) ||
1055 	    test_bit(IWL_FWRT_STATUS_WAIT_ALIVE, &fwrt->status))
1056 		return -EBUSY;
1057 
1058 	if (WARN_ON(fwrt->dump.desc))
1059 		iwl_fw_free_dump_desc(fwrt);
1060 
1061 	IWL_WARN(fwrt, "Collecting data: trigger %d fired.\n",
1062 		 le32_to_cpu(desc->trig_desc.type));
1063 
1064 	fwrt->dump.desc = desc;
1065 	fwrt->dump.monitor_only = monitor_only;
1066 
1067 	schedule_delayed_work(&fwrt->dump.wk, delay);
1068 
1069 	return 0;
1070 }
1071 IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect_desc);
1072 
1073 int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt,
1074 		       enum iwl_fw_dbg_trigger trig,
1075 		       const char *str, size_t len,
1076 		       struct iwl_fw_dbg_trigger_tlv *trigger)
1077 {
1078 	struct iwl_fw_dump_desc *desc;
1079 	unsigned int delay = 0;
1080 	bool monitor_only = false;
1081 
1082 	if (trigger) {
1083 		u16 occurrences = le16_to_cpu(trigger->occurrences) - 1;
1084 
1085 		if (!le16_to_cpu(trigger->occurrences))
1086 			return 0;
1087 
1088 		if (trigger->flags & IWL_FW_DBG_FORCE_RESTART) {
1089 			IWL_WARN(fwrt, "Force restart: trigger %d fired.\n",
1090 				 trig);
1091 			iwl_force_nmi(fwrt->trans);
1092 			return 0;
1093 		}
1094 
1095 		trigger->occurrences = cpu_to_le16(occurrences);
1096 		delay = le16_to_cpu(trigger->trig_dis_ms);
1097 		monitor_only = trigger->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY;
1098 	}
1099 
1100 	desc = kzalloc(sizeof(*desc) + len, GFP_ATOMIC);
1101 	if (!desc)
		return -ENOMEM;

1105 	desc->len = len;
1106 	desc->trig_desc.type = cpu_to_le32(trig);
1107 	memcpy(desc->trig_desc.data, str, len);
1108 
1109 	return iwl_fw_dbg_collect_desc(fwrt, desc, monitor_only, delay);
1110 }
1111 IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect);
1112 
1113 int iwl_fw_dbg_collect_trig(struct iwl_fw_runtime *fwrt,
1114 			    struct iwl_fw_dbg_trigger_tlv *trigger,
1115 			    const char *fmt, ...)
1116 {
1117 	int ret, len = 0;
1118 	char buf[64];
1119 
1120 	if (fwrt->trans->ini_valid)
1121 		return 0;
1122 
1123 	if (fmt) {
1124 		va_list ap;
1125 
1126 		buf[sizeof(buf) - 1] = '\0';
1127 
1128 		va_start(ap, fmt);
1129 		vsnprintf(buf, sizeof(buf), fmt, ap);
1130 		va_end(ap);
1131 
1132 		/* check for truncation */
1133 		if (WARN_ON_ONCE(buf[sizeof(buf) - 1]))
1134 			buf[sizeof(buf) - 1] = '\0';
1135 
1136 		len = strlen(buf) + 1;
1137 	}
1138 
1139 	ret = iwl_fw_dbg_collect(fwrt, le32_to_cpu(trigger->id), buf, len,
1140 				 trigger);
1141 
1142 	if (ret)
1143 		return ret;
1144 
1145 	return 0;
1146 }
1147 IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect_trig);
1148 
1149 int iwl_fw_start_dbg_conf(struct iwl_fw_runtime *fwrt, u8 conf_id)
1150 {
1151 	u8 *ptr;
1152 	int ret;
1153 	int i;
1154 
1155 	if (WARN_ONCE(conf_id >= ARRAY_SIZE(fwrt->fw->dbg.conf_tlv),
1156 		      "Invalid configuration %d\n", conf_id))
1157 		return -EINVAL;
1158 
1159 	/* EARLY START - firmware's configuration is hard coded */
1160 	if ((!fwrt->fw->dbg.conf_tlv[conf_id] ||
1161 	     !fwrt->fw->dbg.conf_tlv[conf_id]->num_of_hcmds) &&
1162 	    conf_id == FW_DBG_START_FROM_ALIVE)
1163 		return 0;
1164 
1165 	if (!fwrt->fw->dbg.conf_tlv[conf_id])
1166 		return -EINVAL;
1167 
1168 	if (fwrt->dump.conf != FW_DBG_INVALID)
1169 		IWL_WARN(fwrt, "FW already configured (%d) - re-configuring\n",
1170 			 fwrt->dump.conf);
1171 
1172 	/* Send all HCMDs for configuring the FW debug */
1173 	ptr = (void *)&fwrt->fw->dbg.conf_tlv[conf_id]->hcmd;
1174 	for (i = 0; i < fwrt->fw->dbg.conf_tlv[conf_id]->num_of_hcmds; i++) {
1175 		struct iwl_fw_dbg_conf_hcmd *cmd = (void *)ptr;
1176 		struct iwl_host_cmd hcmd = {
1177 			.id = cmd->id,
1178 			.len = { le16_to_cpu(cmd->len), },
1179 			.data = { cmd->data, },
1180 		};
1181 
1182 		ret = iwl_trans_send_cmd(fwrt->trans, &hcmd);
1183 		if (ret)
1184 			return ret;
1185 
1186 		ptr += sizeof(*cmd);
1187 		ptr += le16_to_cpu(cmd->len);
1188 	}
1189 
1190 	fwrt->dump.conf = conf_id;
1191 
1192 	return 0;
1193 }
1194 IWL_EXPORT_SYMBOL(iwl_fw_start_dbg_conf);
1195 
1196 /* this function assumes dump_start was called beforehand and dump_end will be
1197  * called afterwards
1198  */
1199 void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt)
1200 {
1201 	struct iwl_fw_dbg_params params = {0};
1202 
1203 	if (!test_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status))
1204 		return;
1205 
1206 	if (fwrt->ops && fwrt->ops->fw_running &&
1207 	    !fwrt->ops->fw_running(fwrt->ops_ctx)) {
1208 		IWL_ERR(fwrt, "Firmware not running - cannot dump error\n");
1209 		iwl_fw_free_dump_desc(fwrt);
1210 		clear_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status);
1211 		return;
1212 	}
1213 
1214 	iwl_fw_dbg_stop_recording(fwrt, &params);
1215 
1216 	iwl_fw_error_dump(fwrt);
1217 
1218 	/* start recording again if the firmware is not crashed */
1219 	if (!test_bit(STATUS_FW_ERROR, &fwrt->trans->status) &&
1220 	    fwrt->fw->dbg.dest_tlv) {
		/* wait for the DBGC to stop before restarting recording */
1222 		udelay(500);
1223 		iwl_fw_dbg_restart_recording(fwrt, &params);
1224 	}
1225 }
1226 IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect_sync);
1227 
1228 void iwl_fw_error_dump_wk(struct work_struct *work)
1229 {
1230 	struct iwl_fw_runtime *fwrt =
1231 		container_of(work, struct iwl_fw_runtime, dump.wk.work);
1232 
1233 	if (fwrt->ops && fwrt->ops->dump_start &&
1234 	    fwrt->ops->dump_start(fwrt->ops_ctx))
1235 		return;
1236 
1237 	iwl_fw_dbg_collect_sync(fwrt);
1238 
1239 	if (fwrt->ops && fwrt->ops->dump_end)
1240 		fwrt->ops->dump_end(fwrt->ops_ctx);
1241 }
1242 
1243 void iwl_fw_dbg_read_d3_debug_data(struct iwl_fw_runtime *fwrt)
1244 {
1245 	const struct iwl_cfg *cfg = fwrt->trans->cfg;
1246 
1247 	if (!iwl_fw_dbg_is_d3_debug_enabled(fwrt))
1248 		return;
1249 
1250 	if (!fwrt->dump.d3_debug_data) {
1251 		fwrt->dump.d3_debug_data = kmalloc(cfg->d3_debug_data_length,
1252 						   GFP_KERNEL);
1253 		if (!fwrt->dump.d3_debug_data) {
1254 			IWL_ERR(fwrt,
1255 				"failed to allocate memory for D3 debug data\n");
1256 			return;
1257 		}
1258 	}
1259 
1260 	/* if the buffer holds previous debug data it is overwritten */
1261 	iwl_trans_read_mem_bytes(fwrt->trans, cfg->d3_debug_data_base_addr,
1262 				 fwrt->dump.d3_debug_data,
1263 				 cfg->d3_debug_data_length);
1264 }
1265 IWL_EXPORT_SYMBOL(iwl_fw_dbg_read_d3_debug_data);
1266 
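/*
 * Allocate a DRAM buffer for the debug monitor as described by the ini
 * allocation TLV and register it in trans->fw_mon. Any block beyond the
 * first is advertised to the firmware with a BUFFER_ALLOCATION
 * LDBG_CONFIG_CMD; the first block is passed via registers / context info.
 */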
1267 static void
1268 iwl_fw_dbg_buffer_allocation(struct iwl_fw_runtime *fwrt,
1269 			     struct iwl_fw_ini_allocation_tlv *alloc)
1270 {
1271 	struct iwl_trans *trans = fwrt->trans;
1272 	struct iwl_continuous_record_cmd cont_rec = {};
1273 	struct iwl_buffer_allocation_cmd *cmd = (void *)&cont_rec.pad[0];
1274 	struct iwl_host_cmd hcmd = {
1275 		.id = LDBG_CONFIG_CMD,
1276 		.flags = CMD_ASYNC,
1277 		.data[0] = &cont_rec,
1278 		.len[0] = sizeof(cont_rec),
1279 	};
1280 	void *virtual_addr = NULL;
1281 	u32 size = le32_to_cpu(alloc->size);
1282 	dma_addr_t phys_addr;
1283 
1284 	cont_rec.record_mode.enable_recording = cpu_to_le16(BUFFER_ALLOCATION);
1285 
1286 	if (!trans->num_blocks &&
1287 	    le32_to_cpu(alloc->buffer_location) !=
1288 	    IWL_FW_INI_LOCATION_DRAM_PATH)
1289 		return;
1290 
	virtual_addr = dma_alloc_coherent(fwrt->trans->dev, size,
					  &phys_addr, GFP_KERNEL);

	/* TODO: alloc fragments if needed */
	if (!virtual_addr) {
		IWL_ERR(fwrt, "Failed to allocate debug memory\n");
		return;
	}
1297 
1298 	if (WARN_ON_ONCE(trans->num_blocks == ARRAY_SIZE(trans->fw_mon)))
1299 		return;
1300 
1301 	trans->fw_mon[trans->num_blocks].block = virtual_addr;
1302 	trans->fw_mon[trans->num_blocks].physical = phys_addr;
1303 	trans->fw_mon[trans->num_blocks].size = size;
1304 	trans->num_blocks++;
1305 
1306 	IWL_DEBUG_FW(trans, "Allocated debug block of size %d\n", size);
1307 
1308 	/* First block is assigned via registers / context info */
1309 	if (trans->num_blocks == 1)
1310 		return;
1311 
1312 	cmd->num_frags = cpu_to_le32(1);
1313 	cmd->fragments[0].address = cpu_to_le64(phys_addr);
1314 	cmd->fragments[0].size = alloc->size;
1315 	cmd->allocation_id = alloc->allocation_id;
1316 	cmd->buffer_location = alloc->buffer_location;
1317 
1318 	iwl_trans_send_cmd(trans, &hcmd);
1319 }
1320 
1321 static void iwl_fw_dbg_send_hcmd(struct iwl_fw_runtime *fwrt,
1322 				 struct iwl_ucode_tlv *tlv)
1323 {
1324 	struct iwl_fw_ini_hcmd_tlv *hcmd_tlv = (void *)&tlv->data[0];
1325 	struct iwl_fw_ini_hcmd *data = &hcmd_tlv->hcmd;
1326 	u16 len = le32_to_cpu(tlv->length) - sizeof(*hcmd_tlv);
1327 
1328 	struct iwl_host_cmd hcmd = {
1329 		.id = WIDE_ID(data->group, data->id),
1330 		.len = { len, },
1331 		.data = { data->data, },
1332 	};
1333 
1334 	iwl_trans_send_cmd(fwrt->trans, &hcmd);
1335 }
1336 
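/*
 * Record the regions of an ini region TLV in the active_regs table for the
 * given apply point; an external TLV overrides the firmware default for the
 * same region ID.
 */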
1337 static void iwl_fw_dbg_update_regions(struct iwl_fw_runtime *fwrt,
1338 				      struct iwl_fw_ini_region_tlv *tlv,
1339 				      bool ext, enum iwl_fw_ini_apply_point pnt)
1340 {
1341 	void *iter = (void *)tlv->region_config;
1342 	int i, size = le32_to_cpu(tlv->num_regions);
1343 
1344 	for (i = 0; i < size; i++) {
1345 		struct iwl_fw_ini_region_cfg *reg = iter;
1346 		int id = le32_to_cpu(reg->region_id);
1347 		struct iwl_fw_ini_active_regs *active;
1348 
1349 		if (WARN_ON(id >= ARRAY_SIZE(fwrt->dump.active_regs)))
1350 			break;
1351 
1352 		active = &fwrt->dump.active_regs[id];
1353 
1354 		if (ext && active->apply_point == pnt)
1355 			IWL_WARN(fwrt->trans,
1356 				 "External region TLV overrides FW default %x\n",
1357 				 id);
1358 
1359 		IWL_DEBUG_FW(fwrt,
1360 			     "%s: apply point %d, activating region ID %d\n",
1361 			     __func__, pnt, id);
1362 
1363 		active->reg = reg;
1364 		active->apply_point = pnt;
1365 
1366 		if (le32_to_cpu(reg->region_type) !=
1367 		    IWL_FW_INI_REGION_DRAM_BUFFER)
1368 			iter += le32_to_cpu(reg->num_regions) * sizeof(__le32);
1369 
1370 		iter += sizeof(*reg);
1371 	}
1372 }
1373 
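/*
 * Record the triggers of an ini trigger TLV in the active_trigs table for the
 * given apply point. An external TLV either extends or, when ignore_default
 * is set, replaces the firmware default configuration.
 */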
1374 static void iwl_fw_dbg_update_triggers(struct iwl_fw_runtime *fwrt,
1375 				       struct iwl_fw_ini_trigger_tlv *tlv,
1376 				       bool ext,
1377 				       enum iwl_fw_ini_apply_point apply_point)
1378 {
1379 	int i, size = le32_to_cpu(tlv->num_triggers);
1380 	void *iter = (void *)tlv->trigger_config;
1381 
1382 	for (i = 0; i < size; i++) {
1383 		struct iwl_fw_ini_trigger *trig = iter;
1384 		struct iwl_fw_ini_active_triggers *active;
1385 		int id = le32_to_cpu(trig->trigger_id);
1386 		u32 num;
1387 
1388 		if (WARN_ON(id >= ARRAY_SIZE(fwrt->dump.active_trigs)))
1389 			break;
1390 
1391 		active = &fwrt->dump.active_trigs[id];
1392 
1393 		if (active->apply_point != apply_point) {
1394 			active->conf = NULL;
1395 			active->conf_ext = NULL;
1396 		}
1397 
1398 		num = le32_to_cpu(trig->num_regions);
1399 
1400 		if (ext && active->apply_point == apply_point) {
1401 			num += le32_to_cpu(active->conf->num_regions);
1402 			if (trig->ignore_default) {
1403 				active->conf_ext = active->conf;
1404 				active->conf = trig;
1405 			} else {
1406 				active->conf_ext = trig;
1407 			}
1408 		} else {
1409 			active->conf = trig;
1410 		}
1411 
1412 		iter += sizeof(*trig) +
1413 			le32_to_cpu(trig->num_regions) * sizeof(__le32);
1414 
1415 		active->active = num;
1416 		active->apply_point = apply_point;
1417 	}
1418 }
1419 
1420 static void _iwl_fw_dbg_apply_point(struct iwl_fw_runtime *fwrt,
1421 				    struct iwl_apply_point_data *data,
1422 				    enum iwl_fw_ini_apply_point pnt,
1423 				    bool ext)
1424 {
1425 	void *iter = data->data;
1426 
1427 	while (iter && iter < data->data + data->size) {
1428 		struct iwl_ucode_tlv *tlv = iter;
1429 		void *ini_tlv = (void *)tlv->data;
1430 		u32 type = le32_to_cpu(tlv->type);
1431 
1432 		switch (type) {
1433 		case IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION:
1434 			iwl_fw_dbg_buffer_allocation(fwrt, ini_tlv);
1435 			break;
1436 		case IWL_UCODE_TLV_TYPE_HCMD:
1437 			if (pnt < IWL_FW_INI_APPLY_AFTER_ALIVE) {
1438 				IWL_ERR(fwrt,
1439 					"Invalid apply point %x for host command\n",
1440 					pnt);
1441 				goto next;
1442 			}
1443 			iwl_fw_dbg_send_hcmd(fwrt, tlv);
1444 			break;
1445 		case IWL_UCODE_TLV_TYPE_REGIONS:
1446 			iwl_fw_dbg_update_regions(fwrt, ini_tlv, ext, pnt);
1447 			break;
1448 		case IWL_UCODE_TLV_TYPE_TRIGGERS:
1449 			iwl_fw_dbg_update_triggers(fwrt, ini_tlv, ext, pnt);
1450 			break;
1451 		case IWL_UCODE_TLV_TYPE_DEBUG_FLOW:
1452 			break;
1453 		default:
1454 			WARN_ONCE(1, "Invalid TLV %x for apply point\n", type);
1455 			break;
1456 		}
1457 next:
1458 		iter += sizeof(*tlv) + le32_to_cpu(tlv->length);
1459 	}
1460 }
1461 
1462 void iwl_fw_dbg_apply_point(struct iwl_fw_runtime *fwrt,
1463 			    enum iwl_fw_ini_apply_point apply_point)
1464 {
1465 	void *data = &fwrt->trans->apply_points[apply_point];
1466 
1467 	_iwl_fw_dbg_apply_point(fwrt, data, apply_point, false);
1468 
1469 	data = &fwrt->trans->apply_points_ext[apply_point];
1470 	_iwl_fw_dbg_apply_point(fwrt, data, apply_point, true);
1471 }
1472 IWL_EXPORT_SYMBOL(iwl_fw_dbg_apply_point);
1473