/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program;
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
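/*
 * Collection of firmware error dump data (SRAM, SMEM, FIFOs, periphery
 * registers and paging blocks) and handling of the firmware debug
 * triggers and configurations.
 */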
#include <linux/devcoredump.h>
#include "iwl-drv.h"
#include "runtime.h"
#include "dbg.h"
#include "iwl-io.h"
#include "iwl-prph.h"
#include "iwl-csr.h"

/**
 * struct iwl_fw_dump_ptrs - set of pointers needed for the fw-error-dump
 *
 * @fwrt_ptr: pointer to the buffer coming from fwrt
 * @trans_ptr: pointer to struct %iwl_trans_dump_data which contains the
 *	transport's data.
 * @fwrt_len: length of the valid data in fwrt_ptr
 */
struct iwl_fw_dump_ptrs {
	struct iwl_trans_dump_data *trans_ptr;
	void *fwrt_ptr;
	u32 fwrt_len;
};

#define RADIO_REG_MAX_READ 0x2ad
static void iwl_read_radio_regs(struct iwl_fw_runtime *fwrt,
				struct iwl_fw_error_dump_data **dump_data)
{
	u8 *pos = (void *)(*dump_data)->data;
	unsigned long flags;
	int i;

	if (!iwl_trans_grab_nic_access(fwrt->trans, &flags))
		return;

	(*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RADIO_REG);
	(*dump_data)->len = cpu_to_le32(RADIO_REG_MAX_READ);

	for (i = 0; i < RADIO_REG_MAX_READ; i++) {
		u32 rd_cmd = RADIO_RSP_RD_CMD;

		rd_cmd |= i << RADIO_RSP_ADDR_POS;
		iwl_write_prph_no_grab(fwrt->trans, RSP_RADIO_CMD, rd_cmd);
		*pos = (u8)iwl_read_prph_no_grab(fwrt->trans, RSP_RADIO_RDDAT);

		pos++;
	}

	*dump_data = iwl_fw_error_next_data(*dump_data);

	iwl_trans_release_nic_access(fwrt->trans, &flags);
}

static void iwl_fwrt_dump_rxf(struct iwl_fw_runtime *fwrt,
			      struct iwl_fw_error_dump_data **dump_data,
			      int size, u32 offset, int fifo_num)
{
	struct iwl_fw_error_dump_fifo *fifo_hdr;
	u32 *fifo_data;
	u32 fifo_len;
	int i;

	fifo_hdr = (void *)(*dump_data)->data;
	fifo_data = (void *)fifo_hdr->data;
	fifo_len = size;

	/* No need to try to read the data if the length is 0 */
	if (fifo_len == 0)
		return;

	/* Add a TLV for the RXF */
	(*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RXF);
	(*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr));

	fifo_hdr->fifo_num = cpu_to_le32(fifo_num);
	fifo_hdr->available_bytes =
		cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
						RXF_RD_D_SPACE + offset));
	fifo_hdr->wr_ptr =
		cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
						RXF_RD_WR_PTR + offset));
	fifo_hdr->rd_ptr =
		cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
						RXF_RD_RD_PTR + offset));
	fifo_hdr->fence_ptr =
		cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
						RXF_RD_FENCE_PTR + offset));
	fifo_hdr->fence_mode =
		cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
						RXF_SET_FENCE_MODE + offset));

	/* Lock fence */
	iwl_trans_write_prph(fwrt->trans, RXF_SET_FENCE_MODE + offset, 0x1);
	/* Set the fence pointer to the same place as the WR pointer */
	iwl_trans_write_prph(fwrt->trans, RXF_LD_WR2FENCE + offset, 0x1);
	/* Set fence offset */
	iwl_trans_write_prph(fwrt->trans,
			     RXF_LD_FENCE_OFFSET_ADDR + offset, 0x0);

	/* Read FIFO */
	fifo_len /= sizeof(u32); /* Size in DWORDS */
	for (i = 0; i < fifo_len; i++)
		fifo_data[i] = iwl_trans_read_prph(fwrt->trans,
						   RXF_FIFO_RD_FENCE_INC +
						   offset);
	*dump_data = iwl_fw_error_next_data(*dump_data);
}
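
/*
 * Dump a single TX FIFO: fill the TLV header with the FIFO counters and
 * pointers, then read out the FIFO contents through the read-modify
 * register.
 */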
static void iwl_fwrt_dump_txf(struct iwl_fw_runtime *fwrt,
			      struct iwl_fw_error_dump_data **dump_data,
			      int size, u32 offset, int fifo_num)
{
	struct iwl_fw_error_dump_fifo *fifo_hdr;
	u32 *fifo_data;
	u32 fifo_len;
	int i;

	fifo_hdr = (void *)(*dump_data)->data;
	fifo_data = (void *)fifo_hdr->data;
	fifo_len = size;

	/* No need to try to read the data if the length is 0 */
	if (fifo_len == 0)
		return;

	/* Add a TLV for the FIFO */
	(*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXF);
	(*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr));

	fifo_hdr->fifo_num = cpu_to_le32(fifo_num);
	fifo_hdr->available_bytes =
		cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
						TXF_FIFO_ITEM_CNT + offset));
	fifo_hdr->wr_ptr =
		cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
						TXF_WR_PTR + offset));
	fifo_hdr->rd_ptr =
		cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
						TXF_RD_PTR + offset));
	fifo_hdr->fence_ptr =
		cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
						TXF_FENCE_PTR + offset));
	fifo_hdr->fence_mode =
		cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
						TXF_LOCK_FENCE + offset));

	/* Set the TXF_READ_MODIFY_ADDR to TXF_WR_PTR */
	iwl_trans_write_prph(fwrt->trans, TXF_READ_MODIFY_ADDR + offset,
			     TXF_WR_PTR + offset);

	/* Dummy-read to advance the read pointer to the head */
	iwl_trans_read_prph(fwrt->trans, TXF_READ_MODIFY_DATA + offset);

	/* Read FIFO */
	fifo_len /= sizeof(u32); /* Size in DWORDS */
	for (i = 0; i < fifo_len; i++)
		fifo_data[i] = iwl_trans_read_prph(fwrt->trans,
						   TXF_READ_MODIFY_DATA +
						   offset);
	*dump_data = iwl_fw_error_next_data(*dump_data);
}

static void iwl_fw_dump_fifos(struct iwl_fw_runtime *fwrt,
			      struct iwl_fw_error_dump_data **dump_data)
{
	struct iwl_fw_error_dump_fifo *fifo_hdr;
	struct iwl_fwrt_shared_mem_cfg *cfg = &fwrt->smem_cfg;
	u32 *fifo_data;
	u32 fifo_len;
	unsigned long flags;
	int i, j;

	if (!iwl_trans_grab_nic_access(fwrt->trans, &flags))
		return;

	/* Pull RXF1 */
	iwl_fwrt_dump_rxf(fwrt, dump_data, cfg->lmac[0].rxfifo1_size, 0, 0);
	/* Pull RXF2 */
	iwl_fwrt_dump_rxf(fwrt, dump_data, cfg->rxfifo2_size,
			  RXF_DIFF_FROM_PREV, 1);
	/* Pull LMAC2 RXF1 */
	if (fwrt->smem_cfg.num_lmacs > 1)
		iwl_fwrt_dump_rxf(fwrt, dump_data, cfg->lmac[1].rxfifo1_size,
				  LMAC2_PRPH_OFFSET, 2);

	/* Pull TXF data from LMAC1 */
	for (i = 0; i < fwrt->smem_cfg.num_txfifo_entries; i++) {
		/* Mark the number of TXF we're pulling now */
		iwl_trans_write_prph(fwrt->trans, TXF_LARC_NUM, i);
		iwl_fwrt_dump_txf(fwrt, dump_data, cfg->lmac[0].txfifo_size[i],
				  0, i);
	}

	/* Pull TXF data from LMAC2 */
	if (fwrt->smem_cfg.num_lmacs > 1) {
		for (i = 0; i < fwrt->smem_cfg.num_txfifo_entries; i++) {
			/* Mark the number of TXF we're pulling now */
			iwl_trans_write_prph(fwrt->trans,
					     TXF_LARC_NUM + LMAC2_PRPH_OFFSET,
					     i);
			iwl_fwrt_dump_txf(fwrt, dump_data,
					  cfg->lmac[1].txfifo_size[i],
					  LMAC2_PRPH_OFFSET,
					  i + cfg->num_txfifo_entries);
		}
	}
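
	/*
	 * UMAC internal TX FIFOs are only dumped when the firmware
	 * advertises the extended shared memory configuration.
	 */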
	if (fw_has_capa(&fwrt->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
		/* Pull UMAC internal TXF data from all TXFs */
		for (i = 0;
		     i < ARRAY_SIZE(fwrt->smem_cfg.internal_txfifo_size);
		     i++) {
			fifo_hdr = (void *)(*dump_data)->data;
			fifo_data = (void *)fifo_hdr->data;
			fifo_len = fwrt->smem_cfg.internal_txfifo_size[i];

			/* No need to try to read the data if the length is 0 */
			if (fifo_len == 0)
				continue;

			/* Add a TLV for the internal FIFOs */
			(*dump_data)->type =
				cpu_to_le32(IWL_FW_ERROR_DUMP_INTERNAL_TXF);
			(*dump_data)->len =
				cpu_to_le32(fifo_len + sizeof(*fifo_hdr));

			fifo_hdr->fifo_num = cpu_to_le32(i);

			/* Mark the number of TXF we're pulling now */
			iwl_trans_write_prph(fwrt->trans, TXF_CPU2_NUM, i +
				fwrt->smem_cfg.num_txfifo_entries);

			fifo_hdr->available_bytes =
				cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
								TXF_CPU2_FIFO_ITEM_CNT));
			fifo_hdr->wr_ptr =
				cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
								TXF_CPU2_WR_PTR));
			fifo_hdr->rd_ptr =
				cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
								TXF_CPU2_RD_PTR));
			fifo_hdr->fence_ptr =
				cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
								TXF_CPU2_FENCE_PTR));
			fifo_hdr->fence_mode =
				cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
								TXF_CPU2_LOCK_FENCE));

			/* Set TXF_CPU2_READ_MODIFY_ADDR to TXF_CPU2_WR_PTR */
			iwl_trans_write_prph(fwrt->trans,
					     TXF_CPU2_READ_MODIFY_ADDR,
					     TXF_CPU2_WR_PTR);

			/* Dummy-read to advance the read pointer to head */
			iwl_trans_read_prph(fwrt->trans,
					    TXF_CPU2_READ_MODIFY_DATA);

			/* Read FIFO */
			fifo_len /= sizeof(u32); /* Size in DWORDS */
			for (j = 0; j < fifo_len; j++)
				fifo_data[j] =
					iwl_trans_read_prph(fwrt->trans,
							    TXF_CPU2_READ_MODIFY_DATA);
			*dump_data = iwl_fw_error_next_data(*dump_data);
		}
	}

	iwl_trans_release_nic_access(fwrt->trans, &flags);
}

#define IWL8260_ICCM_OFFSET		0x44000 /* Only for B-step */
#define IWL8260_ICCM_LEN		0xC000 /* Only for B-step */

struct iwl_prph_range {
	u32 start, end;
};

static const struct iwl_prph_range iwl_prph_dump_addr_comm[] = {
	{ .start = 0x00a00000, .end = 0x00a00000 },
	{ .start = 0x00a0000c, .end = 0x00a00024 },
	{ .start = 0x00a0002c, .end = 0x00a0003c },
	{ .start = 0x00a00410, .end = 0x00a00418 },
	{ .start = 0x00a00420, .end = 0x00a00420 },
	{ .start = 0x00a00428, .end = 0x00a00428 },
	{ .start = 0x00a00430, .end = 0x00a0043c },
	{ .start = 0x00a00444, .end = 0x00a00444 },
	{ .start = 0x00a004c0, .end = 0x00a004cc },
	{ .start = 0x00a004d8, .end = 0x00a004d8 },
	{ .start = 0x00a004e0, .end = 0x00a004f0 },
	{ .start = 0x00a00840, .end = 0x00a00840 },
	{ .start = 0x00a00850, .end = 0x00a00858 },
	{ .start = 0x00a01004, .end = 0x00a01008 },
	{ .start = 0x00a01010, .end = 0x00a01010 },
	{ .start = 0x00a01018, .end = 0x00a01018 },
	{ .start = 0x00a01024, .end = 0x00a01024 },
	{ .start = 0x00a0102c, .end = 0x00a01034 },
	{ .start = 0x00a0103c, .end = 0x00a01040 },
	{ .start = 0x00a01048, .end = 0x00a01094 },
	{ .start = 0x00a01c00, .end = 0x00a01c20 },
	{ .start = 0x00a01c58, .end = 0x00a01c58 },
	{ .start = 0x00a01c7c, .end = 0x00a01c7c },
	{ .start = 0x00a01c28, .end = 0x00a01c54 },
	{ .start = 0x00a01c5c, .end = 0x00a01c5c },
	{ .start = 0x00a01c60, .end = 0x00a01cdc },
	{ .start = 0x00a01ce0, .end = 0x00a01d0c },
	{ .start = 0x00a01d18, .end = 0x00a01d20 },
	{ .start = 0x00a01d2c, .end = 0x00a01d30 },
	{ .start = 0x00a01d40, .end = 0x00a01d5c },
	{ .start = 0x00a01d80, .end = 0x00a01d80 },
	{ .start = 0x00a01d98, .end = 0x00a01d9c },
	{ .start = 0x00a01da8, .end = 0x00a01da8 },
	{ .start = 0x00a01db8, .end = 0x00a01df4 },
	{ .start = 0x00a01dc0, .end = 0x00a01dfc },
	{ .start = 0x00a01e00, .end = 0x00a01e2c },
	{ .start = 0x00a01e40, .end = 0x00a01e60 },
	{ .start = 0x00a01e68, .end = 0x00a01e6c },
	{ .start = 0x00a01e74, .end = 0x00a01e74 },
	{ .start = 0x00a01e84, .end = 0x00a01e90 },
	{ .start = 0x00a01e9c, .end = 0x00a01ec4 },
	{ .start = 0x00a01ed0, .end = 0x00a01ee0 },
	{ .start = 0x00a01f00, .end = 0x00a01f1c },
	{ .start = 0x00a01f44, .end = 0x00a01ffc },
	{ .start = 0x00a02000, .end = 0x00a02048 },
	{ .start = 0x00a02068, .end = 0x00a020f0 },
	{ .start = 0x00a02100, .end = 0x00a02118 },
	{ .start = 0x00a02140, .end = 0x00a0214c },
	{ .start = 0x00a02168, .end = 0x00a0218c },
	{ .start = 0x00a021c0, .end = 0x00a021c0 },
	{ .start = 0x00a02400, .end = 0x00a02410 },
	{ .start = 0x00a02418, .end = 0x00a02420 },
	{ .start = 0x00a02428, .end = 0x00a0242c },
	{ .start = 0x00a02434, .end = 0x00a02434 },
	{ .start = 0x00a02440, .end = 0x00a02460 },
	{ .start = 0x00a02468, .end = 0x00a024b0 },
	{ .start = 0x00a024c8, .end = 0x00a024cc },
	{ .start = 0x00a02500, .end = 0x00a02504 },
	{ .start = 0x00a0250c, .end = 0x00a02510 },
	{ .start = 0x00a02540, .end = 0x00a02554 },
	{ .start = 0x00a02580, .end = 0x00a025f4 },
	{ .start = 0x00a02600, .end = 0x00a0260c },
	{ .start = 0x00a02648, .end = 0x00a02650 },
	{ .start = 0x00a02680, .end = 0x00a02680 },
	{ .start = 0x00a026c0, .end = 0x00a026d0 },
	{ .start = 0x00a02700, .end = 0x00a0270c },
	{ .start = 0x00a02804, .end = 0x00a02804 },
	{ .start = 0x00a02818, .end = 0x00a0281c },
	{ .start = 0x00a02c00, .end = 0x00a02db4 },
	{ .start = 0x00a02df4, .end = 0x00a02fb0 },
	{ .start = 0x00a03000, .end = 0x00a03014 },
	{ .start = 0x00a0301c, .end = 0x00a0302c },
	{ .start = 0x00a03034, .end = 0x00a03038 },
	{ .start = 0x00a03040, .end = 0x00a03048 },
	{ .start = 0x00a03060, .end = 0x00a03068 },
	{ .start = 0x00a03070, .end = 0x00a03074 },
	{ .start = 0x00a0307c, .end = 0x00a0307c },
	{ .start = 0x00a03080, .end = 0x00a03084 },
	{ .start = 0x00a0308c, .end = 0x00a03090 },
	{ .start = 0x00a03098, .end = 0x00a03098 },
	{ .start = 0x00a030a0, .end = 0x00a030a0 },
	{ .start = 0x00a030a8, .end = 0x00a030b4 },
	{ .start = 0x00a030bc, .end = 0x00a030bc },
	{ .start = 0x00a030c0, .end = 0x00a0312c },
	{ .start = 0x00a03c00, .end = 0x00a03c5c },
	{ .start = 0x00a04400, .end = 0x00a04454 },
	{ .start = 0x00a04460, .end = 0x00a04474 },
	{ .start = 0x00a044c0, .end = 0x00a044ec },
	{ .start = 0x00a04500, .end = 0x00a04504 },
	{ .start = 0x00a04510, .end = 0x00a04538 },
	{ .start = 0x00a04540, .end = 0x00a04548 },
	{ .start = 0x00a04560, .end = 0x00a0457c },
	{ .start = 0x00a04590, .end = 0x00a04598 },
	{ .start = 0x00a045c0, .end = 0x00a045f4 },
};

static const struct iwl_prph_range iwl_prph_dump_addr_9000[] = {
	{ .start = 0x00a05c00, .end = 0x00a05c18 },
	{ .start = 0x00a05400, .end = 0x00a056e8 },
	{ .start = 0x00a08000, .end = 0x00a098bc },
	{ .start = 0x00a02400, .end = 0x00a02758 },
};

static void _iwl_read_prph_block(struct iwl_trans *trans, u32 start,
				 u32 len_bytes, __le32 *data)
{
	u32 i;

	for (i = 0; i < len_bytes; i += 4)
		*data++ = cpu_to_le32(iwl_read_prph_no_grab(trans, start + i));
}

static bool iwl_read_prph_block(struct iwl_trans *trans, u32 start,
				u32 len_bytes, __le32 *data)
{
	unsigned long flags;
	bool success = false;

	if (iwl_trans_grab_nic_access(trans, &flags)) {
		success = true;
		_iwl_read_prph_block(trans, start, len_bytes, data);
		iwl_trans_release_nic_access(trans, &flags);
	}

	return success;
}
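
/*
 * Dump the given list of periphery register ranges into PRPH TLVs.
 * NIC access is grabbed once for the whole list; both boundaries of
 * each range are inclusive.
 */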
static void iwl_dump_prph(struct iwl_trans *trans,
			  struct iwl_fw_error_dump_data **data,
			  const struct iwl_prph_range *iwl_prph_dump_addr,
			  u32 range_len)
{
	struct iwl_fw_error_dump_prph *prph;
	unsigned long flags;
	u32 i;

	if (!iwl_trans_grab_nic_access(trans, &flags))
		return;

	for (i = 0; i < range_len; i++) {
		/* The range includes both boundaries */
		int num_bytes_in_chunk = iwl_prph_dump_addr[i].end -
			iwl_prph_dump_addr[i].start + 4;

		(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PRPH);
		(*data)->len = cpu_to_le32(sizeof(*prph) +
					num_bytes_in_chunk);
		prph = (void *)(*data)->data;
		prph->prph_start = cpu_to_le32(iwl_prph_dump_addr[i].start);

		_iwl_read_prph_block(trans, iwl_prph_dump_addr[i].start,
				     /* our range is inclusive, hence + 4 */
				     iwl_prph_dump_addr[i].end -
				     iwl_prph_dump_addr[i].start + 4,
				     (void *)prph->data);

		*data = iwl_fw_error_next_data(*data);
	}

	iwl_trans_release_nic_access(trans, &flags);
}

/*
 * alloc_sgtable - allocates a scatterlist table of the given size,
 *	fills it with pages and returns it
 * @size: the size (in bytes) of the table
 */
static struct scatterlist *alloc_sgtable(int size)
{
	int alloc_size, nents, i;
	struct page *new_page;
	struct scatterlist *iter;
	struct scatterlist *table;

	nents = DIV_ROUND_UP(size, PAGE_SIZE);
	table = kcalloc(nents, sizeof(*table), GFP_KERNEL);
	if (!table)
		return NULL;
	sg_init_table(table, nents);
	iter = table;
	for_each_sg(table, iter, sg_nents(table), i) {
		new_page = alloc_page(GFP_KERNEL);
		if (!new_page) {
			/* release all previous allocated pages in the table */
			iter = table;
			for_each_sg(table, iter, sg_nents(table), i) {
				new_page = sg_page(iter);
				if (new_page)
					__free_page(new_page);
			}
			return NULL;
		}
		alloc_size = min_t(int, size, PAGE_SIZE);
		size -= PAGE_SIZE;
		sg_set_page(iter, new_page, alloc_size, 0);
	}
	return table;
}
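
/*
 * Collect the firmware error dump: build the fwrt part of the dump file
 * in a vmalloc'ed buffer, append the transport dump and hand the result
 * to devcoredump as a scatterlist.
 */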
void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
{
	struct iwl_fw_error_dump_file *dump_file;
	struct iwl_fw_error_dump_data *dump_data;
	struct iwl_fw_error_dump_info *dump_info;
	struct iwl_fw_error_dump_mem *dump_mem;
	struct iwl_fw_error_dump_smem_cfg *dump_smem_cfg;
	struct iwl_fw_error_dump_trigger_desc *dump_trig;
	struct iwl_fw_dump_ptrs *fw_error_dump;
	struct scatterlist *sg_dump_data;
	u32 sram_len, sram_ofs;
	const struct iwl_fw_dbg_mem_seg_tlv *fw_dbg_mem = fwrt->fw->dbg_mem_tlv;
	struct iwl_fwrt_shared_mem_cfg *mem_cfg = &fwrt->smem_cfg;
	u32 file_len, fifo_data_len = 0, prph_len = 0, radio_len = 0;
	u32 smem_len = fwrt->fw->n_dbg_mem_tlv ? 0 : fwrt->trans->cfg->smem_len;
	u32 sram2_len = fwrt->fw->n_dbg_mem_tlv ?
				0 : fwrt->trans->cfg->dccm2_len;
	bool monitor_dump_only = false;
	int i;

	/* there's no point in fw dump if the bus is dead */
	if (test_bit(STATUS_TRANS_DEAD, &fwrt->trans->status)) {
		IWL_ERR(fwrt, "Skip fw error dump since bus is dead\n");
		goto out;
	}

	if (fwrt->dump.trig &&
	    fwrt->dump.trig->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY)
		monitor_dump_only = true;

	fw_error_dump = kzalloc(sizeof(*fw_error_dump), GFP_KERNEL);
	if (!fw_error_dump)
		goto out;

	/* SRAM - include stack CCM if driver knows the values for it */
	if (!fwrt->trans->cfg->dccm_offset || !fwrt->trans->cfg->dccm_len) {
		const struct fw_img *img;

		img = &fwrt->fw->img[fwrt->cur_fw_img];
		sram_ofs = img->sec[IWL_UCODE_SECTION_DATA].offset;
		sram_len = img->sec[IWL_UCODE_SECTION_DATA].len;
	} else {
		sram_ofs = fwrt->trans->cfg->dccm_offset;
		sram_len = fwrt->trans->cfg->dccm_len;
	}

	/* reading RXF/TXF sizes */
	if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status)) {
		fifo_data_len = 0;

		/* Count RXF2 size */
		if (mem_cfg->rxfifo2_size) {
			/* Add header info */
			fifo_data_len += mem_cfg->rxfifo2_size +
					 sizeof(*dump_data) +
					 sizeof(struct iwl_fw_error_dump_fifo);
		}

		/* Count RXF1 sizes */
		for (i = 0; i < mem_cfg->num_lmacs; i++) {
			if (!mem_cfg->lmac[i].rxfifo1_size)
				continue;

			/* Add header info */
			fifo_data_len += mem_cfg->lmac[i].rxfifo1_size +
					 sizeof(*dump_data) +
					 sizeof(struct iwl_fw_error_dump_fifo);
		}

		/* Count TXF sizes */
		for (i = 0; i < mem_cfg->num_lmacs; i++) {
			int j;

			for (j = 0; j < mem_cfg->num_txfifo_entries; j++) {
				if (!mem_cfg->lmac[i].txfifo_size[j])
					continue;

				/* Add header info */
				fifo_data_len +=
					mem_cfg->lmac[i].txfifo_size[j] +
					sizeof(*dump_data) +
					sizeof(struct iwl_fw_error_dump_fifo);
			}
		}

		if (fw_has_capa(&fwrt->fw->ucode_capa,
				IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
			for (i = 0;
			     i < ARRAY_SIZE(mem_cfg->internal_txfifo_size);
			     i++) {
				if (!mem_cfg->internal_txfifo_size[i])
					continue;

				/* Add header info */
				fifo_data_len +=
					mem_cfg->internal_txfifo_size[i] +
					sizeof(*dump_data) +
					sizeof(struct iwl_fw_error_dump_fifo);
			}
		}

		/* Make room for PRPH registers */
		if (!fwrt->trans->cfg->gen2) {
			for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr_comm);
			     i++) {
				/* The range includes both boundaries */
				int num_bytes_in_chunk =
					iwl_prph_dump_addr_comm[i].end -
					iwl_prph_dump_addr_comm[i].start + 4;

				prph_len += sizeof(*dump_data) +
					sizeof(struct iwl_fw_error_dump_prph) +
					num_bytes_in_chunk;
			}
		}

		if (!fwrt->trans->cfg->gen2 &&
		    fwrt->trans->cfg->mq_rx_supported) {
			for (i = 0; i <
				ARRAY_SIZE(iwl_prph_dump_addr_9000); i++) {
				/* The range includes both boundaries */
				int num_bytes_in_chunk =
					iwl_prph_dump_addr_9000[i].end -
					iwl_prph_dump_addr_9000[i].start + 4;

				prph_len += sizeof(*dump_data) +
					sizeof(struct iwl_fw_error_dump_prph) +
					num_bytes_in_chunk;
			}
		}

		if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000)
			radio_len = sizeof(*dump_data) + RADIO_REG_MAX_READ;
	}
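
	/* Compute the length of the fwrt part of the dump file; the
	 * transport dump is appended separately later.
	 */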
	file_len = sizeof(*dump_file) +
		   sizeof(*dump_data) * 3 +
		   sizeof(*dump_smem_cfg) +
		   fifo_data_len +
		   prph_len +
		   radio_len +
		   sizeof(*dump_info);

	/* Make room for the SMEM, if it exists */
	if (smem_len)
		file_len += sizeof(*dump_data) + sizeof(*dump_mem) + smem_len;

	/* Make room for the secondary SRAM, if it exists */
	if (sram2_len)
		file_len += sizeof(*dump_data) + sizeof(*dump_mem) + sram2_len;

	/* Make room for MEM segments */
	for (i = 0; i < fwrt->fw->n_dbg_mem_tlv; i++) {
		file_len += sizeof(*dump_data) + sizeof(*dump_mem) +
			    le32_to_cpu(fw_dbg_mem[i].len);
	}

	/* Make room for the fw's virtual image pages, if they exist */
	if (!fwrt->trans->cfg->gen2 &&
	    fwrt->fw->img[fwrt->cur_fw_img].paging_mem_size &&
	    fwrt->fw_paging_db[0].fw_paging_block)
		file_len += fwrt->num_of_paging_blk *
			(sizeof(*dump_data) +
			 sizeof(struct iwl_fw_error_dump_paging) +
			 PAGING_BLOCK_SIZE);

	/* If we only want a monitor dump, reset the file length */
	if (monitor_dump_only) {
		file_len = sizeof(*dump_file) + sizeof(*dump_data) * 2 +
			   sizeof(*dump_info) + sizeof(*dump_smem_cfg);
	}

	if (fwrt->dump.desc)
		file_len += sizeof(*dump_data) + sizeof(*dump_trig) +
			    fwrt->dump.desc->len;

	if (!fwrt->fw->n_dbg_mem_tlv)
		file_len += sram_len + sizeof(*dump_mem);

	dump_file = vzalloc(file_len);
	if (!dump_file) {
		kfree(fw_error_dump);
		goto out;
	}

	fw_error_dump->fwrt_ptr = dump_file;

	dump_file->barker = cpu_to_le32(IWL_FW_ERROR_DUMP_BARKER);
	dump_data = (void *)dump_file->data;

	dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_DEV_FW_INFO);
	dump_data->len = cpu_to_le32(sizeof(*dump_info));
	dump_info = (void *)dump_data->data;
	dump_info->device_family =
		fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000 ?
			cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_7) :
			cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_8);
	dump_info->hw_step = cpu_to_le32(CSR_HW_REV_STEP(fwrt->trans->hw_rev));
	memcpy(dump_info->fw_human_readable, fwrt->fw->human_readable,
	       sizeof(dump_info->fw_human_readable));
	strncpy(dump_info->dev_human_readable, fwrt->trans->cfg->name,
		sizeof(dump_info->dev_human_readable));
	strncpy(dump_info->bus_human_readable, fwrt->dev->bus->name,
		sizeof(dump_info->bus_human_readable));

	dump_data = iwl_fw_error_next_data(dump_data);

	/* Dump shared memory configuration */
	dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_CFG);
	dump_data->len = cpu_to_le32(sizeof(*dump_smem_cfg));
	dump_smem_cfg = (void *)dump_data->data;
	dump_smem_cfg->num_lmacs = cpu_to_le32(mem_cfg->num_lmacs);
	dump_smem_cfg->num_txfifo_entries =
		cpu_to_le32(mem_cfg->num_txfifo_entries);
	for (i = 0; i < MAX_NUM_LMAC; i++) {
		int j;

		for (j = 0; j < TX_FIFO_MAX_NUM; j++)
			dump_smem_cfg->lmac[i].txfifo_size[j] =
				cpu_to_le32(mem_cfg->lmac[i].txfifo_size[j]);
		dump_smem_cfg->lmac[i].rxfifo1_size =
			cpu_to_le32(mem_cfg->lmac[i].rxfifo1_size);
	}
	dump_smem_cfg->rxfifo2_size = cpu_to_le32(mem_cfg->rxfifo2_size);
	dump_smem_cfg->internal_txfifo_addr =
		cpu_to_le32(mem_cfg->internal_txfifo_addr);
	for (i = 0; i < TX_FIFO_INTERNAL_MAX_NUM; i++) {
		dump_smem_cfg->internal_txfifo_size[i] =
			cpu_to_le32(mem_cfg->internal_txfifo_size[i]);
	}

	dump_data = iwl_fw_error_next_data(dump_data);

	/* We only dump the FIFOs if the FW is in error state */
	if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status)) {
		iwl_fw_dump_fifos(fwrt, &dump_data);
		if (radio_len)
			iwl_read_radio_regs(fwrt, &dump_data);
	}
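
	/* TLV describing the trigger/error that caused this dump, if set */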
	if (fwrt->dump.desc) {
		dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_ERROR_INFO);
		dump_data->len = cpu_to_le32(sizeof(*dump_trig) +
					     fwrt->dump.desc->len);
		dump_trig = (void *)dump_data->data;
		memcpy(dump_trig, &fwrt->dump.desc->trig_desc,
		       sizeof(*dump_trig) + fwrt->dump.desc->len);

		dump_data = iwl_fw_error_next_data(dump_data);
	}

	/* In case we only want monitor dump, skip to dump transport data */
	if (monitor_dump_only)
		goto dump_trans_data;

	if (!fwrt->fw->n_dbg_mem_tlv) {
		dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
		dump_data->len = cpu_to_le32(sram_len + sizeof(*dump_mem));
		dump_mem = (void *)dump_data->data;
		dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM);
		dump_mem->offset = cpu_to_le32(sram_ofs);
		iwl_trans_read_mem_bytes(fwrt->trans, sram_ofs, dump_mem->data,
					 sram_len);
		dump_data = iwl_fw_error_next_data(dump_data);
	}

	for (i = 0; i < fwrt->fw->n_dbg_mem_tlv; i++) {
		u32 len = le32_to_cpu(fw_dbg_mem[i].len);
		u32 ofs = le32_to_cpu(fw_dbg_mem[i].ofs);
		bool success;

		dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
		dump_data->len = cpu_to_le32(len + sizeof(*dump_mem));
		dump_mem = (void *)dump_data->data;
		dump_mem->type = fw_dbg_mem[i].data_type;
		dump_mem->offset = cpu_to_le32(ofs);

		switch (dump_mem->type & cpu_to_le32(FW_DBG_MEM_TYPE_MASK)) {
		case cpu_to_le32(FW_DBG_MEM_TYPE_REGULAR):
			iwl_trans_read_mem_bytes(fwrt->trans, ofs,
						 dump_mem->data,
						 len);
			success = true;
			break;
		case cpu_to_le32(FW_DBG_MEM_TYPE_PRPH):
			success = iwl_read_prph_block(fwrt->trans, ofs, len,
						      (void *)dump_mem->data);
			break;
		default:
			/*
			 * shouldn't get here, we ignored this kind
			 * of TLV earlier during the TLV parsing?!
			 */
			WARN_ON(1);
			success = false;
		}

		if (success)
			dump_data = iwl_fw_error_next_data(dump_data);
	}
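
	/*
	 * SMEM and the secondary SRAM are only dumped when the firmware
	 * did not provide per-region debug memory TLVs.
	 */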
	if (smem_len) {
		dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
		dump_data->len = cpu_to_le32(smem_len + sizeof(*dump_mem));
		dump_mem = (void *)dump_data->data;
		dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SMEM);
		dump_mem->offset = cpu_to_le32(fwrt->trans->cfg->smem_offset);
		iwl_trans_read_mem_bytes(fwrt->trans,
					 fwrt->trans->cfg->smem_offset,
					 dump_mem->data, smem_len);
		dump_data = iwl_fw_error_next_data(dump_data);
	}

	if (sram2_len) {
		dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
		dump_data->len = cpu_to_le32(sram2_len + sizeof(*dump_mem));
		dump_mem = (void *)dump_data->data;
		dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM);
		dump_mem->offset = cpu_to_le32(fwrt->trans->cfg->dccm2_offset);
		iwl_trans_read_mem_bytes(fwrt->trans,
					 fwrt->trans->cfg->dccm2_offset,
					 dump_mem->data, sram2_len);
		dump_data = iwl_fw_error_next_data(dump_data);
	}

	/* Dump fw's virtual image */
	if (!fwrt->trans->cfg->gen2 &&
	    fwrt->fw->img[fwrt->cur_fw_img].paging_mem_size &&
	    fwrt->fw_paging_db[0].fw_paging_block) {
		for (i = 1; i < fwrt->num_of_paging_blk + 1; i++) {
			struct iwl_fw_error_dump_paging *paging;
			struct page *pages =
				fwrt->fw_paging_db[i].fw_paging_block;
			dma_addr_t addr = fwrt->fw_paging_db[i].fw_paging_phys;

			dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING);
			dump_data->len = cpu_to_le32(sizeof(*paging) +
						     PAGING_BLOCK_SIZE);
			paging = (void *)dump_data->data;
			paging->index = cpu_to_le32(i);
			dma_sync_single_for_cpu(fwrt->trans->dev, addr,
						PAGING_BLOCK_SIZE,
						DMA_BIDIRECTIONAL);
			memcpy(paging->data, page_address(pages),
			       PAGING_BLOCK_SIZE);
			dump_data = iwl_fw_error_next_data(dump_data);
		}
	}

	if (prph_len) {
		iwl_dump_prph(fwrt->trans, &dump_data,
			      iwl_prph_dump_addr_comm,
			      ARRAY_SIZE(iwl_prph_dump_addr_comm));

		if (fwrt->trans->cfg->mq_rx_supported)
			iwl_dump_prph(fwrt->trans, &dump_data,
				      iwl_prph_dump_addr_9000,
				      ARRAY_SIZE(iwl_prph_dump_addr_9000));
	}

dump_trans_data:
	fw_error_dump->trans_ptr = iwl_trans_dump_data(fwrt->trans,
						       fwrt->dump.trig);
	fw_error_dump->fwrt_len = file_len;
	if (fw_error_dump->trans_ptr)
		file_len += fw_error_dump->trans_ptr->len;
	dump_file->file_len = cpu_to_le32(file_len);

	sg_dump_data = alloc_sgtable(file_len);
	if (sg_dump_data) {
		sg_pcopy_from_buffer(sg_dump_data,
				     sg_nents(sg_dump_data),
				     fw_error_dump->fwrt_ptr,
				     fw_error_dump->fwrt_len, 0);
		if (fw_error_dump->trans_ptr)
			sg_pcopy_from_buffer(sg_dump_data,
					     sg_nents(sg_dump_data),
					     fw_error_dump->trans_ptr->data,
					     fw_error_dump->trans_ptr->len,
					     fw_error_dump->fwrt_len);
		dev_coredumpsg(fwrt->trans->dev, sg_dump_data, file_len,
			       GFP_KERNEL);
	}
	vfree(fw_error_dump->fwrt_ptr);
	vfree(fw_error_dump->trans_ptr);
	kfree(fw_error_dump);

out:
	iwl_fw_free_dump_desc(fwrt);
	fwrt->dump.trig = NULL;
	clear_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status);
}
IWL_EXPORT_SYMBOL(iwl_fw_error_dump);

const struct iwl_fw_dump_desc iwl_dump_desc_assert = {
	.trig_desc = {
		.type = cpu_to_le32(FW_DBG_TRIGGER_FW_ASSERT),
	},
};
IWL_EXPORT_SYMBOL(iwl_dump_desc_assert);
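
/*
 * Schedule collection of a firmware dump for the given dump descriptor.
 * Returns -EIO if the firmware isn't alive and -EBUSY if a collection is
 * already in progress; otherwise the dump worker is scheduled after the
 * trigger's stop delay.
 */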
int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt,
			    const struct iwl_fw_dump_desc *desc,
			    const struct iwl_fw_dbg_trigger_tlv *trigger)
{
	unsigned int delay = 0;

	if (trigger)
		delay = msecs_to_jiffies(le32_to_cpu(trigger->stop_delay));

	if (WARN(fwrt->trans->state == IWL_TRANS_NO_FW,
		 "Can't collect dbg data when FW isn't alive\n"))
		return -EIO;

	if (test_and_set_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status))
		return -EBUSY;

	if (WARN_ON(fwrt->dump.desc))
		iwl_fw_free_dump_desc(fwrt);

	IWL_WARN(fwrt, "Collecting data: trigger %d fired.\n",
		 le32_to_cpu(desc->trig_desc.type));

	fwrt->dump.desc = desc;
	fwrt->dump.trig = trigger;

	schedule_delayed_work(&fwrt->dump.wk, delay);

	return 0;
}
IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect_desc);

int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt,
		       enum iwl_fw_dbg_trigger trig,
		       const char *str, size_t len,
		       const struct iwl_fw_dbg_trigger_tlv *trigger)
{
	struct iwl_fw_dump_desc *desc;

	desc = kzalloc(sizeof(*desc) + len, GFP_ATOMIC);
	if (!desc)
		return -ENOMEM;

	desc->len = len;
	desc->trig_desc.type = cpu_to_le32(trig);
	memcpy(desc->trig_desc.data, str, len);

	return iwl_fw_dbg_collect_desc(fwrt, desc, trigger);
}
IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect);

int iwl_fw_dbg_collect_trig(struct iwl_fw_runtime *fwrt,
			    struct iwl_fw_dbg_trigger_tlv *trigger,
			    const char *fmt, ...)
{
	u16 occurrences = le16_to_cpu(trigger->occurrences);
	int ret, len = 0;
	char buf[64];

	if (!occurrences)
		return 0;

	if (fmt) {
		va_list ap;

		buf[sizeof(buf) - 1] = '\0';

		va_start(ap, fmt);
		vsnprintf(buf, sizeof(buf), fmt, ap);
		va_end(ap);

		/* check for truncation */
		if (WARN_ON_ONCE(buf[sizeof(buf) - 1]))
			buf[sizeof(buf) - 1] = '\0';

		len = strlen(buf) + 1;
	}

	ret = iwl_fw_dbg_collect(fwrt, le32_to_cpu(trigger->id), buf, len,
				 trigger);

	if (ret)
		return ret;

	trigger->occurrences = cpu_to_le16(occurrences - 1);
	return 0;
}
IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect_trig);

int iwl_fw_start_dbg_conf(struct iwl_fw_runtime *fwrt, u8 conf_id)
{
	u8 *ptr;
	int ret;
	int i;

	if (WARN_ONCE(conf_id >= ARRAY_SIZE(fwrt->fw->dbg_conf_tlv),
		      "Invalid configuration %d\n", conf_id))
		return -EINVAL;

	/* EARLY START - firmware's configuration is hard coded */
	if ((!fwrt->fw->dbg_conf_tlv[conf_id] ||
	     !fwrt->fw->dbg_conf_tlv[conf_id]->num_of_hcmds) &&
	    conf_id == FW_DBG_START_FROM_ALIVE)
		return 0;

	if (!fwrt->fw->dbg_conf_tlv[conf_id])
		return -EINVAL;

	if (fwrt->dump.conf != FW_DBG_INVALID)
		IWL_WARN(fwrt, "FW already configured (%d) - re-configuring\n",
			 fwrt->dump.conf);

	/* Send all HCMDs for configuring the FW debug */
	ptr = (void *)&fwrt->fw->dbg_conf_tlv[conf_id]->hcmd;
	for (i = 0; i < fwrt->fw->dbg_conf_tlv[conf_id]->num_of_hcmds; i++) {
		struct iwl_fw_dbg_conf_hcmd *cmd = (void *)ptr;
		struct iwl_host_cmd hcmd = {
			.id = cmd->id,
			.len = { le16_to_cpu(cmd->len), },
			.data = { cmd->data, },
		};

		ret = iwl_trans_send_cmd(fwrt->trans, &hcmd);
		if (ret)
			return ret;

		ptr += sizeof(*cmd);
		ptr += le16_to_cpu(cmd->len);
	}

	fwrt->dump.conf = conf_id;

	return 0;
}
IWL_EXPORT_SYMBOL(iwl_fw_start_dbg_conf);
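
/*
 * Delayed work that performs the collection: stop the firmware debug
 * recording, collect the dump and restart recording if the firmware
 * did not crash.
 */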
void iwl_fw_error_dump_wk(struct work_struct *work)
{
	struct iwl_fw_runtime *fwrt =
		container_of(work, struct iwl_fw_runtime, dump.wk.work);

	if (fwrt->ops && fwrt->ops->dump_start &&
	    fwrt->ops->dump_start(fwrt->ops_ctx))
		return;

	if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
		/* stop recording */
		iwl_set_bits_prph(fwrt->trans, MON_BUFF_SAMPLE_CTL, 0x100);

		iwl_fw_error_dump(fwrt);

		/* start recording again if the firmware is not crashed */
		if (!test_bit(STATUS_FW_ERROR, &fwrt->trans->status) &&
		    fwrt->fw->dbg_dest_tlv) {
			iwl_clear_bits_prph(fwrt->trans,
					    MON_BUFF_SAMPLE_CTL, 0x100);
			iwl_clear_bits_prph(fwrt->trans,
					    MON_BUFF_SAMPLE_CTL, 0x1);
			iwl_set_bits_prph(fwrt->trans,
					  MON_BUFF_SAMPLE_CTL, 0x1);
		}
	} else {
		u32 in_sample = iwl_read_prph(fwrt->trans, DBGC_IN_SAMPLE);
		u32 out_ctrl = iwl_read_prph(fwrt->trans, DBGC_OUT_CTRL);

		/* stop recording */
		iwl_write_prph(fwrt->trans, DBGC_IN_SAMPLE, 0);
		udelay(100);
		iwl_write_prph(fwrt->trans, DBGC_OUT_CTRL, 0);
		/* wait for the DBGC to stop before we collect the data */
		udelay(500);

		iwl_fw_error_dump(fwrt);

		/* start recording again if the firmware is not crashed */
		if (!test_bit(STATUS_FW_ERROR, &fwrt->trans->status) &&
		    fwrt->fw->dbg_dest_tlv) {
			iwl_write_prph(fwrt->trans, DBGC_IN_SAMPLE, in_sample);
			iwl_write_prph(fwrt->trans, DBGC_OUT_CTRL, out_ctrl);
		}
	}

	if (fwrt->ops && fwrt->ops->dump_end)
		fwrt->ops->dump_end(fwrt->ops_ctx);
}