1 /******************************************************************************
2  *
3  * This file is provided under a dual BSD/GPLv2 license.  When using or
4  * redistributing this file, you may do so under either license.
5  *
6  * GPL LICENSE SUMMARY
7  *
8  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10  * Copyright(c) 2016 Intel Deutschland GmbH
11  *
12  * This program is free software; you can redistribute it and/or modify
13  * it under the terms of version 2 of the GNU General Public License as
14  * published by the Free Software Foundation.
15  *
16  * This program is distributed in the hope that it will be useful, but
17  * WITHOUT ANY WARRANTY; without even the implied warranty of
18  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
19  * General Public License for more details.
20  *
21  * You should have received a copy of the GNU General Public License
22  * along with this program; if not, write to the Free Software
23  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
24  * USA
25  *
26  * The full GNU General Public License is included in this distribution
27  * in the file called COPYING.
28  *
29  * Contact Information:
30  *  Intel Linux Wireless <linuxwifi@intel.com>
31  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
32  *
33  * BSD LICENSE
34  *
35  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
36  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
37  * All rights reserved.
38  *
39  * Redistribution and use in source and binary forms, with or without
40  * modification, are permitted provided that the following conditions
41  * are met:
42  *
43  *  * Redistributions of source code must retain the above copyright
44  *    notice, this list of conditions and the following disclaimer.
45  *  * Redistributions in binary form must reproduce the above copyright
46  *    notice, this list of conditions and the following disclaimer in
47  *    the documentation and/or other materials provided with the
48  *    distribution.
49  *  * Neither the name Intel Corporation nor the names of its
50  *    contributors may be used to endorse or promote products derived
51  *    from this software without specific prior written permission.
52  *
53  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
54  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
55  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
56  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
57  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
58  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
59  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
60  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
61  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
62  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
63  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
64  *
65  *****************************************************************************/
66 #include <net/mac80211.h>
67 #include <linux/netdevice.h>
68 #include <linux/acpi.h>
69 
70 #include "iwl-trans.h"
71 #include "iwl-op-mode.h"
72 #include "iwl-fw.h"
73 #include "iwl-debug.h"
74 #include "iwl-csr.h" /* for iwl_mvm_rx_card_state_notif */
75 #include "iwl-io.h" /* for iwl_mvm_rx_card_state_notif */
76 #include "iwl-prph.h"
77 #include "iwl-eeprom-parse.h"
78 
79 #include "mvm.h"
80 #include "fw-dbg.h"
81 #include "iwl-phy-db.h"
82 
83 #define MVM_UCODE_ALIVE_TIMEOUT	HZ
84 #define MVM_UCODE_CALIB_TIMEOUT	(2*HZ)
85 
86 #define UCODE_VALID_OK	cpu_to_le32(0x1)
87 
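/* data extracted from the ALIVE notification by iwl_alive_fn() */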
88 struct iwl_mvm_alive_data {
89 	bool valid;
90 	u32 scd_base_addr;
91 };
92 
93 static int iwl_send_tx_ant_cfg(struct iwl_mvm *mvm, u8 valid_tx_ant)
94 {
95 	struct iwl_tx_ant_cfg_cmd tx_ant_cmd = {
96 		.valid = cpu_to_le32(valid_tx_ant),
97 	};
98 
99 	IWL_DEBUG_FW(mvm, "select valid tx ant: %u\n", valid_tx_ant);
100 	return iwl_mvm_send_cmd_pdu(mvm, TX_ANT_CONFIGURATION_CMD, 0,
101 				    sizeof(tx_ant_cmd), &tx_ant_cmd);
102 }
103 
104 static int iwl_send_rss_cfg_cmd(struct iwl_mvm *mvm)
105 {
106 	int i;
107 	struct iwl_rss_config_cmd cmd = {
108 		.flags = cpu_to_le32(IWL_RSS_ENABLE),
109 		.hash_mask = IWL_RSS_HASH_TYPE_IPV4_TCP |
110 			     IWL_RSS_HASH_TYPE_IPV4_UDP |
111 			     IWL_RSS_HASH_TYPE_IPV4_PAYLOAD |
112 			     IWL_RSS_HASH_TYPE_IPV6_TCP |
113 			     IWL_RSS_HASH_TYPE_IPV6_UDP |
114 			     IWL_RSS_HASH_TYPE_IPV6_PAYLOAD,
115 	};
116 
117 	if (mvm->trans->num_rx_queues == 1)
118 		return 0;
119 
120 	/* Do not direct RSS traffic to Q 0 which is our fallback queue */
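	/* e.g. with 3 RX queues the entries alternate between queues 1 and 2 */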
121 	for (i = 0; i < ARRAY_SIZE(cmd.indirection_table); i++)
122 		cmd.indirection_table[i] =
123 			1 + (i % (mvm->trans->num_rx_queues - 1));
124 	netdev_rss_key_fill(cmd.secret_key, sizeof(cmd.secret_key));
125 
126 	return iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd);
127 }
128 
129 static int iwl_mvm_send_dqa_cmd(struct iwl_mvm *mvm)
130 {
131 	struct iwl_dqa_enable_cmd dqa_cmd = {
132 		.cmd_queue = cpu_to_le32(IWL_MVM_DQA_CMD_QUEUE),
133 	};
134 	u32 cmd_id = iwl_cmd_id(DQA_ENABLE_CMD, DATA_PATH_GROUP, 0);
135 	int ret;
136 
137 	ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(dqa_cmd), &dqa_cmd);
138 	if (ret)
139 		IWL_ERR(mvm, "Failed to send DQA enabling command: %d\n", ret);
140 	else
141 		IWL_DEBUG_FW(mvm, "Working in DQA mode\n");
142 
143 	return ret;
144 }
145 
146 void iwl_free_fw_paging(struct iwl_mvm *mvm)
147 {
148 	int i;
149 
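	/* paging memory was never allocated (or was already freed) */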
150 	if (!mvm->fw_paging_db[0].fw_paging_block)
151 		return;
152 
153 	for (i = 0; i < NUM_OF_FW_PAGING_BLOCKS; i++) {
154 		struct iwl_fw_paging *paging = &mvm->fw_paging_db[i];
155 
156 		if (!paging->fw_paging_block) {
157 			IWL_DEBUG_FW(mvm,
158 				     "Paging: block %d already freed, continue to next page\n",
159 				     i);
160 
161 			continue;
162 		}
163 		dma_unmap_page(mvm->trans->dev, paging->fw_paging_phys,
164 			       paging->fw_paging_size, DMA_BIDIRECTIONAL);
165 
166 		__free_pages(paging->fw_paging_block,
167 			     get_order(paging->fw_paging_size));
168 		paging->fw_paging_block = NULL;
169 	}
170 	kfree(mvm->trans->paging_download_buf);
171 	mvm->trans->paging_download_buf = NULL;
172 	mvm->trans->paging_db = NULL;
173 
174 	memset(mvm->fw_paging_db, 0, sizeof(mvm->fw_paging_db));
175 }
176 
177 static int iwl_fill_paging_mem(struct iwl_mvm *mvm, const struct fw_img *image)
178 {
179 	int sec_idx, idx;
180 	u32 offset = 0;
181 
	/*
	 * Find where the paging image starts. If CPU2 exists and is in
	 * paging format, the image looks like this:
	 * CPU1 sections (2 or more)
	 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates CPU1 from CPU2
	 * CPU2 sections (not paged)
	 * PAGING_SEPARATOR_SECTION delimiter - separates the non-paged CPU2
	 * sections from the CPU2 paging sections
	 * CPU2 paging CSS
	 * CPU2 paging image (including instructions and data)
	 */
193 	for (sec_idx = 0; sec_idx < image->num_sec; sec_idx++) {
194 		if (image->sec[sec_idx].offset == PAGING_SEPARATOR_SECTION) {
195 			sec_idx++;
196 			break;
197 		}
198 	}
199 
200 	/*
201 	 * If paging is enabled there should be at least 2 more sections left
202 	 * (one for CSS and one for Paging data)
203 	 */
204 	if (sec_idx >= image->num_sec - 1) {
205 		IWL_ERR(mvm, "Paging: Missing CSS and/or paging sections\n");
206 		iwl_free_fw_paging(mvm);
207 		return -EINVAL;
208 	}
209 
	/* copy the CSS block to DRAM */
211 	IWL_DEBUG_FW(mvm, "Paging: load paging CSS to FW, sec = %d\n",
212 		     sec_idx);
213 
214 	memcpy(page_address(mvm->fw_paging_db[0].fw_paging_block),
215 	       image->sec[sec_idx].data,
216 	       mvm->fw_paging_db[0].fw_paging_size);
217 	dma_sync_single_for_device(mvm->trans->dev,
218 				   mvm->fw_paging_db[0].fw_paging_phys,
219 				   mvm->fw_paging_db[0].fw_paging_size,
220 				   DMA_BIDIRECTIONAL);
221 
222 	IWL_DEBUG_FW(mvm,
223 		     "Paging: copied %d CSS bytes to first block\n",
224 		     mvm->fw_paging_db[0].fw_paging_size);
225 
226 	sec_idx++;
227 
	/*
	 * Copy the paging blocks to DRAM.
	 * The loop index starts from 1 because the CSS block (index 0) was
	 * already copied above.
	 * The loop stops at num_of_paging_blk because the last block is
	 * copied separately below (it may not be full).
	 */
234 	for (idx = 1; idx < mvm->num_of_paging_blk; idx++) {
235 		struct iwl_fw_paging *block = &mvm->fw_paging_db[idx];
236 
237 		memcpy(page_address(block->fw_paging_block),
238 		       image->sec[sec_idx].data + offset,
239 		       block->fw_paging_size);
240 		dma_sync_single_for_device(mvm->trans->dev,
241 					   block->fw_paging_phys,
242 					   block->fw_paging_size,
					   DMA_BIDIRECTIONAL);

		IWL_DEBUG_FW(mvm,
247 			     "Paging: copied %d paging bytes to block %d\n",
248 			     mvm->fw_paging_db[idx].fw_paging_size,
249 			     idx);
250 
251 		offset += mvm->fw_paging_db[idx].fw_paging_size;
252 	}
253 
254 	/* copy the last paging block */
255 	if (mvm->num_of_pages_in_last_blk > 0) {
256 		struct iwl_fw_paging *block = &mvm->fw_paging_db[idx];
257 
258 		memcpy(page_address(block->fw_paging_block),
259 		       image->sec[sec_idx].data + offset,
260 		       FW_PAGING_SIZE * mvm->num_of_pages_in_last_blk);
261 		dma_sync_single_for_device(mvm->trans->dev,
262 					   block->fw_paging_phys,
263 					   block->fw_paging_size,
264 					   DMA_BIDIRECTIONAL);
265 
266 		IWL_DEBUG_FW(mvm,
267 			     "Paging: copied %d pages in the last block %d\n",
268 			     mvm->num_of_pages_in_last_blk, idx);
269 	}
270 
271 	return 0;
272 }
273 
274 static int iwl_alloc_fw_paging_mem(struct iwl_mvm *mvm,
275 				   const struct fw_img *image)
276 {
277 	struct page *block;
278 	dma_addr_t phys = 0;
279 	int blk_idx, order, num_of_pages, size, dma_enabled;
280 
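	/* already allocated (e.g. by a previous FW load), reuse the blocks */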
281 	if (mvm->fw_paging_db[0].fw_paging_block)
282 		return 0;
283 
284 	dma_enabled = is_device_dma_capable(mvm->trans->dev);
285 
	/* ensure BLOCK_2_EXP_SIZE is the log2 of PAGING_BLOCK_SIZE */
287 	BUILD_BUG_ON(BIT(BLOCK_2_EXP_SIZE) != PAGING_BLOCK_SIZE);
288 
289 	num_of_pages = image->paging_mem_size / FW_PAGING_SIZE;
290 	mvm->num_of_paging_blk =
291 		DIV_ROUND_UP(num_of_pages, NUM_OF_PAGE_PER_GROUP);
292 	mvm->num_of_pages_in_last_blk =
293 		num_of_pages -
294 		NUM_OF_PAGE_PER_GROUP * (mvm->num_of_paging_blk - 1);
295 
296 	IWL_DEBUG_FW(mvm,
297 		     "Paging: allocating mem for %d paging blocks, each block holds 8 pages, last block holds %d pages\n",
298 		     mvm->num_of_paging_blk,
299 		     mvm->num_of_pages_in_last_blk);
300 
	/* Allocate CSS and paging blocks in DRAM */
304 	for (blk_idx = 0; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) {
305 		/* For CSS allocate 4KB, for others PAGING_BLOCK_SIZE (32K) */
306 		size = blk_idx ? PAGING_BLOCK_SIZE : FW_PAGING_SIZE;
307 		order = get_order(size);
308 		block = alloc_pages(GFP_KERNEL, order);
309 		if (!block) {
310 			/* free all the previous pages since we failed */
311 			iwl_free_fw_paging(mvm);
312 			return -ENOMEM;
313 		}
314 
315 		mvm->fw_paging_db[blk_idx].fw_paging_block = block;
316 		mvm->fw_paging_db[blk_idx].fw_paging_size = size;
317 
318 		if (dma_enabled) {
319 			phys = dma_map_page(mvm->trans->dev, block, 0,
320 					    PAGE_SIZE << order,
321 					    DMA_BIDIRECTIONAL);
322 			if (dma_mapping_error(mvm->trans->dev, phys)) {
323 				/*
324 				 * free the previous pages and the current one
325 				 * since we failed to map_page.
326 				 */
327 				iwl_free_fw_paging(mvm);
328 				return -ENOMEM;
329 			}
330 			mvm->fw_paging_db[blk_idx].fw_paging_phys = phys;
331 		} else {
332 			mvm->fw_paging_db[blk_idx].fw_paging_phys =
333 				PAGING_ADDR_SIG |
334 				blk_idx << BLOCK_2_EXP_SIZE;
335 		}
336 
337 		if (!blk_idx)
338 			IWL_DEBUG_FW(mvm,
339 				     "Paging: allocated 4K(CSS) bytes (order %d) for firmware paging.\n",
340 				     order);
341 		else
342 			IWL_DEBUG_FW(mvm,
343 				     "Paging: allocated 32K bytes (order %d) for firmware paging.\n",
344 				     order);
345 	}
346 
347 	return 0;
348 }
349 
350 static int iwl_save_fw_paging(struct iwl_mvm *mvm,
351 			      const struct fw_img *fw)
352 {
353 	int ret;
354 
355 	ret = iwl_alloc_fw_paging_mem(mvm, fw);
356 	if (ret)
357 		return ret;
358 
359 	return iwl_fill_paging_mem(mvm, fw);
360 }
361 
/* send paging cmd to FW in case CPU2 has a paging image */
363 static int iwl_send_paging_cmd(struct iwl_mvm *mvm, const struct fw_img *fw)
364 {
365 	struct iwl_fw_paging_cmd paging_cmd = {
366 		.flags =
367 			cpu_to_le32(PAGING_CMD_IS_SECURED |
368 				    PAGING_CMD_IS_ENABLED |
369 				    (mvm->num_of_pages_in_last_blk <<
370 				    PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS)),
371 		.block_size = cpu_to_le32(BLOCK_2_EXP_SIZE),
372 		.block_num = cpu_to_le32(mvm->num_of_paging_blk),
373 	};
374 	int blk_idx, size = sizeof(paging_cmd);
375 
376 	/* A bit hard coded - but this is the old API and will be deprecated */
377 	if (!iwl_mvm_has_new_tx_api(mvm))
378 		size -= NUM_OF_FW_PAGING_BLOCKS * 4;
379 
	/* loop over all paging blocks + the CSS block */
381 	for (blk_idx = 0; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) {
382 		dma_addr_t addr = mvm->fw_paging_db[blk_idx].fw_paging_phys;
383 
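		/* the FW expects block addresses in FW-page units, not bytes */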
384 		addr = addr >> PAGE_2_EXP_SIZE;
385 
386 		if (iwl_mvm_has_new_tx_api(mvm)) {
387 			__le64 phy_addr = cpu_to_le64(addr);
388 
389 			paging_cmd.device_phy_addr.addr64[blk_idx] = phy_addr;
390 		} else {
391 			__le32 phy_addr = cpu_to_le32(addr);
392 
393 			paging_cmd.device_phy_addr.addr32[blk_idx] = phy_addr;
394 		}
395 	}
396 
397 	return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(FW_PAGING_BLOCK_CMD,
398 						    IWL_ALWAYS_LONG_GROUP, 0),
399 				    0, size, &paging_cmd);
400 }
401 
/*
 * Send FW_GET_ITEM_CMD to the FW to retrieve the paging item, i.e. the
 * SMEM address where the pages are stored. Used when CPU2 has a paging
 * image and the device is not DMA capable.
 */
405 static int iwl_trans_get_paging_item(struct iwl_mvm *mvm)
406 {
407 	int ret;
408 	struct iwl_fw_get_item_cmd fw_get_item_cmd = {
409 		.item_id = cpu_to_le32(IWL_FW_ITEM_ID_PAGING),
410 	};
411 
412 	struct iwl_fw_get_item_resp *item_resp;
413 	struct iwl_host_cmd cmd = {
414 		.id = iwl_cmd_id(FW_GET_ITEM_CMD, IWL_ALWAYS_LONG_GROUP, 0),
415 		.flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
416 		.data = { &fw_get_item_cmd, },
417 	};
418 
419 	cmd.len[0] = sizeof(struct iwl_fw_get_item_cmd);
420 
421 	ret = iwl_mvm_send_cmd(mvm, &cmd);
422 	if (ret) {
423 		IWL_ERR(mvm,
424 			"Paging: Failed to send FW_GET_ITEM_CMD cmd (err = %d)\n",
425 			ret);
426 		return ret;
427 	}
428 
429 	item_resp = (void *)((struct iwl_rx_packet *)cmd.resp_pkt)->data;
430 	if (item_resp->item_id != cpu_to_le32(IWL_FW_ITEM_ID_PAGING)) {
431 		IWL_ERR(mvm,
432 			"Paging: got wrong item in FW_GET_ITEM_CMD resp (item_id = %u)\n",
433 			le32_to_cpu(item_resp->item_id));
434 		ret = -EIO;
435 		goto exit;
436 	}
437 
438 	/* Add an extra page for headers */
439 	mvm->trans->paging_download_buf = kzalloc(PAGING_BLOCK_SIZE +
440 						  FW_PAGING_SIZE,
441 						  GFP_KERNEL);
442 	if (!mvm->trans->paging_download_buf) {
443 		ret = -ENOMEM;
444 		goto exit;
445 	}
446 	mvm->trans->paging_req_addr = le32_to_cpu(item_resp->item_val);
447 	mvm->trans->paging_db = mvm->fw_paging_db;
448 	IWL_DEBUG_FW(mvm,
449 		     "Paging: got paging request address (paging_req_addr 0x%08x)\n",
450 		     mvm->trans->paging_req_addr);
451 
452 exit:
453 	iwl_free_resp(&cmd);
454 
455 	return ret;
456 }
457 
458 static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
459 			 struct iwl_rx_packet *pkt, void *data)
460 {
461 	struct iwl_mvm *mvm =
462 		container_of(notif_wait, struct iwl_mvm, notif_wait);
463 	struct iwl_mvm_alive_data *alive_data = data;
464 	struct mvm_alive_resp_v3 *palive3;
465 	struct mvm_alive_resp *palive;
466 	struct iwl_umac_alive *umac;
467 	struct iwl_lmac_alive *lmac1;
468 	struct iwl_lmac_alive *lmac2 = NULL;
469 	u16 status;
470 
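	/*
	 * The full-sized alive response carries data for two LMACs (CDB);
	 * shorter payloads use the v3 layout with a single LMAC.
	 */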
471 	if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive)) {
472 		palive = (void *)pkt->data;
473 		umac = &palive->umac_data;
474 		lmac1 = &palive->lmac_data[0];
475 		lmac2 = &palive->lmac_data[1];
476 		status = le16_to_cpu(palive->status);
477 	} else {
478 		palive3 = (void *)pkt->data;
479 		umac = &palive3->umac_data;
480 		lmac1 = &palive3->lmac_data;
481 		status = le16_to_cpu(palive3->status);
482 	}
483 
484 	mvm->error_event_table[0] = le32_to_cpu(lmac1->error_event_table_ptr);
485 	if (lmac2)
486 		mvm->error_event_table[1] =
487 			le32_to_cpu(lmac2->error_event_table_ptr);
488 	mvm->log_event_table = le32_to_cpu(lmac1->log_event_table_ptr);
489 	mvm->sf_space.addr = le32_to_cpu(lmac1->st_fwrd_addr);
490 	mvm->sf_space.size = le32_to_cpu(lmac1->st_fwrd_size);
491 
492 	mvm->umac_error_event_table = le32_to_cpu(umac->error_info_addr);
493 
494 	alive_data->scd_base_addr = le32_to_cpu(lmac1->scd_base_ptr);
495 	alive_data->valid = status == IWL_ALIVE_STATUS_OK;
496 	if (mvm->umac_error_event_table)
497 		mvm->support_umac_log = true;
498 
499 	IWL_DEBUG_FW(mvm,
500 		     "Alive ucode status 0x%04x revision 0x%01X 0x%01X\n",
501 		     status, lmac1->ver_type, lmac1->ver_subtype);
502 
503 	if (lmac2)
504 		IWL_DEBUG_FW(mvm, "Alive ucode CDB\n");
505 
506 	IWL_DEBUG_FW(mvm,
507 		     "UMAC version: Major - 0x%x, Minor - 0x%x\n",
508 		     le32_to_cpu(umac->umac_major),
509 		     le32_to_cpu(umac->umac_minor));
510 
511 	return true;
512 }
513 
514 static bool iwl_wait_init_complete(struct iwl_notif_wait_data *notif_wait,
515 				   struct iwl_rx_packet *pkt, void *data)
516 {
517 	WARN_ON(pkt->hdr.cmd != INIT_COMPLETE_NOTIF);
518 
519 	return true;
520 }
521 
522 static bool iwl_wait_phy_db_entry(struct iwl_notif_wait_data *notif_wait,
523 				  struct iwl_rx_packet *pkt, void *data)
524 {
525 	struct iwl_phy_db *phy_db = data;
526 
527 	if (pkt->hdr.cmd != CALIB_RES_NOTIF_PHY_DB) {
528 		WARN_ON(pkt->hdr.cmd != INIT_COMPLETE_NOTIF);
529 		return true;
530 	}
531 
532 	WARN_ON(iwl_phy_db_set_section(phy_db, pkt));
533 
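	/* more PHY DB sections may follow; keep waiting for INIT_COMPLETE_NOTIF */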
534 	return false;
535 }
536 
537 static int iwl_mvm_init_paging(struct iwl_mvm *mvm)
538 {
539 	const struct fw_img *fw = &mvm->fw->img[mvm->cur_ucode];
540 	int ret;
541 
542 	/*
543 	 * Configure and operate fw paging mechanism.
544 	 * The driver configures the paging flow only once.
545 	 * The CPU2 paging image is included in the IWL_UCODE_INIT image.
546 	 */
547 	if (!fw->paging_mem_size)
548 		return 0;
549 
	/*
	 * When DMA is not enabled, the driver needs to copy / write
	 * the downloaded / uploaded page to / from SMEM.
	 * This gets the location where the pages are stored.
	 */
556 	if (!is_device_dma_capable(mvm->trans->dev)) {
557 		ret = iwl_trans_get_paging_item(mvm);
558 		if (ret) {
559 			IWL_ERR(mvm, "failed to get FW paging item\n");
560 			return ret;
561 		}
562 	}
563 
564 	ret = iwl_save_fw_paging(mvm, fw);
565 	if (ret) {
566 		IWL_ERR(mvm, "failed to save the FW paging image\n");
567 		return ret;
568 	}
569 
570 	ret = iwl_send_paging_cmd(mvm, fw);
571 	if (ret) {
572 		IWL_ERR(mvm, "failed to send the paging cmd\n");
573 		iwl_free_fw_paging(mvm);
574 		return ret;
575 	}
576 
577 	return 0;
}

static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
580 					 enum iwl_ucode_type ucode_type)
581 {
582 	struct iwl_notification_wait alive_wait;
583 	struct iwl_mvm_alive_data alive_data;
584 	const struct fw_img *fw;
585 	int ret, i;
586 	enum iwl_ucode_type old_type = mvm->cur_ucode;
587 	static const u16 alive_cmd[] = { MVM_ALIVE };
588 	struct iwl_sf_region st_fwrd_space;
589 
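	/*
	 * Load the usniffer image instead of the regular one when a usniffer
	 * debug configuration is requested and the FW has no unified
	 * usniffer support.
	 */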
590 	if (ucode_type == IWL_UCODE_REGULAR &&
591 	    iwl_fw_dbg_conf_usniffer(mvm->fw, FW_DBG_START_FROM_ALIVE) &&
592 	    !(fw_has_capa(&mvm->fw->ucode_capa,
593 			  IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED)))
594 		fw = iwl_get_ucode_image(mvm->fw, IWL_UCODE_REGULAR_USNIFFER);
595 	else
596 		fw = iwl_get_ucode_image(mvm->fw, ucode_type);
597 	if (WARN_ON(!fw))
598 		return -EINVAL;
599 	mvm->cur_ucode = ucode_type;
600 	mvm->ucode_loaded = false;
601 
602 	iwl_init_notification_wait(&mvm->notif_wait, &alive_wait,
603 				   alive_cmd, ARRAY_SIZE(alive_cmd),
604 				   iwl_alive_fn, &alive_data);
605 
606 	ret = iwl_trans_start_fw(mvm->trans, fw, ucode_type == IWL_UCODE_INIT);
607 	if (ret) {
608 		mvm->cur_ucode = old_type;
609 		iwl_remove_notification(&mvm->notif_wait, &alive_wait);
610 		return ret;
611 	}
612 
613 	/*
614 	 * Some things may run in the background now, but we
615 	 * just wait for the ALIVE notification here.
616 	 */
617 	ret = iwl_wait_notification(&mvm->notif_wait, &alive_wait,
618 				    MVM_UCODE_ALIVE_TIMEOUT);
619 	if (ret) {
620 		if (mvm->trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
621 			IWL_ERR(mvm,
622 				"SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
623 				iwl_read_prph(mvm->trans, SB_CPU_1_STATUS),
624 				iwl_read_prph(mvm->trans, SB_CPU_2_STATUS));
625 		mvm->cur_ucode = old_type;
626 		return ret;
627 	}
628 
629 	if (!alive_data.valid) {
630 		IWL_ERR(mvm, "Loaded ucode is not valid!\n");
631 		mvm->cur_ucode = old_type;
632 		return -EIO;
633 	}
634 
	/*
	 * Update the SDIO allocation according to the pointer we get in the
	 * ALIVE notification.
	 */
639 	st_fwrd_space.addr = mvm->sf_space.addr;
640 	st_fwrd_space.size = mvm->sf_space.size;
641 	ret = iwl_trans_update_sf(mvm->trans, &st_fwrd_space);
642 	if (ret) {
643 		IWL_ERR(mvm, "Failed to update SF size. ret %d\n", ret);
644 		return ret;
645 	}
646 
647 	iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr);
648 
649 	/*
650 	 * Note: all the queues are enabled as part of the interface
651 	 * initialization, but in firmware restart scenarios they
652 	 * could be stopped, so wake them up. In firmware restart,
653 	 * mac80211 will have the queues stopped as well until the
654 	 * reconfiguration completes. During normal startup, they
655 	 * will be empty.
656 	 */
657 
658 	memset(&mvm->queue_info, 0, sizeof(mvm->queue_info));
659 	if (iwl_mvm_is_dqa_supported(mvm))
660 		mvm->queue_info[IWL_MVM_DQA_CMD_QUEUE].hw_queue_refcount = 1;
661 	else
662 		mvm->queue_info[IWL_MVM_CMD_QUEUE].hw_queue_refcount = 1;
663 
664 	for (i = 0; i < IEEE80211_MAX_QUEUES; i++)
665 		atomic_set(&mvm->mac80211_queue_stop_count[i], 0);
666 
667 	mvm->ucode_loaded = true;
668 
669 	return 0;
670 }
671 
672 static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
673 {
674 	struct iwl_phy_cfg_cmd phy_cfg_cmd;
675 	enum iwl_ucode_type ucode_type = mvm->cur_ucode;
676 
677 	/* Set parameters */
678 	phy_cfg_cmd.phy_cfg = cpu_to_le32(iwl_mvm_get_phy_config(mvm));
679 	phy_cfg_cmd.calib_control.event_trigger =
680 		mvm->fw->default_calib[ucode_type].event_trigger;
681 	phy_cfg_cmd.calib_control.flow_trigger =
682 		mvm->fw->default_calib[ucode_type].flow_trigger;
683 
684 	IWL_DEBUG_INFO(mvm, "Sending Phy CFG command: 0x%x\n",
685 		       phy_cfg_cmd.phy_cfg);
686 
687 	return iwl_mvm_send_cmd_pdu(mvm, PHY_CONFIGURATION_CMD, 0,
688 				    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
689 }
690 
691 int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
692 {
693 	struct iwl_notification_wait calib_wait;
694 	static const u16 init_complete[] = {
695 		INIT_COMPLETE_NOTIF,
696 		CALIB_RES_NOTIF_PHY_DB
697 	};
698 	int ret;
699 
700 	lockdep_assert_held(&mvm->mutex);
701 
702 	if (WARN_ON_ONCE(mvm->calibrating))
703 		return 0;
704 
705 	iwl_init_notification_wait(&mvm->notif_wait,
706 				   &calib_wait,
707 				   init_complete,
708 				   ARRAY_SIZE(init_complete),
709 				   iwl_wait_phy_db_entry,
710 				   mvm->phy_db);
711 
712 	/* Will also start the device */
713 	ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_INIT);
714 	if (ret) {
715 		IWL_ERR(mvm, "Failed to start INIT ucode: %d\n", ret);
716 		goto error;
717 	}
718 
719 	ret = iwl_send_bt_init_conf(mvm);
720 	if (ret)
721 		goto error;
722 
723 	/* Read the NVM only at driver load time, no need to do this twice */
724 	if (read_nvm) {
725 		/* Read nvm */
726 		ret = iwl_nvm_init(mvm, true);
727 		if (ret) {
728 			IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
729 			goto error;
730 		}
731 	}
732 
733 	/* In case we read the NVM from external file, load it to the NIC */
734 	if (mvm->nvm_file_name)
735 		iwl_mvm_load_nvm_to_nic(mvm);
736 
737 	ret = iwl_nvm_check_version(mvm->nvm_data, mvm->trans);
738 	WARN_ON(ret);
739 
	/*
	 * Abort after reading the NVM in case RF-kill is on; we will complete
	 * the init sequence later, when RF-kill is switched off.
	 */
744 	if (iwl_mvm_is_radio_hw_killed(mvm)) {
745 		IWL_DEBUG_RF_KILL(mvm,
746 				  "jump over all phy activities due to RF kill\n");
747 		iwl_remove_notification(&mvm->notif_wait, &calib_wait);
748 		ret = 1;
749 		goto out;
750 	}
751 
752 	mvm->calibrating = true;
753 
754 	/* Send TX valid antennas before triggering calibrations */
755 	ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
756 	if (ret)
757 		goto error;
758 
	/*
	 * Send the PHY configuration command to the init uCode
	 * to start the 16.0 uCode init image internal calibrations.
	 */
763 	ret = iwl_send_phy_cfg_cmd(mvm);
764 	if (ret) {
765 		IWL_ERR(mvm, "Failed to run INIT calibrations: %d\n",
766 			ret);
767 		goto error;
768 	}
769 
770 	/*
771 	 * Some things may run in the background now, but we
772 	 * just wait for the calibration complete notification.
773 	 */
774 	ret = iwl_wait_notification(&mvm->notif_wait, &calib_wait,
775 			MVM_UCODE_CALIB_TIMEOUT);
776 
777 	if (ret && iwl_mvm_is_radio_hw_killed(mvm)) {
778 		IWL_DEBUG_RF_KILL(mvm, "RFKILL while calibrating.\n");
779 		ret = 1;
780 	}
781 	goto out;
782 
783 error:
784 	iwl_remove_notification(&mvm->notif_wait, &calib_wait);
785 out:
786 	mvm->calibrating = false;
787 	if (iwlmvm_mod_params.init_dbg && !mvm->nvm_data) {
788 		/* we want to debug INIT and we have no NVM - fake */
789 		mvm->nvm_data = kzalloc(sizeof(struct iwl_nvm_data) +
790 					sizeof(struct ieee80211_channel) +
791 					sizeof(struct ieee80211_rate),
792 					GFP_KERNEL);
793 		if (!mvm->nvm_data)
794 			return -ENOMEM;
795 		mvm->nvm_data->bands[0].channels = mvm->nvm_data->channels;
796 		mvm->nvm_data->bands[0].n_channels = 1;
797 		mvm->nvm_data->bands[0].n_bitrates = 1;
798 		mvm->nvm_data->bands[0].bitrates =
799 			(void *)mvm->nvm_data->channels + 1;
800 		mvm->nvm_data->bands[0].bitrates->hw_value = 10;
801 	}
802 
803 	return ret;
804 }
805 
806 int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
807 {
808 	struct iwl_notification_wait init_wait;
809 	struct iwl_nvm_access_complete_cmd nvm_complete = {};
810 	static const u16 init_complete[] = {
811 		INIT_COMPLETE_NOTIF,
812 	};
813 	int ret;
814 
815 	lockdep_assert_held(&mvm->mutex);
816 
817 	iwl_init_notification_wait(&mvm->notif_wait,
818 				   &init_wait,
819 				   init_complete,
820 				   ARRAY_SIZE(init_complete),
821 				   iwl_wait_init_complete,
822 				   NULL);
823 
824 	/* Will also start the device */
825 	ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR);
826 	if (ret) {
827 		IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
828 		goto error;
829 	}
830 
831 	/* TODO: remove when integrating context info */
832 	ret = iwl_mvm_init_paging(mvm);
833 	if (ret) {
834 		IWL_ERR(mvm, "Failed to init paging: %d\n",
835 			ret);
836 		goto error;
837 	}
838 
839 	/* Read the NVM only at driver load time, no need to do this twice */
840 	if (read_nvm) {
841 		/* Read nvm */
842 		ret = iwl_nvm_init(mvm, true);
843 		if (ret) {
844 			IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
845 			goto error;
846 		}
847 	}
848 
849 	/* In case we read the NVM from external file, load it to the NIC */
850 	if (mvm->nvm_file_name)
851 		iwl_mvm_load_nvm_to_nic(mvm);
852 
853 	ret = iwl_nvm_check_version(mvm->nvm_data, mvm->trans);
854 	if (WARN_ON(ret))
855 		goto error;
856 
857 	ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(REGULATORY_AND_NVM_GROUP,
858 						NVM_ACCESS_COMPLETE), 0,
859 				   sizeof(nvm_complete), &nvm_complete);
860 	if (ret) {
861 		IWL_ERR(mvm, "Failed to run complete NVM access: %d\n",
862 			ret);
863 		goto error;
864 	}
865 
866 	/* We wait for the INIT complete notification */
867 	return iwl_wait_notification(&mvm->notif_wait, &init_wait,
868 				     MVM_UCODE_ALIVE_TIMEOUT);
869 
870 error:
871 	iwl_remove_notification(&mvm->notif_wait, &init_wait);
872 	return ret;
873 }
874 
875 static void iwl_mvm_parse_shared_mem_a000(struct iwl_mvm *mvm,
876 					  struct iwl_rx_packet *pkt)
877 {
878 	struct iwl_shared_mem_cfg *mem_cfg = (void *)pkt->data;
879 	int i;
880 
881 	mvm->shared_mem_cfg.num_txfifo_entries =
882 		ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size);
883 	for (i = 0; i < ARRAY_SIZE(mem_cfg->txfifo_size); i++)
884 		mvm->shared_mem_cfg.txfifo_size[i] =
885 			le32_to_cpu(mem_cfg->txfifo_size[i]);
886 	for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.rxfifo_size); i++)
887 		mvm->shared_mem_cfg.rxfifo_size[i] =
888 			le32_to_cpu(mem_cfg->rxfifo_size[i]);
889 
890 	BUILD_BUG_ON(sizeof(mvm->shared_mem_cfg.internal_txfifo_size) !=
891 		     sizeof(mem_cfg->internal_txfifo_size));
892 
893 	for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.internal_txfifo_size);
894 	     i++)
895 		mvm->shared_mem_cfg.internal_txfifo_size[i] =
896 			le32_to_cpu(mem_cfg->internal_txfifo_size[i]);
897 }
898 
899 static void iwl_mvm_parse_shared_mem(struct iwl_mvm *mvm,
900 				     struct iwl_rx_packet *pkt)
901 {
902 	struct iwl_shared_mem_cfg_v1 *mem_cfg = (void *)pkt->data;
903 	int i;
904 
905 	mvm->shared_mem_cfg.num_txfifo_entries =
906 		ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size);
907 	for (i = 0; i < ARRAY_SIZE(mem_cfg->txfifo_size); i++)
908 		mvm->shared_mem_cfg.txfifo_size[i] =
909 			le32_to_cpu(mem_cfg->txfifo_size[i]);
910 	for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.rxfifo_size); i++)
911 		mvm->shared_mem_cfg.rxfifo_size[i] =
912 			le32_to_cpu(mem_cfg->rxfifo_size[i]);
913 
	/* the new API has more data, from the rxfifo_addr field onward */
915 	if (fw_has_capa(&mvm->fw->ucode_capa,
916 			IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
917 		BUILD_BUG_ON(sizeof(mvm->shared_mem_cfg.internal_txfifo_size) !=
918 			     sizeof(mem_cfg->internal_txfifo_size));
919 
920 		for (i = 0;
921 		     i < ARRAY_SIZE(mvm->shared_mem_cfg.internal_txfifo_size);
922 		     i++)
923 			mvm->shared_mem_cfg.internal_txfifo_size[i] =
924 				le32_to_cpu(mem_cfg->internal_txfifo_size[i]);
925 	}
926 }
927 
928 static void iwl_mvm_get_shared_mem_conf(struct iwl_mvm *mvm)
929 {
930 	struct iwl_host_cmd cmd = {
931 		.flags = CMD_WANT_SKB,
932 		.data = { NULL, },
933 		.len = { 0, },
934 	};
935 	struct iwl_rx_packet *pkt;
936 
937 	lockdep_assert_held(&mvm->mutex);
938 
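	/*
	 * Newer FW exposes the extended config as a command in SYSTEM_GROUP;
	 * older FW uses the legacy SHARED_MEM_CFG command id.
	 */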
939 	if (fw_has_capa(&mvm->fw->ucode_capa,
940 			IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG))
941 		cmd.id = iwl_cmd_id(SHARED_MEM_CFG_CMD, SYSTEM_GROUP, 0);
942 	else
943 		cmd.id = SHARED_MEM_CFG;
944 
945 	if (WARN_ON(iwl_mvm_send_cmd(mvm, &cmd)))
946 		return;
947 
948 	pkt = cmd.resp_pkt;
949 	if (iwl_mvm_has_new_tx_api(mvm))
950 		iwl_mvm_parse_shared_mem_a000(mvm, pkt);
951 	else
952 		iwl_mvm_parse_shared_mem(mvm, pkt);
953 
954 	IWL_DEBUG_INFO(mvm, "SHARED MEM CFG: got memory offsets/sizes\n");
955 
956 	iwl_free_resp(&cmd);
957 }
958 
959 static int iwl_mvm_config_ltr(struct iwl_mvm *mvm)
960 {
961 	struct iwl_ltr_config_cmd cmd = {
962 		.flags = cpu_to_le32(LTR_CFG_FLAG_FEATURE_ENABLE),
963 	};
964 
965 	if (!mvm->trans->ltr_enabled)
966 		return 0;
967 
968 	return iwl_mvm_send_cmd_pdu(mvm, LTR_CONFIG, 0,
969 				    sizeof(cmd), &cmd);
970 }
971 
972 #define ACPI_WRDS_METHOD	"WRDS"
973 #define ACPI_WRDS_WIFI		(0x07)
974 #define ACPI_WRDS_TABLE_SIZE	10
975 
976 struct iwl_mvm_sar_table {
977 	bool enabled;
978 	u8 values[ACPI_WRDS_TABLE_SIZE];
979 };
980 
981 #ifdef CONFIG_ACPI
982 static int iwl_mvm_sar_get_wrds(struct iwl_mvm *mvm, union acpi_object *wrds,
983 				struct iwl_mvm_sar_table *sar_table)
984 {
985 	union acpi_object *data_pkg;
986 	u32 i;
987 
	/* We need at least two packages, one for the revision and one
	 * for the data itself.  Also check that the revision is valid
	 * (i.e. it is an integer set to 0).
	 */
992 	if (wrds->type != ACPI_TYPE_PACKAGE ||
993 	    wrds->package.count < 2 ||
994 	    wrds->package.elements[0].type != ACPI_TYPE_INTEGER ||
995 	    wrds->package.elements[0].integer.value != 0) {
996 		IWL_DEBUG_RADIO(mvm, "Unsupported wrds structure\n");
997 		return -EINVAL;
998 	}
999 
1000 	/* loop through all the packages to find the one for WiFi */
1001 	for (i = 1; i < wrds->package.count; i++) {
1002 		union acpi_object *domain;
1003 
1004 		data_pkg = &wrds->package.elements[i];
1005 
		/* Skip anything that is not a package with the right
		 * number of elements (i.e. domain_type, enabled/disabled
		 * plus the sar table size).
		 */
1010 		if (data_pkg->type != ACPI_TYPE_PACKAGE ||
1011 		    data_pkg->package.count != ACPI_WRDS_TABLE_SIZE + 2)
1012 			continue;
1013 
1014 		domain = &data_pkg->package.elements[0];
1015 		if (domain->type == ACPI_TYPE_INTEGER &&
1016 		    domain->integer.value == ACPI_WRDS_WIFI)
1017 			break;
1018 
1019 		data_pkg = NULL;
1020 	}
1021 
1022 	if (!data_pkg)
1023 		return -ENOENT;
1024 
1025 	if (data_pkg->package.elements[1].type != ACPI_TYPE_INTEGER)
1026 		return -EINVAL;
1027 
1028 	sar_table->enabled = !!(data_pkg->package.elements[1].integer.value);
1029 
1030 	for (i = 0; i < ACPI_WRDS_TABLE_SIZE; i++) {
1031 		union acpi_object *entry;
1032 
1033 		entry = &data_pkg->package.elements[i + 2];
1034 		if ((entry->type != ACPI_TYPE_INTEGER) ||
1035 		    (entry->integer.value > U8_MAX))
1036 			return -EINVAL;
1037 
1038 		sar_table->values[i] = entry->integer.value;
1039 	}
1040 
1041 	return 0;
1042 }
1043 
1044 static int iwl_mvm_sar_get_table(struct iwl_mvm *mvm,
1045 				 struct iwl_mvm_sar_table *sar_table)
1046 {
1047 	acpi_handle root_handle;
1048 	acpi_handle handle;
1049 	struct acpi_buffer wrds = {ACPI_ALLOCATE_BUFFER, NULL};
1050 	acpi_status status;
1051 	int ret;
1052 
1053 	root_handle = ACPI_HANDLE(mvm->dev);
1054 	if (!root_handle) {
1055 		IWL_DEBUG_RADIO(mvm,
1056 				"Could not retrieve root port ACPI handle\n");
1057 		return -ENOENT;
1058 	}
1059 
1060 	/* Get the method's handle */
1061 	status = acpi_get_handle(root_handle, (acpi_string)ACPI_WRDS_METHOD,
1062 				 &handle);
1063 	if (ACPI_FAILURE(status)) {
1064 		IWL_DEBUG_RADIO(mvm, "WRDS method not found\n");
1065 		return -ENOENT;
1066 	}
1067 
1068 	/* Call WRDS with no arguments */
1069 	status = acpi_evaluate_object(handle, NULL, NULL, &wrds);
1070 	if (ACPI_FAILURE(status)) {
1071 		IWL_DEBUG_RADIO(mvm, "WRDS invocation failed (0x%x)\n", status);
1072 		return -ENOENT;
1073 	}
1074 
1075 	ret = iwl_mvm_sar_get_wrds(mvm, wrds.pointer, sar_table);
1076 	kfree(wrds.pointer);
1077 
1078 	return ret;
1079 }
1080 #else /* CONFIG_ACPI */
1081 static int iwl_mvm_sar_get_table(struct iwl_mvm *mvm,
1082 				 struct iwl_mvm_sar_table *sar_table)
1083 {
1084 	return -ENOENT;
1085 }
1086 #endif /* CONFIG_ACPI */
1087 
1088 static int iwl_mvm_sar_init(struct iwl_mvm *mvm)
1089 {
1090 	struct iwl_mvm_sar_table sar_table;
1091 	struct iwl_dev_tx_power_cmd cmd = {
1092 		.v3.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS),
1093 	};
1094 	int ret, i, j, idx;
1095 	int len = sizeof(cmd);
1096 
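	/* without the TX_POWER_ACK capability, only the v3 part of the cmd is sent */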
1097 	if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TX_POWER_ACK))
1098 		len = sizeof(cmd.v3);
1099 
1100 	ret = iwl_mvm_sar_get_table(mvm, &sar_table);
1101 	if (ret < 0) {
1102 		IWL_DEBUG_RADIO(mvm,
1103 				"SAR BIOS table invalid or unavailable. (%d)\n",
1104 				ret);
1105 		/* we don't fail if the table is not available */
1106 		return 0;
1107 	}
1108 
1109 	if (!sar_table.enabled)
1110 		return 0;
1111 
1112 	IWL_DEBUG_RADIO(mvm, "Sending REDUCE_TX_POWER_CMD per chain\n");
1113 
1114 	BUILD_BUG_ON(IWL_NUM_CHAIN_LIMITS * IWL_NUM_SUB_BANDS !=
1115 		     ACPI_WRDS_TABLE_SIZE);
1116 
1117 	for (i = 0; i < IWL_NUM_CHAIN_LIMITS; i++) {
1118 		IWL_DEBUG_RADIO(mvm, "  Chain[%d]:\n", i);
1119 		for (j = 0; j < IWL_NUM_SUB_BANDS; j++) {
1120 			idx = (i * IWL_NUM_SUB_BANDS) + j;
1121 			cmd.v3.per_chain_restriction[i][j] =
1122 				cpu_to_le16(sar_table.values[idx]);
1123 			IWL_DEBUG_RADIO(mvm, "    Band[%d] = %d * .125dBm\n",
1124 					j, sar_table.values[idx]);
1125 		}
1126 	}
1127 
1128 	ret = iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
1129 	if (ret)
1130 		IWL_ERR(mvm, "failed to set per-chain TX power: %d\n", ret);
1131 
1132 	return ret;
1133 }
1134 
1135 static int iwl_mvm_load_rt_fw(struct iwl_mvm *mvm)
1136 {
1137 	int ret;
1138 
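	/* devices with a unified image run it directly, with no separate INIT ucode */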
1139 	if (iwl_mvm_has_new_tx_api(mvm))
1140 		return iwl_run_unified_mvm_ucode(mvm, false);
1141 
1142 	ret = iwl_run_init_mvm_ucode(mvm, false);
1143 
1144 	if (iwlmvm_mod_params.init_dbg)
1145 		return 0;
1146 
1147 	if (ret) {
1148 		IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret);
1149 		/* this can't happen */
1150 		if (WARN_ON(ret > 0))
1151 			ret = -ERFKILL;
1152 		return ret;
1153 	}
1154 
	/*
	 * Stop and start the transport without entering low power
	 * mode. This will save the state of other components on the
	 * device that are triggered by the INIT firmware (MFUART).
	 */
1160 	_iwl_trans_stop_device(mvm->trans, false);
1161 	ret = _iwl_trans_start_hw(mvm->trans, false);
1162 	if (ret)
1163 		return ret;
1164 
1165 	ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR);
1166 	if (ret)
1167 		return ret;
1168 
1169 	return iwl_mvm_init_paging(mvm);
1170 }
1171 
1172 int iwl_mvm_up(struct iwl_mvm *mvm)
1173 {
1174 	int ret, i;
1175 	struct ieee80211_channel *chan;
1176 	struct cfg80211_chan_def chandef;
1177 
1178 	lockdep_assert_held(&mvm->mutex);
1179 
1180 	ret = iwl_trans_start_hw(mvm->trans);
1181 	if (ret)
1182 		return ret;
1183 
1184 	ret = iwl_mvm_load_rt_fw(mvm);
1185 	if (ret) {
1186 		IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
1187 		goto error;
1188 	}
1189 
1190 	iwl_mvm_get_shared_mem_conf(mvm);
1191 
1192 	ret = iwl_mvm_sf_update(mvm, NULL, false);
1193 	if (ret)
1194 		IWL_ERR(mvm, "Failed to initialize Smart Fifo\n");
1195 
1196 	mvm->fw_dbg_conf = FW_DBG_INVALID;
1197 	/* if we have a destination, assume EARLY START */
1198 	if (mvm->fw->dbg_dest_tlv)
1199 		mvm->fw_dbg_conf = FW_DBG_START_FROM_ALIVE;
1200 	iwl_mvm_start_fw_dbg_conf(mvm, FW_DBG_START_FROM_ALIVE);
1201 
1202 	ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
1203 	if (ret)
1204 		goto error;
1205 
1206 	ret = iwl_send_bt_init_conf(mvm);
1207 	if (ret)
1208 		goto error;
1209 
	/* Send PHY DB control command and then PHY DB calibration */
1211 	if (!iwl_mvm_has_new_tx_api(mvm)) {
1212 		ret = iwl_send_phy_db_data(mvm->phy_db);
1213 		if (ret)
1214 			goto error;
1215 
1216 		ret = iwl_send_phy_cfg_cmd(mvm);
1217 		if (ret)
1218 			goto error;
1219 	}
1220 
1221 	/* Init RSS configuration */
1222 	if (iwl_mvm_has_new_rx_api(mvm)) {
1223 		ret = iwl_send_rss_cfg_cmd(mvm);
1224 		if (ret) {
1225 			IWL_ERR(mvm, "Failed to configure RSS queues: %d\n",
1226 				ret);
1227 			goto error;
1228 		}
1229 	}
1230 
1231 	/* init the fw <-> mac80211 STA mapping */
1232 	for (i = 0; i < IWL_MVM_STATION_COUNT; i++)
1233 		RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);
1234 
1235 	mvm->tdls_cs.peer.sta_id = IWL_MVM_STATION_COUNT;
1236 
1237 	/* reset quota debouncing buffer - 0xff will yield invalid data */
1238 	memset(&mvm->last_quota_cmd, 0xff, sizeof(mvm->last_quota_cmd));
1239 
1240 	/* Enable DQA-mode if required */
1241 	if (iwl_mvm_is_dqa_supported(mvm)) {
1242 		ret = iwl_mvm_send_dqa_cmd(mvm);
1243 		if (ret)
1244 			goto error;
1245 	} else {
1246 		IWL_DEBUG_FW(mvm, "Working in non-DQA mode\n");
1247 	}
1248 
1249 	/* Add auxiliary station for scanning */
1250 	ret = iwl_mvm_add_aux_sta(mvm);
1251 	if (ret)
1252 		goto error;
1253 
1254 	/* Add all the PHY contexts */
1255 	chan = &mvm->hw->wiphy->bands[NL80211_BAND_2GHZ]->channels[0];
1256 	cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_NO_HT);
1257 	for (i = 0; i < NUM_PHY_CTX; i++) {
1258 		/*
1259 		 * The channel used here isn't relevant as it's
1260 		 * going to be overwritten in the other flows.
1261 		 * For now use the first channel we have.
1262 		 */
1263 		ret = iwl_mvm_phy_ctxt_add(mvm, &mvm->phy_ctxts[i],
1264 					   &chandef, 1, 1);
1265 		if (ret)
1266 			goto error;
1267 	}
1268 
1269 #ifdef CONFIG_THERMAL
1270 	if (iwl_mvm_is_tt_in_fw(mvm)) {
		/* In order to give the responsibility for CT-kill and
		 * TX backoff to the FW we need to send an empty temperature
		 * reporting cmd during init time.
		 */
1275 		iwl_mvm_send_temp_report_ths_cmd(mvm);
1276 	} else {
1277 		/* Initialize tx backoffs to the minimal possible */
1278 		iwl_mvm_tt_tx_backoff(mvm, 0);
1279 	}
1280 
1281 	/* TODO: read the budget from BIOS / Platform NVM */
1282 	if (iwl_mvm_is_ctdp_supported(mvm) && mvm->cooling_dev.cur_state > 0) {
1283 		ret = iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_START,
1284 					   mvm->cooling_dev.cur_state);
1285 		if (ret)
1286 			goto error;
1287 	}
1288 #else
1289 	/* Initialize tx backoffs to the minimal possible */
1290 	iwl_mvm_tt_tx_backoff(mvm, 0);
1291 #endif
1292 
1293 	WARN_ON(iwl_mvm_config_ltr(mvm));
1294 
1295 	ret = iwl_mvm_power_update_device(mvm);
1296 	if (ret)
1297 		goto error;
1298 
1299 	/*
1300 	 * RTNL is not taken during Ct-kill, but we don't need to scan/Tx
1301 	 * anyway, so don't init MCC.
1302 	 */
1303 	if (!test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status)) {
1304 		ret = iwl_mvm_init_mcc(mvm);
1305 		if (ret)
1306 			goto error;
1307 	}
1308 
1309 	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
1310 		mvm->scan_type = IWL_SCAN_TYPE_NOT_SET;
1311 		ret = iwl_mvm_config_scan(mvm);
1312 		if (ret)
1313 			goto error;
1314 	}
1315 
1316 	if (iwl_mvm_is_csum_supported(mvm) &&
1317 	    mvm->cfg->features & NETIF_F_RXCSUM)
1318 		iwl_trans_write_prph(mvm->trans, RX_EN_CSUM, 0x3);
1319 
1320 	/* allow FW/transport low power modes if not during restart */
1321 	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
1322 		iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN);
1323 
1324 	ret = iwl_mvm_sar_init(mvm);
1325 	if (ret)
1326 		goto error;
1327 
1328 	IWL_DEBUG_INFO(mvm, "RT uCode started.\n");
1329 	return 0;
1330  error:
1331 	iwl_mvm_stop_device(mvm);
1332 	return ret;
1333 }
1334 
1335 int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm)
1336 {
1337 	int ret, i;
1338 
1339 	lockdep_assert_held(&mvm->mutex);
1340 
1341 	ret = iwl_trans_start_hw(mvm->trans);
1342 	if (ret)
1343 		return ret;
1344 
1345 	ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_WOWLAN);
1346 	if (ret) {
1347 		IWL_ERR(mvm, "Failed to start WoWLAN firmware: %d\n", ret);
1348 		goto error;
1349 	}
1350 
1351 	ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
1352 	if (ret)
1353 		goto error;
1354 
	/* Send PHY DB control command and then PHY DB calibration */
1356 	ret = iwl_send_phy_db_data(mvm->phy_db);
1357 	if (ret)
1358 		goto error;
1359 
1360 	ret = iwl_send_phy_cfg_cmd(mvm);
1361 	if (ret)
1362 		goto error;
1363 
1364 	/* init the fw <-> mac80211 STA mapping */
1365 	for (i = 0; i < IWL_MVM_STATION_COUNT; i++)
1366 		RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);
1367 
1368 	/* Add auxiliary station for scanning */
1369 	ret = iwl_mvm_add_aux_sta(mvm);
1370 	if (ret)
1371 		goto error;
1372 
1373 	return 0;
1374  error:
1375 	iwl_mvm_stop_device(mvm);
1376 	return ret;
1377 }
1378 
1379 void iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
1380 				 struct iwl_rx_cmd_buffer *rxb)
1381 {
1382 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
1383 	struct iwl_card_state_notif *card_state_notif = (void *)pkt->data;
1384 	u32 flags = le32_to_cpu(card_state_notif->flags);
1385 
1386 	IWL_DEBUG_RF_KILL(mvm, "Card state received: HW:%s SW:%s CT:%s\n",
1387 			  (flags & HW_CARD_DISABLED) ? "Kill" : "On",
1388 			  (flags & SW_CARD_DISABLED) ? "Kill" : "On",
1389 			  (flags & CT_KILL_CARD_DISABLED) ?
1390 			  "Reached" : "Not reached");
1391 }
1392 
1393 void iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm,
1394 			     struct iwl_rx_cmd_buffer *rxb)
1395 {
1396 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
1397 	struct iwl_mfuart_load_notif *mfuart_notif = (void *)pkt->data;
1398 
1399 	IWL_DEBUG_INFO(mvm,
1400 		       "MFUART: installed ver: 0x%08x, external ver: 0x%08x, status: 0x%08x, duration: 0x%08x\n",
1401 		       le32_to_cpu(mfuart_notif->installed_ver),
1402 		       le32_to_cpu(mfuart_notif->external_ver),
1403 		       le32_to_cpu(mfuart_notif->status),
1404 		       le32_to_cpu(mfuart_notif->duration));
1405 
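	/* only the full-sized (newer) notification carries the image_size field */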
1406 	if (iwl_rx_packet_payload_len(pkt) == sizeof(*mfuart_notif))
1407 		IWL_DEBUG_INFO(mvm,
1408 			       "MFUART: image size: 0x%08x\n",
1409 			       le32_to_cpu(mfuart_notif->image_size));
1410 }
1411