/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2017 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2017 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include "iwl-trans.h"
#include "iwl-context-info.h"
#include "internal.h"

/*
 * Start up NIC's basic functionality after it has been reset
 * (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop())
 * NOTE:  This does not load uCode nor start the embedded processor
 */
static int iwl_pcie_gen2_apm_init(struct iwl_trans *trans)
{
	int ret = 0;

	IWL_DEBUG_INFO(trans, "Init card's basic functions\n");

	/*
	 * Use "set_bit" below rather than "write", to preserve any hardware
	 * bits already set by default after reset.
	 */

	/*
	 * Disable L0s without affecting L1;
	 * don't wait for ICH L0s (ICH bug W/A)
	 */
	iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
		    CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

	/* Set FH wait threshold to maximum (HW error during stress W/A) */
	iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);

	/*
	 * Enable HAP INTA (interrupt from management bus) to
	 * wake device's PCI Express link L1a -> L0s
	 */
	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);

	iwl_pcie_apm_config(trans);

	/*
	 * Set "initialization complete" bit to move adapter from
	 * D0U* --> D0A* (powered-up active) state.
	 */
	iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/*
	 * Wait for clock stabilization; once stabilized, access to
	 * device-internal resources is supported, e.g. iwl_write_prph()
	 * and accesses to uCode SRAM.
	 */
	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
	if (ret < 0) {
		IWL_DEBUG_INFO(trans, "Failed to init the card\n");
		return ret;
	}

	set_bit(STATUS_DEVICE_ENABLED, &trans->status);

	return 0;
}
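
/*
 * Counterpart of iwl_pcie_gen2_apm_init(): stop the device's basic
 * functionality and put it into a low power state.  When the op mode is
 * leaving, the ME/AMT firmware is informed first via the PREPARE/ENABLE_PME
 * handshake.
 */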
static void iwl_pcie_gen2_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
{
	IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n");

	if (op_mode_leave) {
		if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
			iwl_pcie_gen2_apm_init(trans);

		/* inform ME that we are leaving */
		iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
			    CSR_RESET_LINK_PWR_MGMT_DISABLED);
		iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
			    CSR_HW_IF_CONFIG_REG_PREPARE |
			    CSR_HW_IF_CONFIG_REG_ENABLE_PME);
		mdelay(1);
		iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
			      CSR_RESET_LINK_PWR_MGMT_DISABLED);
		mdelay(5);
	}

	clear_bit(STATUS_DEVICE_ENABLED, &trans->status);

	/* Stop device's DMA activity */
	iwl_pcie_apm_stop_master(trans);

	iwl_pcie_sw_reset(trans);

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}
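
/*
 * Tear the device down: disable interrupts, stop TX/RX DMA, free the
 * firmware context info, reset the NIC and leave only the RF-kill interrupt
 * enabled.  Caller must hold trans_pcie->mutex.
 */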
void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->mutex);

	if (trans_pcie->is_down)
		return;

	trans_pcie->is_down = true;

	/* tell the device to stop sending interrupts */
	iwl_disable_interrupts(trans);

	/* device going down, stop using ICT table */
	iwl_pcie_disable_ict(trans);

	/*
	 * If a HW restart happens during firmware loading,
	 * then the firmware loading might call this function
	 * and later it might be called again due to the
	 * restart. So don't process again if the device is
	 * already dead.
	 */
	if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
		IWL_DEBUG_INFO(trans,
			       "DEVICE_ENABLED bit was set and is now cleared\n");
		iwl_pcie_gen2_tx_stop(trans);
		iwl_pcie_rx_stop(trans);
	}

	iwl_pcie_ctxt_info_free_paging(trans);
	iwl_pcie_ctxt_info_free(trans);

	/* Make sure (redundant) we've released our request to stay awake */
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwl_pcie_gen2_apm_stop(trans, false);

	iwl_pcie_sw_reset(trans);

	/*
	 * Upon stop, the IVAR table gets erased, so msi-x won't
	 * work. This causes a bug in RF-KILL flows, since the interrupt
	 * that enables radio won't fire on the correct irq, and the
	 * driver won't be able to handle the interrupt.
	 * Configure the IVAR table again after reset.
	 */
	iwl_pcie_conf_msix_hw(trans_pcie);

	/*
	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * This is a bug in certain versions of the hardware.
	 * Certain devices also keep sending the HW RF kill interrupt all
	 * the time unless it is ACKed, even if the interrupt should be
	 * masked. Re-ACK all the interrupts here.
	 */
	iwl_disable_interrupts(trans);

	/* clear all status bits */
	clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
	clear_bit(STATUS_INT_ENABLED, &trans->status);
	clear_bit(STATUS_TPOWER_PMI, &trans->status);

	/*
	 * Even if we stop the HW, we still want the RF kill
	 * interrupt
	 */
	iwl_enable_rfkill_int(trans);

	/* re-take ownership to prevent other users from stealing the device */
	iwl_pcie_prepare_card_hw(trans);
}
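
/*
 * Locked wrapper around _iwl_trans_pcie_gen2_stop_device() that also marks
 * the op mode as down and reports a resulting RF-kill state change.
 */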
void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool was_in_rfkill;

	mutex_lock(&trans_pcie->mutex);
	trans_pcie->opmode_down = true;
	was_in_rfkill = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
	_iwl_trans_pcie_gen2_stop_device(trans, low_power);
	iwl_trans_pcie_handle_stop_rfkill(trans, was_in_rfkill);
	mutex_unlock(&trans_pcie->mutex);
}
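
/*
 * Bring the NIC up far enough to load firmware: init the APM, apply the op
 * mode's NIC configuration and allocate (or reset) the RX/TX queues.
 */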
static int iwl_pcie_gen2_nic_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* TODO: most of the logic can be removed in A0 - but not in Z0 */
	spin_lock(&trans_pcie->irq_lock);
	iwl_pcie_gen2_apm_init(trans);
	spin_unlock(&trans_pcie->irq_lock);

	iwl_op_mode_nic_config(trans->op_mode);

	/* Allocate the RX queue, or reset if it is already allocated */
	if (iwl_pcie_gen2_rx_init(trans))
		return -ENOMEM;

	/* Allocate or reset and init all Tx and Command queues */
	if (iwl_pcie_gen2_tx_init(trans))
		return -ENOMEM;

	/* enable shadow regs in HW */
	iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF);
	IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n");

	return 0;
}
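
/*
 * Called once the firmware has sent its "alive" notification: reset the ICT
 * table, mark all queues as unused and free the load-time context info.
 */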
void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	iwl_pcie_reset_ict(trans);

	/* make sure all queues are not stopped/used */
	memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
	memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));

	/* now that we got alive we can free the fw image & the context info.
	 * the paging memory cannot be freed, though, since the FW will still
	 * use it
	 */
	iwl_pcie_ctxt_info_free(trans);
}
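
/*
 * Start the firmware: take ownership of the device (this may fail if AMT/ME
 * owns it), bring the NIC up and hand the firmware image to the device via
 * the context info.  Fails with -ERFKILL if the HW RF-kill switch is set and
 * running in RF-kill is not allowed.
 */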
int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
				 const struct fw_img *fw, bool run_in_rfkill)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool hw_rfkill;
	int ret;

	/* This may fail if AMT took ownership of the device */
	if (iwl_pcie_prepare_card_hw(trans)) {
		IWL_WARN(trans, "Exit HW not ready\n");
		/* the mutex isn't held yet, so return instead of goto out */
		return -EIO;
	}

	iwl_enable_rfkill_int(trans);

	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);

	/*
	 * We enabled the RF-Kill interrupt and the handler may very
	 * well be running. Disable the interrupts to make sure no other
	 * interrupt can be fired.
	 */
	iwl_disable_interrupts(trans);

	/* Make sure it finished running */
	iwl_pcie_synchronize_irqs(trans);

	mutex_lock(&trans_pcie->mutex);

	/* If platform's RF_KILL switch is NOT set to KILL */
	hw_rfkill = iwl_trans_check_hw_rf_kill(trans);
	if (hw_rfkill && !run_in_rfkill) {
		ret = -ERFKILL;
		goto out;
	}

	/* Someone called stop_device, don't try to start_fw */
	if (trans_pcie->is_down) {
		IWL_WARN(trans,
			 "Can't start_fw since the HW hasn't been started\n");
		ret = -EIO;
		goto out;
	}

	/* make sure rfkill handshake bits are cleared */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
		    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);

	ret = iwl_pcie_gen2_nic_init(trans);
	if (ret) {
		IWL_ERR(trans, "Unable to init nic\n");
		goto out;
	}

	ret = iwl_pcie_ctxt_info_init(trans, fw);
	if (ret)
		goto out;

	/* re-check RF-Kill state since we may have missed the interrupt */
	hw_rfkill = iwl_trans_check_hw_rf_kill(trans);
	if (hw_rfkill && !run_in_rfkill)
		ret = -ERFKILL;

out:
	mutex_unlock(&trans_pcie->mutex);
	return ret;
}