// SPDX-License-Identifier: GPL-2.0
/* Copyright (c)  2018 Intel Corporation */

#include <linux/delay.h>

#include "igc_hw.h"
#include "igc_i225.h"
#include "igc_mac.h"
#include "igc_base.h"
#include "igc.h"

/**
 * igc_reset_hw_base - Reset hardware
 * @hw: pointer to the HW structure
 *
 * This resets the hardware into a known state.  This is a
 * function pointer entry point called by the api module.
 */
static s32 igc_reset_hw_base(struct igc_hw *hw)
{
	s32 ret_val;
	u32 ctrl;

	/* Prevent the PCI-E bus from sticking if there is no TLP connection
	 * on the last TLP read/write transaction when MAC is reset.
	 */
	ret_val = igc_disable_pcie_master(hw);
	if (ret_val)
		hw_dbg("PCI-E Master disable polling has failed\n");

	hw_dbg("Masking off all interrupts\n");
	wr32(IGC_IMC, 0xffffffff);

	wr32(IGC_RCTL, 0);
	wr32(IGC_TCTL, IGC_TCTL_PSP);
	wrfl();

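	/* Delay to allow any outstanding transactions to complete now that
	 * receive and transmit have been disabled, before resetting the MAC.
	 */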
	usleep_range(10000, 20000);

	ctrl = rd32(IGC_CTRL);

	hw_dbg("Issuing a global reset to MAC\n");
	wr32(IGC_CTRL, ctrl | IGC_CTRL_DEV_RST);

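	/* Wait for the reset-triggered auto read of the NVM configuration
	 * to complete.
	 */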
	ret_val = igc_get_auto_rd_done(hw);
	if (ret_val) {
		/* When auto config read does not complete, do not
		 * return with an error. This can happen in situations
		 * where there is no eeprom and returning an error here
		 * would prevent the driver from getting link.
		 */
		hw_dbg("Auto Read Done did not complete\n");
	}

	/* Clear any pending interrupt events. */
	wr32(IGC_IMC, 0xffffffff);
	rd32(IGC_ICR);

	return ret_val;
}

/**
 * igc_init_nvm_params_base - Init NVM func ptrs.
 * @hw: pointer to the HW structure
 */
static s32 igc_init_nvm_params_base(struct igc_hw *hw)
{
	struct igc_nvm_info *nvm = &hw->nvm;
	u32 eecd = rd32(IGC_EECD);
	u16 size;

	size = (u16)((eecd & IGC_EECD_SIZE_EX_MASK) >>
		     IGC_EECD_SIZE_EX_SHIFT);

	/* Added to a constant, "size" becomes the left-shift value
	 * for setting word_size.
	 */
	size += NVM_WORD_SIZE_BASE_SHIFT;

	/* Just in case size is out of range, cap it to the largest
	 * EEPROM size supported.
	 */
	if (size > 15)
		size = 15;

	nvm->type = igc_nvm_eeprom_spi;
	nvm->word_size = BIT(size);
	nvm->opcode_bits = 8;
	nvm->delay_usec = 1;

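	/* The EECD address-width bit selects between small SPI EEPROMs
	 * (8-bit addressing, 8-byte pages) and larger parts (16-bit
	 * addressing, 32-byte pages).
	 */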
	nvm->page_size = eecd & IGC_EECD_ADDR_BITS ? 32 : 8;
	nvm->address_bits = eecd & IGC_EECD_ADDR_BITS ?
			    16 : 8;

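	/* The largest supported parts (32K words) use 128-byte pages */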
	if (nvm->word_size == BIT(15))
		nvm->page_size = 128;

	return 0;
}

/**
 * igc_setup_copper_link_base - Configure copper link settings
 * @hw: pointer to the HW structure
 *
 * Configures the link for auto-neg or forced speed and duplex.  Then we check
 * for link; once link is established, calls to configure collision distance
 * and flow control are made.
 */
static s32 igc_setup_copper_link_base(struct igc_hw *hw)
{
	s32 ret_val = 0;
	u32 ctrl;

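	/* Set the link up (SLU) and clear the force speed/duplex bits so
	 * the MAC takes speed and duplex from the PHY.
	 */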
	ctrl = rd32(IGC_CTRL);
	ctrl |= IGC_CTRL_SLU;
	ctrl &= ~(IGC_CTRL_FRCSPD | IGC_CTRL_FRCDPX);
	wr32(IGC_CTRL, ctrl);

	ret_val = igc_setup_copper_link(hw);

	return ret_val;
}

/**
 * igc_init_mac_params_base - Init MAC func ptrs.
 * @hw: pointer to the HW structure
 */
static s32 igc_init_mac_params_base(struct igc_hw *hw)
{
	struct igc_dev_spec_base *dev_spec = &hw->dev_spec._base;
	struct igc_mac_info *mac = &hw->mac;

	/* Set mta register count */
	mac->mta_reg_count = 128;
	mac->rar_entry_count = IGC_RAR_ENTRIES;

	/* reset */
	mac->ops.reset_hw = igc_reset_hw_base;

	mac->ops.acquire_swfw_sync = igc_acquire_swfw_sync_i225;
	mac->ops.release_swfw_sync = igc_release_swfw_sync_i225;

	/* Allow a single clear of the SW semaphore on I225 */
	if (mac->type == igc_i225)
		dev_spec->clear_semaphore_once = true;

	/* physical interface link setup */
	mac->ops.setup_physical_interface = igc_setup_copper_link_base;

	return 0;
}

/**
 * igc_init_phy_params_base - Init PHY func ptrs.
 * @hw: pointer to the HW structure
 */
static s32 igc_init_phy_params_base(struct igc_hw *hw)
{
	struct igc_phy_info *phy = &hw->phy;
	s32 ret_val = 0;

	if (hw->phy.media_type != igc_media_type_copper) {
		phy->type = igc_phy_none;
		goto out;
	}

	phy->autoneg_mask	= AUTONEG_ADVERTISE_SPEED_DEFAULT_2500;
	phy->reset_delay_us	= 100;

	/* set lan id */
	hw->bus.func = (rd32(IGC_STATUS) & IGC_STATUS_FUNC_MASK) >>
			IGC_STATUS_FUNC_SHIFT;

	/* Make sure the PHY is in a good state. Several people have reported
	 * firmware leaving the PHY's page select register set to something
	 * other than the default of zero, which causes the PHY ID read to
	 * access something other than the intended register.
	 */
	ret_val = hw->phy.ops.reset(hw);
	if (ret_val) {
		hw_dbg("Error resetting the PHY\n");
		goto out;
	}

	ret_val = igc_get_phy_id(hw);
	if (ret_val)
		return ret_val;

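	/* Check for link so the initial link state is known */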
	igc_check_for_copper_link(hw);

	/* Verify phy id and set remaining function pointers */
	switch (phy->id) {
	case I225_I_PHY_ID:
		phy->type	= igc_phy_i225;
		break;
	default:
		ret_val = -IGC_ERR_PHY;
		goto out;
	}

out:
	return ret_val;
}

static s32 igc_get_invariants_base(struct igc_hw *hw)
{
	struct igc_mac_info *mac = &hw->mac;
	s32 ret_val = 0;

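	/* Every device ID handled by this driver maps to the i225 MAC type */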
	switch (hw->device_id) {
	case IGC_DEV_ID_I225_LM:
	case IGC_DEV_ID_I225_V:
	case IGC_DEV_ID_I225_I:
	case IGC_DEV_ID_I220_V:
	case IGC_DEV_ID_I225_K:
	case IGC_DEV_ID_I225_K2:
	case IGC_DEV_ID_I225_LMVP:
	case IGC_DEV_ID_I225_IT:
	case IGC_DEV_ID_I226_LM:
	case IGC_DEV_ID_I226_V:
	case IGC_DEV_ID_I226_IT:
	case IGC_DEV_ID_I221_V:
	case IGC_DEV_ID_I226_BLANK_NVM:
	case IGC_DEV_ID_I225_BLANK_NVM:
		mac->type = igc_i225;
		break;
	default:
		return -IGC_ERR_MAC_INIT;
	}

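	/* All parts handled here use a copper PHY */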
	hw->phy.media_type = igc_media_type_copper;

	/* mac initialization and operations */
	ret_val = igc_init_mac_params_base(hw);
	if (ret_val)
		goto out;

	/* NVM initialization */
	ret_val = igc_init_nvm_params_base(hw);
	switch (hw->mac.type) {
	case igc_i225:
		ret_val = igc_init_nvm_params_i225(hw);
		break;
	default:
		break;
	}

	/* setup PHY parameters */
	ret_val = igc_init_phy_params_base(hw);
	if (ret_val)
		goto out;

out:
	return ret_val;
}

/**
 * igc_acquire_phy_base - Acquire rights to access PHY
 * @hw: pointer to the HW structure
 *
 * Acquire access rights to the correct PHY.  This is a
 * function pointer entry point called by the api module.
 */
static s32 igc_acquire_phy_base(struct igc_hw *hw)
{
	u16 mask = IGC_SWFW_PHY0_SM;

	return hw->mac.ops.acquire_swfw_sync(hw, mask);
}

/**
 * igc_release_phy_base - Release rights to access PHY
 * @hw: pointer to the HW structure
 *
 * A wrapper to release access rights to the correct PHY.  This is a
 * function pointer entry point called by the api module.
 */
static void igc_release_phy_base(struct igc_hw *hw)
{
	u16 mask = IGC_SWFW_PHY0_SM;

	hw->mac.ops.release_swfw_sync(hw, mask);
}

/**
 * igc_init_hw_base - Initialize hardware
 * @hw: pointer to the HW structure
 *
 * This inits the hardware readying it for operation.
 */
static s32 igc_init_hw_base(struct igc_hw *hw)
{
	struct igc_mac_info *mac = &hw->mac;
	u16 i, rar_count = mac->rar_entry_count;
	s32 ret_val = 0;

	/* Setup the receive address */
	igc_init_rx_addrs(hw, rar_count);

	/* Zero out the Multicast HASH table */
	hw_dbg("Zeroing the MTA\n");
	for (i = 0; i < mac->mta_reg_count; i++)
		array_wr32(IGC_MTA, i, 0);

	/* Zero out the Unicast HASH table */
	hw_dbg("Zeroing the UTA\n");
	for (i = 0; i < mac->uta_reg_count; i++)
		array_wr32(IGC_UTA, i, 0);

	/* Setup link and flow control */
	ret_val = igc_setup_link(hw);

	/* Clear all of the statistics registers (clear on read).  It is
	 * important that we do this after we have tried to establish link
	 * because the symbol error count will increment wildly if there
	 * is no link.
	 */
	igc_clear_hw_cntrs_base(hw);

	return ret_val;
}

/**
 * igc_power_down_phy_copper_base - Remove link during PHY power down
 * @hw: pointer to the HW structure
 *
 * In the case of a PHY power down to save power, to turn off link during a
 * driver unload, or when wake on lan is not enabled, remove the link.
 */
void igc_power_down_phy_copper_base(struct igc_hw *hw)
{
	/* If the management interface is not enabled, then power down */
	if (!(igc_enable_mng_pass_thru(hw) || igc_check_reset_block(hw)))
		igc_power_down_phy_copper(hw);
}

/**
 * igc_rx_fifo_flush_base - Clean Rx FIFO after Rx enable
 * @hw: pointer to the HW structure
 *
 * After Rx enable, if manageability is enabled then there is likely some
 * bad data at the start of the FIFO and possibly in the DMA FIFO.  This
 * function clears the FIFOs and flushes any packets that came in as Rx was
 * being enabled.
 */
void igc_rx_fifo_flush_base(struct igc_hw *hw)
{
	u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled;
	int i, ms_wait;

	/* Disable IPv6 options as per hardware errata */
	rfctl = rd32(IGC_RFCTL);
	rfctl |= IGC_RFCTL_IPV6_EX_DIS;
	wr32(IGC_RFCTL, rfctl);

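	/* If the management engine is not receiving packets there is nothing
	 * stale in the Rx FIFO to flush.
	 */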
	if (!(rd32(IGC_MANC) & IGC_MANC_RCV_TCO_EN))
		return;

	/* Disable all Rx queues */
	for (i = 0; i < 4; i++) {
		rxdctl[i] = rd32(IGC_RXDCTL(i));
		wr32(IGC_RXDCTL(i),
		     rxdctl[i] & ~IGC_RXDCTL_QUEUE_ENABLE);
	}

	/* Poll all queues to verify they have shut down */
	for (ms_wait = 0; ms_wait < 10; ms_wait++) {
		usleep_range(1000, 2000);
		rx_enabled = 0;
		for (i = 0; i < 4; i++)
			rx_enabled |= rd32(IGC_RXDCTL(i));
		if (!(rx_enabled & IGC_RXDCTL_QUEUE_ENABLE))
			break;
	}

	if (ms_wait == 10)
		hw_dbg("Queue disable timed out after 10ms\n");

	/* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all
	 * incoming packets are rejected.  Set enable and wait 2ms so that
	 * any packets that were arriving while RCTL.EN was set are flushed.
	 */
	wr32(IGC_RFCTL, rfctl & ~IGC_RFCTL_LEF);

	rlpml = rd32(IGC_RLPML);
	wr32(IGC_RLPML, 0);

	rctl = rd32(IGC_RCTL);
	temp_rctl = rctl & ~(IGC_RCTL_EN | IGC_RCTL_SBP);
	temp_rctl |= IGC_RCTL_LPE;

	wr32(IGC_RCTL, temp_rctl);
	wr32(IGC_RCTL, temp_rctl | IGC_RCTL_EN);
	wrfl();
	usleep_range(2000, 3000);

	/* Re-enable the Rx queues that were previously enabled and restore
	 * our previous state.
	 */
	for (i = 0; i < 4; i++)
		wr32(IGC_RXDCTL(i), rxdctl[i]);
	wr32(IGC_RCTL, rctl);
	wrfl();

	wr32(IGC_RLPML, rlpml);
	wr32(IGC_RFCTL, rfctl);

	/* Flush receive errors generated by the workaround */
	rd32(IGC_ROC);
	rd32(IGC_RNBC);
	rd32(IGC_MPC);
}

static struct igc_mac_operations igc_mac_ops_base = {
	.init_hw		= igc_init_hw_base,
	.check_for_link		= igc_check_for_copper_link,
	.rar_set		= igc_rar_set,
	.read_mac_addr		= igc_read_mac_addr,
	.get_speed_and_duplex	= igc_get_speed_and_duplex_copper,
};

static const struct igc_phy_operations igc_phy_ops_base = {
	.acquire		= igc_acquire_phy_base,
	.release		= igc_release_phy_base,
	.reset			= igc_phy_hw_reset,
	.read_reg		= igc_read_phy_reg_gpy,
	.write_reg		= igc_write_phy_reg_gpy,
};

const struct igc_info igc_base_info = {
	.get_invariants		= igc_get_invariants_base,
	.mac_ops		= &igc_mac_ops_base,
	.phy_ops		= &igc_phy_ops_base,
};