// SPDX-License-Identifier: GPL-2.0
/* Copyright (c)  2018 Intel Corporation */

#include <linux/delay.h>

#include "igc_hw.h"
#include "igc_i225.h"
#include "igc_mac.h"
#include "igc_base.h"
#include "igc.h"

/**
 * igc_reset_hw_base - Reset hardware
 * @hw: pointer to the HW structure
 *
 * This resets the hardware into a known state.  This is a
 * function pointer entry point called by the api module.
 */
static s32 igc_reset_hw_base(struct igc_hw *hw)
{
	s32 ret_val;
	u32 ctrl;

	/* Prevent the PCI-E bus from sticking if there is no TLP connection
	 * on the last TLP read/write transaction when MAC is reset.
	 */
	ret_val = igc_disable_pcie_master(hw);
	if (ret_val)
		hw_dbg("PCI-E Master disable polling has failed\n");

	hw_dbg("Masking off all interrupts\n");
	wr32(IGC_IMC, 0xffffffff);

	wr32(IGC_RCTL, 0);
	wr32(IGC_TCTL, IGC_TCTL_PSP);
	wrfl();

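	/* Give any in-flight receive and transmit activity time to settle
	 * before issuing the device reset below.
	 */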
	usleep_range(10000, 20000);

	ctrl = rd32(IGC_CTRL);

	hw_dbg("Issuing a global reset to MAC\n");
	wr32(IGC_CTRL, ctrl | IGC_CTRL_DEV_RST);

	ret_val = igc_get_auto_rd_done(hw);
	if (ret_val) {
		/* When auto config read does not complete, do not
		 * return with an error. This can happen in situations
		 * where there is no eeprom and prevents getting link.
		 */
		hw_dbg("Auto Read Done did not complete\n");
	}

	/* Clear any pending interrupt events. */
	wr32(IGC_IMC, 0xffffffff);
	rd32(IGC_ICR);

	return ret_val;
}

/**
 * igc_init_nvm_params_base - Init NVM func ptrs.
 * @hw: pointer to the HW structure
 */
static s32 igc_init_nvm_params_base(struct igc_hw *hw)
{
	struct igc_nvm_info *nvm = &hw->nvm;
	u32 eecd = rd32(IGC_EECD);
	u16 size;

	size = (u16)((eecd & IGC_EECD_SIZE_EX_MASK) >>
		     IGC_EECD_SIZE_EX_SHIFT);

	/* Added to a constant, "size" becomes the left-shift value
	 * for setting word_size.
	 */
	size += NVM_WORD_SIZE_BASE_SHIFT;

	/* Just in case size is out of range, cap it to the largest
	 * EEPROM size supported
	 */
	if (size > 15)
		size = 15;

	nvm->type = igc_nvm_eeprom_spi;
	nvm->word_size = BIT(size);
	nvm->opcode_bits = 8;
	nvm->delay_usec = 1;

	nvm->page_size = eecd & IGC_EECD_ADDR_BITS ? 32 : 8;
	nvm->address_bits = eecd & IGC_EECD_ADDR_BITS ? 16 : 8;

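	/* The largest supported SPI EEPROMs (32K words) use a 128-byte page */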
	if (nvm->word_size == BIT(15))
		nvm->page_size = 128;

	return 0;
}

/**
 * igc_setup_copper_link_base - Configure copper link settings
 * @hw: pointer to the HW structure
 *
 * Configures the link for auto-negotiation or forced speed and duplex.  Then
 * we check for link; once link is established, collision distance and flow
 * control are configured.
 */
static s32 igc_setup_copper_link_base(struct igc_hw *hw)
{
	s32 ret_val = 0;
	u32 ctrl;

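	/* Force the MAC's link up (SLU) and let speed and duplex be
	 * resolved by the PHY rather than the forced speed/duplex bits.
	 */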
	ctrl = rd32(IGC_CTRL);
	ctrl |= IGC_CTRL_SLU;
	ctrl &= ~(IGC_CTRL_FRCSPD | IGC_CTRL_FRCDPX);
	wr32(IGC_CTRL, ctrl);

	ret_val = igc_setup_copper_link(hw);

	return ret_val;
}

/**
 * igc_init_mac_params_base - Init MAC func ptrs.
 * @hw: pointer to the HW structure
 */
static s32 igc_init_mac_params_base(struct igc_hw *hw)
{
	struct igc_dev_spec_base *dev_spec = &hw->dev_spec._base;
	struct igc_mac_info *mac = &hw->mac;

	/* Set mta register count */
	mac->mta_reg_count = 128;
	mac->rar_entry_count = IGC_RAR_ENTRIES;

	/* reset */
	mac->ops.reset_hw = igc_reset_hw_base;

	mac->ops.acquire_swfw_sync = igc_acquire_swfw_sync_i225;
	mac->ops.release_swfw_sync = igc_release_swfw_sync_i225;

	/* Allow a single clear of the SW semaphore on I225 */
	if (mac->type == igc_i225)
		dev_spec->clear_semaphore_once = true;

	/* physical interface link setup */
	mac->ops.setup_physical_interface = igc_setup_copper_link_base;

	return 0;
}

/**
 * igc_init_phy_params_base - Init PHY func ptrs.
 * @hw: pointer to the HW structure
 */
static s32 igc_init_phy_params_base(struct igc_hw *hw)
{
	struct igc_phy_info *phy = &hw->phy;
	s32 ret_val = 0;

	if (hw->phy.media_type != igc_media_type_copper) {
		phy->type = igc_phy_none;
		goto out;
	}

	phy->autoneg_mask	= AUTONEG_ADVERTISE_SPEED_DEFAULT_2500;
	phy->reset_delay_us	= 100;

	/* set lan id */
	hw->bus.func = (rd32(IGC_STATUS) & IGC_STATUS_FUNC_MASK) >>
			IGC_STATUS_FUNC_SHIFT;

	/* Make sure the PHY is in a good state. Several people have reported
	 * firmware leaving the PHY's page select register set to something
	 * other than the default of zero, which causes the PHY ID read to
	 * access something other than the intended register.
	 */
	ret_val = hw->phy.ops.reset(hw);
	if (ret_val) {
		hw_dbg("Error resetting the PHY\n");
		goto out;
	}

	ret_val = igc_get_phy_id(hw);
	if (ret_val)
		return ret_val;

	igc_check_for_copper_link(hw);

	/* Verify phy id and set remaining function pointers */
	switch (phy->id) {
	case I225_I_PHY_ID:
		phy->type	= igc_phy_i225;
		break;
	default:
		ret_val = -IGC_ERR_PHY;
		goto out;
	}

out:
	return ret_val;
}

static s32 igc_get_invariants_base(struct igc_hw *hw)
{
	struct igc_mac_info *mac = &hw->mac;
	s32 ret_val = 0;

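	/* Every device ID handled below uses the i225 MAC */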
	switch (hw->device_id) {
	case IGC_DEV_ID_I225_LM:
	case IGC_DEV_ID_I225_V:
	case IGC_DEV_ID_I225_I:
	case IGC_DEV_ID_I220_V:
	case IGC_DEV_ID_I225_K:
	case IGC_DEV_ID_I225_K2:
	case IGC_DEV_ID_I226_K:
	case IGC_DEV_ID_I225_LMVP:
	case IGC_DEV_ID_I225_IT:
	case IGC_DEV_ID_I226_LM:
	case IGC_DEV_ID_I226_V:
	case IGC_DEV_ID_I226_IT:
	case IGC_DEV_ID_I221_V:
	case IGC_DEV_ID_I226_BLANK_NVM:
	case IGC_DEV_ID_I225_BLANK_NVM:
		mac->type = igc_i225;
		break;
	default:
		return -IGC_ERR_MAC_INIT;
	}

	hw->phy.media_type = igc_media_type_copper;

	/* mac initialization and operations */
	ret_val = igc_init_mac_params_base(hw);
	if (ret_val)
		goto out;

	/* NVM initialization */
	ret_val = igc_init_nvm_params_base(hw);
	switch (hw->mac.type) {
	case igc_i225:
		ret_val = igc_init_nvm_params_i225(hw);
		break;
	default:
		break;
	}

	/* setup PHY parameters */
	ret_val = igc_init_phy_params_base(hw);
	if (ret_val)
		goto out;

out:
	return ret_val;
}

/**
 * igc_acquire_phy_base - Acquire rights to access PHY
 * @hw: pointer to the HW structure
 *
 * Acquire access rights to the correct PHY.  This is a
 * function pointer entry point called by the api module.
 */
static s32 igc_acquire_phy_base(struct igc_hw *hw)
{
	u16 mask = IGC_SWFW_PHY0_SM;

	return hw->mac.ops.acquire_swfw_sync(hw, mask);
}

/**
 * igc_release_phy_base - Release rights to access PHY
 * @hw: pointer to the HW structure
 *
 * A wrapper to release access rights to the correct PHY.  This is a
 * function pointer entry point called by the api module.
 */
static void igc_release_phy_base(struct igc_hw *hw)
{
	u16 mask = IGC_SWFW_PHY0_SM;

	hw->mac.ops.release_swfw_sync(hw, mask);
}

/**
 * igc_init_hw_base - Initialize hardware
 * @hw: pointer to the HW structure
 *
 * This inits the hardware readying it for operation.
 */
static s32 igc_init_hw_base(struct igc_hw *hw)
{
	struct igc_mac_info *mac = &hw->mac;
	u16 i, rar_count = mac->rar_entry_count;
	s32 ret_val = 0;

	/* Setup the receive address */
	igc_init_rx_addrs(hw, rar_count);

	/* Zero out the Multicast HASH table */
	hw_dbg("Zeroing the MTA\n");
	for (i = 0; i < mac->mta_reg_count; i++)
		array_wr32(IGC_MTA, i, 0);

	/* Zero out the Unicast HASH table */
	hw_dbg("Zeroing the UTA\n");
	for (i = 0; i < mac->uta_reg_count; i++)
		array_wr32(IGC_UTA, i, 0);

	/* Setup link and flow control */
	ret_val = igc_setup_link(hw);

	/* Clear all of the statistics registers (clear on read).  It is
	 * important that we do this after we have tried to establish link
	 * because the symbol error count will increment wildly if there
	 * is no link.
	 */
	igc_clear_hw_cntrs_base(hw);

	return ret_val;
}

/**
 * igc_power_down_phy_copper_base - Remove link during PHY power down
 * @hw: pointer to the HW structure
 *
 * Remove the link when the PHY is powered down to save power, when link is
 * turned off during a driver unload, or when Wake on LAN is not enabled.
 */
void igc_power_down_phy_copper_base(struct igc_hw *hw)
{
	/* Power down the PHY only if management pass-through is not enabled
	 * and the manageability firmware does not block PHY resets.
	 */
	if (!(igc_enable_mng_pass_thru(hw) || igc_check_reset_block(hw)))
		igc_power_down_phy_copper(hw);
}

/**
 * igc_rx_fifo_flush_base - Clean rx fifo after Rx enable
 * @hw: pointer to the HW structure
 *
 * After Rx enable, if manageability is enabled then there is likely some
 * bad data at the start of the fifo and possibly in the DMA fifo.  This
 * function clears the fifos and flushes any packets that came in while Rx
 * was being enabled.
 */
void igc_rx_fifo_flush_base(struct igc_hw *hw)
{
	u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled;
	int i, ms_wait;

	/* disable IPv6 options as per hardware errata */
	rfctl = rd32(IGC_RFCTL);
	rfctl |= IGC_RFCTL_IPV6_EX_DIS;
	wr32(IGC_RFCTL, rfctl);

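	/* Nothing to flush unless the management engine is receiving
	 * (TCO) packets.
	 */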
	if (!(rd32(IGC_MANC) & IGC_MANC_RCV_TCO_EN))
		return;

	/* Disable all Rx queues */
	for (i = 0; i < 4; i++) {
		rxdctl[i] = rd32(IGC_RXDCTL(i));
		wr32(IGC_RXDCTL(i),
		     rxdctl[i] & ~IGC_RXDCTL_QUEUE_ENABLE);
	}
	/* Poll all queues to verify they have shut down */
	for (ms_wait = 0; ms_wait < 10; ms_wait++) {
		usleep_range(1000, 2000);
		rx_enabled = 0;
		for (i = 0; i < 4; i++)
			rx_enabled |= rd32(IGC_RXDCTL(i));
		if (!(rx_enabled & IGC_RXDCTL_QUEUE_ENABLE))
			break;
	}

	if (ms_wait == 10)
		hw_dbg("Queue disable timed out after 10ms\n");

	/* Clear RLPML, RCTL.SBP and RFCTL.LEF, and set RCTL.LPE so that all
	 * incoming packets are rejected.  Then set RCTL.EN and wait 2ms so
	 * that any packet that was arriving while RCTL.EN was set is flushed.
	 */
	wr32(IGC_RFCTL, rfctl & ~IGC_RFCTL_LEF);

	rlpml = rd32(IGC_RLPML);
	wr32(IGC_RLPML, 0);

	rctl = rd32(IGC_RCTL);
	temp_rctl = rctl & ~(IGC_RCTL_EN | IGC_RCTL_SBP);
	temp_rctl |= IGC_RCTL_LPE;

	wr32(IGC_RCTL, temp_rctl);
	wr32(IGC_RCTL, temp_rctl | IGC_RCTL_EN);
	wrfl();
	usleep_range(2000, 3000);

	/* Enable Rx queues that were previously enabled and restore our
	 * previous state
	 */
	for (i = 0; i < 4; i++)
		wr32(IGC_RXDCTL(i), rxdctl[i]);
	wr32(IGC_RCTL, rctl);
	wrfl();

	wr32(IGC_RLPML, rlpml);
	wr32(IGC_RFCTL, rfctl);

	/* Flush receive errors generated by workaround */
	rd32(IGC_ROC);
	rd32(IGC_RNBC);
	rd32(IGC_MPC);
}

static struct igc_mac_operations igc_mac_ops_base = {
	.init_hw		= igc_init_hw_base,
	.check_for_link		= igc_check_for_copper_link,
	.rar_set		= igc_rar_set,
	.read_mac_addr		= igc_read_mac_addr,
	.get_speed_and_duplex	= igc_get_speed_and_duplex_copper,
};

static const struct igc_phy_operations igc_phy_ops_base = {
	.acquire		= igc_acquire_phy_base,
	.release		= igc_release_phy_base,
	.reset			= igc_phy_hw_reset,
	.read_reg		= igc_read_phy_reg_gpy,
	.write_reg		= igc_write_phy_reg_gpy,
};

const struct igc_info igc_base_info = {
	.get_invariants		= igc_get_invariants_base,
	.mac_ops		= &igc_mac_ops_base,
	.phy_ops		= &igc_phy_ops_base,
};