// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd. */

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/iopoll.h>
#include <linux/pci.h>

#include "wx_type.h"
#include "wx_lib.h"
#include "wx_hw.h"

static void wx_intr_disable(struct wx *wx, u64 qmask)
{
	u32 mask;

	mask = (qmask & U32_MAX);
	if (mask)
		wr32(wx, WX_PX_IMS(0), mask);

	if (wx->mac.type == wx_mac_sp) {
		mask = (qmask >> 32);
		if (mask)
			wr32(wx, WX_PX_IMS(1), mask);
	}
}

void wx_intr_enable(struct wx *wx, u64 qmask)
{
	u32 mask;

	mask = (qmask & U32_MAX);
	if (mask)
		wr32(wx, WX_PX_IMC(0), mask);
	if (wx->mac.type == wx_mac_sp) {
		mask = (qmask >> 32);
		if (mask)
			wr32(wx, WX_PX_IMC(1), mask);
	}
}
EXPORT_SYMBOL(wx_intr_enable);

/**
 * wx_irq_disable - Mask off interrupt generation on the NIC
 * @wx: board private structure
 **/
void wx_irq_disable(struct wx *wx)
{
	struct pci_dev *pdev = wx->pdev;

	wr32(wx, WX_PX_MISC_IEN, 0);
	wx_intr_disable(wx, WX_INTR_ALL);

	if (pdev->msix_enabled) {
		int vector;

		for (vector = 0; vector < wx->num_q_vectors; vector++)
			synchronize_irq(wx->msix_entries[vector].vector);

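		/* one extra entry beyond the queue vectors carries the
		 * misc/other interrupt
		 */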
		synchronize_irq(wx->msix_entries[vector].vector);
	} else {
		synchronize_irq(pdev->irq);
	}
}
EXPORT_SYMBOL(wx_irq_disable);

/* cmd_addr is used for some special commands:
 * 1. as the sector address for the erase sector command
 * 2. as the flash address for the read/write flash commands
 */
static int wx_fmgr_cmd_op(struct wx *wx, u32 cmd, u32 cmd_addr)
{
	u32 cmd_val = 0, val = 0;

	cmd_val = WX_SPI_CMD_CMD(cmd) |
		  WX_SPI_CMD_CLK(WX_SPI_CLK_DIV) |
		  cmd_addr;
	wr32(wx, WX_SPI_CMD, cmd_val);

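	/* poll bit 0 of the SPI status register until the command
	 * completes, for up to 100ms
	 */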
	return read_poll_timeout(rd32, val, (val & 0x1), 10, 100000,
				 false, wx, WX_SPI_STATUS);
}

static int wx_flash_read_dword(struct wx *wx, u32 addr, u32 *data)
{
	int ret = 0;

	ret = wx_fmgr_cmd_op(wx, WX_SPI_CMD_READ_DWORD, addr);
	if (ret < 0)
		return ret;

	*data = rd32(wx, WX_SPI_DATA);

	return ret;
}

int wx_check_flash_load(struct wx *hw, u32 check_bit)
{
	u32 reg = 0;
	int err = 0;

	/* proceed only if a flash part is present (not bypassed) */
	if (!(rd32(hw, WX_SPI_STATUS) &
	      WX_SPI_STATUS_FLASH_BYPASS)) {
		/* wait for hw to finish loading from flash */
		err = read_poll_timeout(rd32, reg, !(reg & check_bit), 20000, 2000000,
					false, hw, WX_SPI_ILDR_STATUS);
		if (err < 0)
			wx_err(hw, "Check flash load timeout.\n");
	}

	return err;
}
EXPORT_SYMBOL(wx_check_flash_load);

void wx_control_hw(struct wx *wx, bool drv)
{
	/* True : Let firmware know the driver has taken over
	 * False : Let firmware take over control of hw
	 */
	wr32m(wx, WX_CFG_PORT_CTL, WX_CFG_PORT_CTL_DRV_LOAD,
	      drv ? WX_CFG_PORT_CTL_DRV_LOAD : 0);
}
EXPORT_SYMBOL(wx_control_hw);

/**
 * wx_mng_present - returns 0 when management capability is present
 * @wx: pointer to hardware structure
 */
int wx_mng_present(struct wx *wx)
{
	u32 fwsm;

	fwsm = rd32(wx, WX_MIS_ST);
	if (fwsm & WX_MIS_ST_MNG_INIT_DN)
		return 0;
	else
		return -EACCES;
}
EXPORT_SYMBOL(wx_mng_present);

/* Software lock to be held while software semaphore is being accessed. */
static DEFINE_MUTEX(wx_sw_sync_lock);

/**
 *  wx_release_sw_sync - Release SW semaphore
 *  @wx: pointer to hardware structure
 *  @mask: Mask to specify which semaphore to release
 *
 *  Releases the SW semaphore for the specified
 *  function (CSR, PHY0, PHY1, EEPROM, Flash)
 **/
static void wx_release_sw_sync(struct wx *wx, u32 mask)
{
	mutex_lock(&wx_sw_sync_lock);
	wr32m(wx, WX_MNG_SWFW_SYNC, mask, 0);
	mutex_unlock(&wx_sw_sync_lock);
}

/**
 *  wx_acquire_sw_sync - Acquire SW semaphore
 *  @wx: pointer to hardware structure
 *  @mask: Mask to specify which semaphore to acquire
 *
 *  Acquires the SW semaphore for the specified
 *  function (CSR, PHY0, PHY1, EEPROM, Flash)
 **/
static int wx_acquire_sw_sync(struct wx *wx, u32 mask)
{
	u32 sem = 0;
	int ret = 0;

	mutex_lock(&wx_sw_sync_lock);
	ret = read_poll_timeout(rd32, sem, !(sem & mask),
				5000, 2000000, false, wx, WX_MNG_SWFW_SYNC);
	if (!ret) {
		sem |= mask;
		wr32(wx, WX_MNG_SWFW_SYNC, sem);
	} else {
		wx_err(wx, "SW Semaphore not granted: 0x%x.\n", sem);
	}
	mutex_unlock(&wx_sw_sync_lock);

	return ret;
}

/**
 *  wx_host_interface_command - Issue command to manageability block
 *  @wx: pointer to the HW structure
 *  @buffer: contains the command to write and where the return status will
 *   be placed
 *  @length: length of buffer, must be multiple of 4 bytes
 *  @timeout: time in ms to wait for command completion
 *  @return_data: read and return data from the buffer (true) or not (false)
 *   Needed because FW structures are big endian and decoding of
 *   these fields can be 8 bit or 16 bit based on command. Decoding
 *   is not easily understood without making a table of commands.
 *   So we will leave this up to the caller to read back the data
 *   in these cases.
 **/
int wx_host_interface_command(struct wx *wx, u32 *buffer,
			      u32 length, u32 timeout, bool return_data)
{
	u32 hdr_size = sizeof(struct wx_hic_hdr);
	u32 hicr, i, bi, buf[64] = {};
	int status = 0;
	u32 dword_len;
	u16 buf_len;

	if (length == 0 || length > WX_HI_MAX_BLOCK_BYTE_LENGTH) {
		wx_err(wx, "Buffer length failure, buffersize=%d.\n", length);
		return -EINVAL;
	}

	status = wx_acquire_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_MB);
	if (status != 0)
		return status;

	/* Calculate length in DWORDs. We must be DWORD aligned */
	if ((length % (sizeof(u32))) != 0) {
		wx_err(wx, "Buffer length failure, not aligned to dword");
		status = -EINVAL;
		goto rel_out;
	}

	dword_len = length >> 2;

	/* The device driver writes the relevant command block
	 * into the ram area.
	 */
	for (i = 0; i < dword_len; i++) {
		wr32a(wx, WX_MNG_MBOX, i, (__force u32)cpu_to_le32(buffer[i]));
		/* write flush */
		buf[i] = rd32a(wx, WX_MNG_MBOX, i);
	}
	/* Setting this bit tells the ARC that a new command is pending. */
	wr32m(wx, WX_MNG_MBOX_CTL,
	      WX_MNG_MBOX_CTL_SWRDY, WX_MNG_MBOX_CTL_SWRDY);

	status = read_poll_timeout(rd32, hicr, hicr & WX_MNG_MBOX_CTL_FWRDY, 1000,
				   timeout * 1000, false, wx, WX_MNG_MBOX_CTL);

	/* Check command completion */
	if (status) {
		wx_dbg(wx, "Command failed with no valid status.\n");

		buf[0] = rd32(wx, WX_MNG_MBOX);
		if ((buffer[0] & 0xff) != (~buf[0] >> 24)) {
			status = -EINVAL;
			goto rel_out;
		}
		if ((buf[0] & 0xff0000) >> 16 == 0x80) {
			wx_dbg(wx, "It's an unknown command.\n");
			status = -EINVAL;
			goto rel_out;
		}

		wx_dbg(wx, "write value:\n");
		for (i = 0; i < dword_len; i++)
			wx_dbg(wx, "%x ", buffer[i]);
		wx_dbg(wx, "read value:\n");
		for (i = 0; i < dword_len; i++)
			wx_dbg(wx, "%x ", buf[i]);
	}

	if (!return_data)
		goto rel_out;

	/* Calculate length in DWORDs */
	dword_len = hdr_size >> 2;

	/* first pull in the header so we know the buffer length */
	for (bi = 0; bi < dword_len; bi++) {
		buffer[bi] = rd32a(wx, WX_MNG_MBOX, bi);
		le32_to_cpus(&buffer[bi]);
	}

	/* If there is anything in the data position, pull it in */
	buf_len = ((struct wx_hic_hdr *)buffer)->buf_len;
	if (buf_len == 0)
		goto rel_out;

	if (length < buf_len + hdr_size) {
		wx_err(wx, "Buffer not large enough for reply message.\n");
		status = -EFAULT;
		goto rel_out;
	}

	/* Calculate length in DWORDs, add 3 for odd lengths */
	dword_len = (buf_len + 3) >> 2;

	/* Pull in the rest of the buffer (bi is where we left off) */
	for (; bi <= dword_len; bi++) {
		buffer[bi] = rd32a(wx, WX_MNG_MBOX, bi);
		le32_to_cpus(&buffer[bi]);
	}

rel_out:
	wx_release_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_MB);
	return status;
}
EXPORT_SYMBOL(wx_host_interface_command);

/**
 *  wx_read_ee_hostif_data - Read EEPROM word using a host interface cmd
 *  assuming that the semaphore is already obtained.
 *  @wx: pointer to hardware structure
 *  @offset: offset of word in the EEPROM to read
 *  @data: word read from the EEPROM
 *
 *  Reads a 16 bit word from the EEPROM using the hostif.
 **/
static int wx_read_ee_hostif_data(struct wx *wx, u16 offset, u16 *data)
{
	struct wx_hic_read_shadow_ram buffer;
	int status;

	buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD;
	buffer.hdr.req.buf_lenh = 0;
	buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN;
	buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;

	/* convert offset from words to bytes */
	buffer.address = (__force u32)cpu_to_be32(offset * 2);
	/* one word */
	buffer.length = (__force u16)cpu_to_be16(sizeof(u16));

	status = wx_host_interface_command(wx, (u32 *)&buffer, sizeof(buffer),
					   WX_HI_COMMAND_TIMEOUT, false);

	if (status != 0)
		return status;

	*data = (u16)rd32a(wx, WX_MNG_MBOX, FW_NVM_DATA_OFFSET);

	return status;
}

/**
 *  wx_read_ee_hostif - Read EEPROM word using a host interface cmd
 *  @wx: pointer to hardware structure
 *  @offset: offset of word in the EEPROM to read
 *  @data: word read from the EEPROM
 *
 *  Reads a 16 bit word from the EEPROM using the hostif.
 **/
int wx_read_ee_hostif(struct wx *wx, u16 offset, u16 *data)
{
	int status = 0;

	status = wx_acquire_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_FLASH);
	if (status == 0) {
		status = wx_read_ee_hostif_data(wx, offset, data);
		wx_release_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_FLASH);
	}

	return status;
}
EXPORT_SYMBOL(wx_read_ee_hostif);

/**
 *  wx_read_ee_hostif_buffer - Read EEPROM word(s) using hostif
 *  @wx: pointer to hardware structure
 *  @offset: offset of word in the EEPROM to read
 *  @words: number of words
 *  @data: word(s) read from the EEPROM
 *
 *  Reads 16 bit word(s) from the EEPROM using the hostif.
 **/
int wx_read_ee_hostif_buffer(struct wx *wx,
			     u16 offset, u16 words, u16 *data)
{
	struct wx_hic_read_shadow_ram buffer;
	u32 current_word = 0;
	u16 words_to_read;
	u32 value = 0;
	int status;
	u32 i;

	/* Take semaphore for the entire operation. */
	status = wx_acquire_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_FLASH);
	if (status != 0)
		return status;

	while (words) {
		if (words > FW_MAX_READ_BUFFER_SIZE / 2)
			words_to_read = FW_MAX_READ_BUFFER_SIZE / 2;
		else
			words_to_read = words;

		buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD;
		buffer.hdr.req.buf_lenh = 0;
		buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN;
		buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;

		/* convert offset from words to bytes */
		buffer.address = (__force u32)cpu_to_be32((offset + current_word) * 2);
		buffer.length = (__force u16)cpu_to_be16(words_to_read * 2);

		status = wx_host_interface_command(wx, (u32 *)&buffer,
						   sizeof(buffer),
						   WX_HI_COMMAND_TIMEOUT,
						   false);

		if (status != 0) {
			wx_err(wx, "Host interface command failed\n");
			goto out;
		}

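		/* each 32-bit mailbox read yields two 16-bit EEPROM words */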
		for (i = 0; i < words_to_read; i++) {
			u32 reg = WX_MNG_MBOX + (FW_NVM_DATA_OFFSET << 2) + 2 * i;

			value = rd32(wx, reg);
			data[current_word] = (u16)(value & 0xffff);
			current_word++;
			i++;
			if (i < words_to_read) {
				value >>= 16;
				data[current_word] = (u16)(value & 0xffff);
				current_word++;
			}
		}
		words -= words_to_read;
	}

out:
	wx_release_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_FLASH);
	return status;
}
EXPORT_SYMBOL(wx_read_ee_hostif_buffer);

/**
 *  wx_calculate_checksum - Calculate checksum for buffer
 *  @buffer: pointer to EEPROM
 *  @length: size of EEPROM to calculate a checksum for
 *  Calculates the checksum of a buffer over the specified length. The
 *  checksum calculated is returned.
 **/
static u8 wx_calculate_checksum(u8 *buffer, u32 length)
{
	u8 sum = 0;
	u32 i;

	if (!buffer)
		return 0;

	for (i = 0; i < length; i++)
		sum += buffer[i];

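	/* return the two's complement, so the bytes plus the checksum
	 * sum to zero modulo 256
	 */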
	return (u8)(0 - sum);
}

/**
 *  wx_reset_hostif - send reset cmd to fw
 *  @wx: pointer to hardware structure
 *
 *  Sends reset cmd to firmware through the manageability
 *  block.
 **/
int wx_reset_hostif(struct wx *wx)
{
	struct wx_hic_reset reset_cmd;
	int ret_val = 0;
	int i;

	reset_cmd.hdr.cmd = FW_RESET_CMD;
	reset_cmd.hdr.buf_len = FW_RESET_LEN;
	reset_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
	reset_cmd.lan_id = wx->bus.func;
	reset_cmd.reset_type = (u16)wx->reset_type;
	reset_cmd.hdr.checksum = 0;
	reset_cmd.hdr.checksum = wx_calculate_checksum((u8 *)&reset_cmd,
						       (FW_CEM_HDR_LEN +
							reset_cmd.hdr.buf_len));

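	/* the command may fail transiently, so retry it up to
	 * FW_CEM_MAX_RETRIES times
	 */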
	for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
		ret_val = wx_host_interface_command(wx, (u32 *)&reset_cmd,
						    sizeof(reset_cmd),
						    WX_HI_COMMAND_TIMEOUT,
						    true);
		if (ret_val != 0)
			continue;

		if (reset_cmd.hdr.cmd_or_resp.ret_status ==
		    FW_CEM_RESP_STATUS_SUCCESS)
			ret_val = 0;
		else
			ret_val = -EFAULT;

		break;
	}

	return ret_val;
}
EXPORT_SYMBOL(wx_reset_hostif);

/**
 *  wx_init_eeprom_params - Initialize EEPROM params
 *  @wx: pointer to hardware structure
 *
 *  Initializes the EEPROM parameters wx_eeprom_info within the
 *  wx_hw struct in order to set up EEPROM access.
 **/
void wx_init_eeprom_params(struct wx *wx)
{
	struct wx_eeprom_info *eeprom = &wx->eeprom;
	u16 eeprom_size;
	u16 data = 0x80;

	if (eeprom->type == wx_eeprom_uninitialized) {
		eeprom->semaphore_delay = 10;
		eeprom->type = wx_eeprom_none;

		if (!(rd32(wx, WX_SPI_STATUS) &
		      WX_SPI_STATUS_FLASH_BYPASS)) {
			eeprom->type = wx_flash;

			eeprom_size = 4096;
			eeprom->word_size = eeprom_size >> 1;

			wx_dbg(wx, "Eeprom params: type = %d, size = %d\n",
			       eeprom->type, eeprom->word_size);
		}
	}

	if (wx->mac.type == wx_mac_sp) {
		if (wx_read_ee_hostif(wx, WX_SW_REGION_PTR, &data)) {
			wx_err(wx, "NVM Read Error\n");
			return;
		}
		data = data >> 1;
	}

	eeprom->sw_region_offset = data;
}
EXPORT_SYMBOL(wx_init_eeprom_params);

/**
 *  wx_get_mac_addr - Generic get MAC address
 *  @wx: pointer to hardware structure
 *  @mac_addr: Adapter MAC address
 *
 *  Reads the adapter's MAC address from first Receive Address Register (RAR0)
 *  A reset of the adapter must be performed prior to calling this function
 *  in order for the MAC address to have been loaded from the EEPROM into RAR0
 **/
void wx_get_mac_addr(struct wx *wx, u8 *mac_addr)
{
	u32 rar_high;
	u32 rar_low;
	u16 i;

	wr32(wx, WX_PSR_MAC_SWC_IDX, 0);
	rar_high = rd32(wx, WX_PSR_MAC_SWC_AD_H);
	rar_low = rd32(wx, WX_PSR_MAC_SWC_AD_L);

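	/* the two most significant address bytes sit in the low 16 bits
	 * of AD_H; the remaining four bytes are in AD_L, MSB first
	 */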
	for (i = 0; i < 2; i++)
		mac_addr[i] = (u8)(rar_high >> (1 - i) * 8);

	for (i = 0; i < 4; i++)
		mac_addr[i + 2] = (u8)(rar_low >> (3 - i) * 8);
}
EXPORT_SYMBOL(wx_get_mac_addr);

/**
 *  wx_set_rar - Set Rx address register
 *  @wx: pointer to hardware structure
 *  @index: Receive address register to write
 *  @addr: Address to put into receive address register
 *  @pools: VMDq "set" or "pool" index
 *  @enable_addr: set flag that address is active
 *
 *  Puts an ethernet address into a receive address register.
 **/
static int wx_set_rar(struct wx *wx, u32 index, u8 *addr, u64 pools,
		      u32 enable_addr)
{
	u32 rar_entries = wx->mac.num_rar_entries;
	u32 rar_low, rar_high;

	/* Make sure we are using a valid rar index range */
	if (index >= rar_entries) {
		wx_err(wx, "RAR index %d is out of range.\n", index);
		return -EINVAL;
	}

	/* select the MAC address */
	wr32(wx, WX_PSR_MAC_SWC_IDX, index);

	/* setup VMDq pool mapping */
	wr32(wx, WX_PSR_MAC_SWC_VM_L, pools & 0xFFFFFFFF);
	if (wx->mac.type == wx_mac_sp)
		wr32(wx, WX_PSR_MAC_SWC_VM_H, pools >> 32);

	/* HW expects these in little endian so we reverse the byte
	 * order from network order (big endian) to little endian
	 *
	 * Some parts put the VMDq setting in the extra RAH bits,
	 * so save everything except the lower 16 bits that hold part
	 * of the address and the address valid bit.
	 */
	rar_low = ((u32)addr[5] |
		  ((u32)addr[4] << 8) |
		  ((u32)addr[3] << 16) |
		  ((u32)addr[2] << 24));
	rar_high = ((u32)addr[1] |
		   ((u32)addr[0] << 8));
	if (enable_addr != 0)
		rar_high |= WX_PSR_MAC_SWC_AD_H_AV;

	wr32(wx, WX_PSR_MAC_SWC_AD_L, rar_low);
	wr32m(wx, WX_PSR_MAC_SWC_AD_H,
	      (WX_PSR_MAC_SWC_AD_H_AD(U16_MAX) |
	       WX_PSR_MAC_SWC_AD_H_ADTYPE(1) |
	       WX_PSR_MAC_SWC_AD_H_AV),
	      rar_high);

	return 0;
}

/**
 *  wx_clear_rar - Remove Rx address register
 *  @wx: pointer to hardware structure
 *  @index: Receive address register to write
 *
 *  Clears an ethernet address from a receive address register.
 **/
static int wx_clear_rar(struct wx *wx, u32 index)
{
	u32 rar_entries = wx->mac.num_rar_entries;

	/* Make sure we are using a valid rar index range */
	if (index >= rar_entries) {
		wx_err(wx, "RAR index %d is out of range.\n", index);
		return -EINVAL;
	}

	/* Some parts put the VMDq setting in the extra RAH bits,
	 * so save everything except the lower 16 bits that hold part
	 * of the address and the address valid bit.
	 */
	wr32(wx, WX_PSR_MAC_SWC_IDX, index);

	wr32(wx, WX_PSR_MAC_SWC_VM_L, 0);
	wr32(wx, WX_PSR_MAC_SWC_VM_H, 0);

	wr32(wx, WX_PSR_MAC_SWC_AD_L, 0);
	wr32m(wx, WX_PSR_MAC_SWC_AD_H,
	      (WX_PSR_MAC_SWC_AD_H_AD(U16_MAX) |
	       WX_PSR_MAC_SWC_AD_H_ADTYPE(1) |
	       WX_PSR_MAC_SWC_AD_H_AV),
	      0);

	return 0;
}

/**
 *  wx_clear_vmdq - Disassociate a VMDq pool index from a rx address
 *  @wx: pointer to hardware struct
 *  @rar: receive address register index to disassociate
 *  @vmdq: VMDq pool index to remove from the rar
 **/
static int wx_clear_vmdq(struct wx *wx, u32 rar, u32 __maybe_unused vmdq)
{
	u32 rar_entries = wx->mac.num_rar_entries;
	u32 mpsar_lo, mpsar_hi;

	/* Make sure we are using a valid rar index range */
	if (rar >= rar_entries) {
		wx_err(wx, "RAR index %d is out of range.\n", rar);
		return -EINVAL;
	}

	wr32(wx, WX_PSR_MAC_SWC_IDX, rar);
	mpsar_lo = rd32(wx, WX_PSR_MAC_SWC_VM_L);
	mpsar_hi = rd32(wx, WX_PSR_MAC_SWC_VM_H);

	if (!mpsar_lo && !mpsar_hi)
		return 0;

	/* was that the last pool using this rar? */
	if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
		wx_clear_rar(wx, rar);

	return 0;
}

/**
 *  wx_init_uta_tables - Initialize the Unicast Table Array
 *  @wx: pointer to hardware structure
 **/
static void wx_init_uta_tables(struct wx *wx)
{
	int i;

	wx_dbg(wx, " Clearing UTA\n");

	for (i = 0; i < 128; i++)
		wr32(wx, WX_PSR_UC_TBL(i), 0);
}

/**
 *  wx_init_rx_addrs - Initializes receive address filters.
 *  @wx: pointer to hardware structure
 *
 *  Places the MAC address in receive address register 0 and clears the rest
 *  of the receive address registers. Clears the multicast table. Assumes
 *  the receiver is in reset when the routine is called.
 **/
void wx_init_rx_addrs(struct wx *wx)
{
	u32 rar_entries = wx->mac.num_rar_entries;
	u32 psrctl;
	int i;

	/* If the current mac address is valid, assume it is a software override
	 * to the permanent address.
	 * Otherwise, use the permanent address from the eeprom.
	 */
	if (!is_valid_ether_addr(wx->mac.addr)) {
		/* Get the MAC address from the RAR0 for later reference */
		wx_get_mac_addr(wx, wx->mac.addr);
		wx_dbg(wx, "Keeping Current RAR0 Addr = %pM\n", wx->mac.addr);
	} else {
		/* Setup the receive address. */
		wx_dbg(wx, "Overriding MAC Address in RAR[0]\n");
		wx_dbg(wx, "New MAC Addr = %pM\n", wx->mac.addr);

		wx_set_rar(wx, 0, wx->mac.addr, 0, WX_PSR_MAC_SWC_AD_H_AV);

		if (wx->mac.type == wx_mac_sp) {
			/* clear VMDq pool/queue selection for RAR 0 */
			wx_clear_vmdq(wx, 0, WX_CLEAR_VMDQ_ALL);
		}
	}

	/* Zero out the other receive addresses. */
	wx_dbg(wx, "Clearing RAR[1-%d]\n", rar_entries - 1);
	for (i = 1; i < rar_entries; i++) {
		wr32(wx, WX_PSR_MAC_SWC_IDX, i);
		wr32(wx, WX_PSR_MAC_SWC_AD_L, 0);
		wr32(wx, WX_PSR_MAC_SWC_AD_H, 0);
	}

	/* Clear the MTA */
	wx->addr_ctrl.mta_in_use = 0;
	psrctl = rd32(wx, WX_PSR_CTL);
	psrctl &= ~(WX_PSR_CTL_MO | WX_PSR_CTL_MFE);
	psrctl |= wx->mac.mc_filter_type << WX_PSR_CTL_MO_SHIFT;
	wr32(wx, WX_PSR_CTL, psrctl);
	wx_dbg(wx, " Clearing MTA\n");
	for (i = 0; i < wx->mac.mcft_size; i++)
		wr32(wx, WX_PSR_MC_TBL(i), 0);

	wx_init_uta_tables(wx);
}
EXPORT_SYMBOL(wx_init_rx_addrs);

static void wx_sync_mac_table(struct wx *wx)
{
	int i;

	for (i = 0; i < wx->mac.num_rar_entries; i++) {
		if (wx->mac_table[i].state & WX_MAC_STATE_MODIFIED) {
			if (wx->mac_table[i].state & WX_MAC_STATE_IN_USE) {
				wx_set_rar(wx, i,
					   wx->mac_table[i].addr,
					   wx->mac_table[i].pools,
					   WX_PSR_MAC_SWC_AD_H_AV);
			} else {
				wx_clear_rar(wx, i);
			}
			wx->mac_table[i].state &= ~(WX_MAC_STATE_MODIFIED);
		}
	}
}

/* this function overwrites the first RAR entry with the default filter */
void wx_mac_set_default_filter(struct wx *wx, u8 *addr)
{
	memcpy(&wx->mac_table[0].addr, addr, ETH_ALEN);
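	/* the default filter is assigned to pool 0 */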
	wx->mac_table[0].pools = 1ULL;
	wx->mac_table[0].state = (WX_MAC_STATE_DEFAULT | WX_MAC_STATE_IN_USE);
	wx_set_rar(wx, 0, wx->mac_table[0].addr,
		   wx->mac_table[0].pools,
		   WX_PSR_MAC_SWC_AD_H_AV);
}
EXPORT_SYMBOL(wx_mac_set_default_filter);

void wx_flush_sw_mac_table(struct wx *wx)
{
	u32 i;

	for (i = 0; i < wx->mac.num_rar_entries; i++) {
		if (!(wx->mac_table[i].state & WX_MAC_STATE_IN_USE))
			continue;

		wx->mac_table[i].state |= WX_MAC_STATE_MODIFIED;
		wx->mac_table[i].state &= ~WX_MAC_STATE_IN_USE;
		memset(wx->mac_table[i].addr, 0, ETH_ALEN);
		wx->mac_table[i].pools = 0;
	}
	wx_sync_mac_table(wx);
}
EXPORT_SYMBOL(wx_flush_sw_mac_table);

static int wx_add_mac_filter(struct wx *wx, u8 *addr, u16 pool)
{
	u32 i;

	if (is_zero_ether_addr(addr))
		return -EINVAL;

	for (i = 0; i < wx->mac.num_rar_entries; i++) {
		if (wx->mac_table[i].state & WX_MAC_STATE_IN_USE) {
			if (ether_addr_equal(addr, wx->mac_table[i].addr)) {
				if (wx->mac_table[i].pools != (1ULL << pool)) {
					memcpy(wx->mac_table[i].addr, addr, ETH_ALEN);
					wx->mac_table[i].pools |= (1ULL << pool);
					wx_sync_mac_table(wx);
					return i;
				}
			}
		}

		if (wx->mac_table[i].state & WX_MAC_STATE_IN_USE)
			continue;
		wx->mac_table[i].state |= (WX_MAC_STATE_MODIFIED |
					   WX_MAC_STATE_IN_USE);
		memcpy(wx->mac_table[i].addr, addr, ETH_ALEN);
		wx->mac_table[i].pools |= (1ULL << pool);
		wx_sync_mac_table(wx);
		return i;
	}
	return -ENOMEM;
}

static int wx_del_mac_filter(struct wx *wx, u8 *addr, u16 pool)
{
	u32 i;

	if (is_zero_ether_addr(addr))
		return -EINVAL;

	/* search table for addr, if found, set to 0 and sync */
	for (i = 0; i < wx->mac.num_rar_entries; i++) {
		if (!ether_addr_equal(addr, wx->mac_table[i].addr))
			continue;

		wx->mac_table[i].state |= WX_MAC_STATE_MODIFIED;
		wx->mac_table[i].pools &= ~(1ULL << pool);
		if (!wx->mac_table[i].pools) {
			wx->mac_table[i].state &= ~WX_MAC_STATE_IN_USE;
			memset(wx->mac_table[i].addr, 0, ETH_ALEN);
		}
		wx_sync_mac_table(wx);
		return 0;
	}
	return -ENOMEM;
}

static int wx_available_rars(struct wx *wx)
{
	u32 i, count = 0;

	for (i = 0; i < wx->mac.num_rar_entries; i++) {
		if (wx->mac_table[i].state == 0)
			count++;
	}

	return count;
}

/**
 * wx_write_uc_addr_list - write unicast addresses to RAR table
 * @netdev: network interface device structure
 * @pool: index for mac table
 *
 * Writes unicast address list to the RAR table.
 * Returns: -ENOMEM on failure/insufficient address space
 *                0 on no addresses written
 *                X on writing X addresses to the RAR table
 **/
static int wx_write_uc_addr_list(struct net_device *netdev, int pool)
{
	struct wx *wx = netdev_priv(netdev);
	int count = 0;

	/* return ENOMEM indicating insufficient memory for addresses */
	if (netdev_uc_count(netdev) > wx_available_rars(wx))
		return -ENOMEM;

	if (!netdev_uc_empty(netdev)) {
		struct netdev_hw_addr *ha;

		netdev_for_each_uc_addr(ha, netdev) {
			wx_del_mac_filter(wx, ha->addr, pool);
			wx_add_mac_filter(wx, ha->addr, pool);
			count++;
		}
	}
	return count;
}

/**
 *  wx_mta_vector - Determines bit-vector in multicast table to set
 *  @wx: pointer to private structure
 *  @mc_addr: the multicast address
 *
 *  Extracts the 12 bits, from a multicast address, to determine which
 *  bit-vector to set in the multicast table. The hardware uses 12 bits, from
 *  incoming rx multicast addresses, to determine the bit-vector to check in
 *  the MTA. Which of the 4 combinations of 12 bits the hardware uses is set
 *  by the MO field of the MCSTCTRL. The MO field is set during initialization
 *  to mc_filter_type.
 **/
static u32 wx_mta_vector(struct wx *wx, u8 *mc_addr)
{
	u32 vector = 0;

	switch (wx->mac.mc_filter_type) {
	case 0:   /* use bits [47:36] of the address */
		vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
		break;
	case 1:   /* use bits [46:35] of the address */
		vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
		break;
	case 2:   /* use bits [45:34] of the address */
		vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
		break;
	case 3:   /* use bits [43:32] of the address */
		vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
		break;
	default:  /* Invalid mc_filter_type */
		wx_err(wx, "MC filter type param set incorrectly\n");
		break;
	}

	/* vector can only be 12-bits or boundary will be exceeded */
	vector &= 0xFFF;
	return vector;
}

/**
 *  wx_set_mta - Set bit-vector in multicast table
 *  @wx: pointer to private structure
 *  @mc_addr: Multicast address
 *
 *  Sets the bit-vector in the multicast table.
 **/
static void wx_set_mta(struct wx *wx, u8 *mc_addr)
{
	u32 vector, vector_bit, vector_reg;

	wx->addr_ctrl.mta_in_use++;

	vector = wx_mta_vector(wx, mc_addr);
	wx_dbg(wx, " bit-vector = 0x%03X\n", vector);

	/* The MTA is a register array of 128 32-bit registers. It is treated
	 * like an array of 4096 bits.  We want to set bit
	 * BitArray[vector_value]. So we figure out what register the bit is
	 * in, read it, OR in the new bit, then write back the new value.  The
	 * register is determined by the upper 7 bits of the vector value and
	 * the bit within that register is determined by the lower 5 bits of
	 * the value.
	 */
	vector_reg = (vector >> 5) & 0x7F;
	vector_bit = vector & 0x1F;
	wx->mac.mta_shadow[vector_reg] |= (1 << vector_bit);
}

/**
 *  wx_update_mc_addr_list - Updates MAC list of multicast addresses
 *  @wx: pointer to private structure
 *  @netdev: pointer to net device structure
 *
 *  The given list replaces any existing list. Clears the MC addrs from receive
 *  address registers and the multicast table. Uses unused receive address
 *  registers for the first multicast addresses, and hashes the rest into the
 *  multicast table.
 **/
static void wx_update_mc_addr_list(struct wx *wx, struct net_device *netdev)
{
	struct netdev_hw_addr *ha;
	u32 i, psrctl;

	/* Set the new number of MC addresses that we are being requested to
	 * use.
	 */
	wx->addr_ctrl.num_mc_addrs = netdev_mc_count(netdev);
	wx->addr_ctrl.mta_in_use = 0;

	/* Clear mta_shadow */
	wx_dbg(wx, " Clearing MTA\n");
	memset(&wx->mac.mta_shadow, 0, sizeof(wx->mac.mta_shadow));

	/* Update mta_shadow */
	netdev_for_each_mc_addr(ha, netdev) {
		wx_dbg(wx, " Adding the multicast addresses:\n");
		wx_set_mta(wx, ha->addr);
	}

	/* Enable mta */
	for (i = 0; i < wx->mac.mcft_size; i++)
		wr32a(wx, WX_PSR_MC_TBL(0), i,
		      wx->mac.mta_shadow[i]);

	if (wx->addr_ctrl.mta_in_use > 0) {
		psrctl = rd32(wx, WX_PSR_CTL);
		psrctl &= ~(WX_PSR_CTL_MO | WX_PSR_CTL_MFE);
		psrctl |= WX_PSR_CTL_MFE |
			  (wx->mac.mc_filter_type << WX_PSR_CTL_MO_SHIFT);
		wr32(wx, WX_PSR_CTL, psrctl);
	}

	wx_dbg(wx, "Update mc addr list Complete\n");
}

/**
 * wx_write_mc_addr_list - write multicast addresses to MTA
 * @netdev: network interface device structure
 *
 * Writes multicast address list to the MTA hash table.
 * Returns: 0 on no addresses written
 *          X on writing X addresses to MTA
 **/
static int wx_write_mc_addr_list(struct net_device *netdev)
{
	struct wx *wx = netdev_priv(netdev);

	if (!netif_running(netdev))
		return 0;

	wx_update_mc_addr_list(wx, netdev);

	return netdev_mc_count(netdev);
}

/**
 * wx_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
int wx_set_mac(struct net_device *netdev, void *p)
{
	struct wx *wx = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int retval;

	retval = eth_prepare_mac_addr_change(netdev, addr);
	if (retval)
		return retval;

	wx_del_mac_filter(wx, wx->mac.addr, 0);
	eth_hw_addr_set(netdev, addr->sa_data);
	memcpy(wx->mac.addr, addr->sa_data, netdev->addr_len);

	wx_mac_set_default_filter(wx, wx->mac.addr);

	return 0;
}
EXPORT_SYMBOL(wx_set_mac);

void wx_disable_rx(struct wx *wx)
{
	u32 pfdtxgswc;
	u32 rxctrl;

	rxctrl = rd32(wx, WX_RDB_PB_CTL);
	if (rxctrl & WX_RDB_PB_CTL_RXEN) {
		pfdtxgswc = rd32(wx, WX_PSR_CTL);
		if (pfdtxgswc & WX_PSR_CTL_SW_EN) {
			pfdtxgswc &= ~WX_PSR_CTL_SW_EN;
			wr32(wx, WX_PSR_CTL, pfdtxgswc);
			wx->mac.set_lben = true;
		} else {
			wx->mac.set_lben = false;
		}
		rxctrl &= ~WX_RDB_PB_CTL_RXEN;
		wr32(wx, WX_RDB_PB_CTL, rxctrl);

		if (!(((wx->subsystem_device_id & WX_NCSI_MASK) == WX_NCSI_SUP) ||
		      ((wx->subsystem_device_id & WX_WOL_MASK) == WX_WOL_SUP))) {
			/* disable mac receiver */
			wr32m(wx, WX_MAC_RX_CFG,
			      WX_MAC_RX_CFG_RE, 0);
		}
	}
}
EXPORT_SYMBOL(wx_disable_rx);

static void wx_enable_rx(struct wx *wx)
{
	u32 psrctl;

	/* enable mac receiver */
	wr32m(wx, WX_MAC_RX_CFG,
	      WX_MAC_RX_CFG_RE, WX_MAC_RX_CFG_RE);

	wr32m(wx, WX_RDB_PB_CTL,
	      WX_RDB_PB_CTL_RXEN, WX_RDB_PB_CTL_RXEN);

	if (wx->mac.set_lben) {
		psrctl = rd32(wx, WX_PSR_CTL);
		psrctl |= WX_PSR_CTL_SW_EN;
		wr32(wx, WX_PSR_CTL, psrctl);
		wx->mac.set_lben = false;
	}
}

/**
 * wx_set_rxpba - Initialize Rx packet buffer
 * @wx: pointer to private structure
 **/
static void wx_set_rxpba(struct wx *wx)
{
	u32 rxpktsize, txpktsize, txpbthresh;

	rxpktsize = wx->mac.rx_pb_size << WX_RDB_PB_SZ_SHIFT;
	wr32(wx, WX_RDB_PB_SZ(0), rxpktsize);

	/* Only support an equally distributed Tx packet buffer strategy. */
	txpktsize = wx->mac.tx_pb_size;
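	/* Tx packet buffer threshold in KB: the buffer size less
	 * WX_TXPKT_SIZE_MAX, presumably leaving room for one
	 * maximum-size packet
	 */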
	txpbthresh = (txpktsize / 1024) - WX_TXPKT_SIZE_MAX;
	wr32(wx, WX_TDB_PB_SZ(0), txpktsize);
	wr32(wx, WX_TDM_PB_THRE(0), txpbthresh);
}

static void wx_configure_port(struct wx *wx)
{
	u32 value, i;

	value = WX_CFG_PORT_CTL_D_VLAN | WX_CFG_PORT_CTL_QINQ;
	wr32m(wx, WX_CFG_PORT_CTL,
	      WX_CFG_PORT_CTL_D_VLAN |
	      WX_CFG_PORT_CTL_QINQ,
	      value);

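	/* each TPID register holds two tag protocol identifiers,
	 * one per 16-bit half
	 */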
	wr32(wx, WX_CFG_TAG_TPID(0),
	     ETH_P_8021Q | ETH_P_8021AD << 16);
	wx->tpid[0] = ETH_P_8021Q;
	wx->tpid[1] = ETH_P_8021AD;
	for (i = 1; i < 4; i++)
		wr32(wx, WX_CFG_TAG_TPID(i),
		     ETH_P_8021Q | ETH_P_8021Q << 16);
	for (i = 2; i < 8; i++)
		wx->tpid[i] = ETH_P_8021Q;
}

/**
 *  wx_disable_sec_rx_path - Stops the receive data path
 *  @wx: pointer to private structure
 *
 *  Stops the receive data path and waits for the HW to internally empty
 *  the Rx security block
 **/
static int wx_disable_sec_rx_path(struct wx *wx)
{
	u32 secrx;

	wr32m(wx, WX_RSC_CTL,
	      WX_RSC_CTL_RX_DIS, WX_RSC_CTL_RX_DIS);

	return read_poll_timeout(rd32, secrx, secrx & WX_RSC_ST_RSEC_RDY,
				 1000, 40000, false, wx, WX_RSC_ST);
}

/**
 *  wx_enable_sec_rx_path - Enables the receive data path
 *  @wx: pointer to private structure
 *
 *  Enables the receive data path.
 **/
static void wx_enable_sec_rx_path(struct wx *wx)
{
	wr32m(wx, WX_RSC_CTL, WX_RSC_CTL_RX_DIS, 0);
	WX_WRITE_FLUSH(wx);
}

static void wx_vlan_strip_control(struct wx *wx, bool enable)
{
	int i, j;

	for (i = 0; i < wx->num_rx_queues; i++) {
		struct wx_ring *ring = wx->rx_ring[i];

		j = ring->reg_idx;
		wr32m(wx, WX_PX_RR_CFG(j), WX_PX_RR_CFG_VLAN,
		      enable ? WX_PX_RR_CFG_VLAN : 0);
	}
}

void wx_set_rx_mode(struct net_device *netdev)
{
	struct wx *wx = netdev_priv(netdev);
	netdev_features_t features;
	u32 fctrl, vmolr, vlnctrl;
	int count;

	features = netdev->features;

	/* Check for Promiscuous and All Multicast modes */
	fctrl = rd32(wx, WX_PSR_CTL);
	fctrl &= ~(WX_PSR_CTL_UPE | WX_PSR_CTL_MPE);
	vmolr = rd32(wx, WX_PSR_VM_L2CTL(0));
	vmolr &= ~(WX_PSR_VM_L2CTL_UPE |
		   WX_PSR_VM_L2CTL_MPE |
		   WX_PSR_VM_L2CTL_ROPE |
		   WX_PSR_VM_L2CTL_ROMPE);
	vlnctrl = rd32(wx, WX_PSR_VLAN_CTL);
	vlnctrl &= ~(WX_PSR_VLAN_CTL_VFE | WX_PSR_VLAN_CTL_CFIEN);

	/* set all bits that we expect to always be set */
	fctrl |= WX_PSR_CTL_BAM | WX_PSR_CTL_MFE;
	vmolr |= WX_PSR_VM_L2CTL_BAM |
		 WX_PSR_VM_L2CTL_AUPE |
		 WX_PSR_VM_L2CTL_VACC;
	vlnctrl |= WX_PSR_VLAN_CTL_VFE;

	wx->addr_ctrl.user_set_promisc = false;
	if (netdev->flags & IFF_PROMISC) {
		wx->addr_ctrl.user_set_promisc = true;
		fctrl |= WX_PSR_CTL_UPE | WX_PSR_CTL_MPE;
		/* the PF doesn't want packets routed to the VFs, so clear UPE */
		vmolr |= WX_PSR_VM_L2CTL_MPE;
		vlnctrl &= ~WX_PSR_VLAN_CTL_VFE;
	}

	if (netdev->flags & IFF_ALLMULTI) {
		fctrl |= WX_PSR_CTL_MPE;
		vmolr |= WX_PSR_VM_L2CTL_MPE;
	}

	if (netdev->features & NETIF_F_RXALL) {
		vmolr |= (WX_PSR_VM_L2CTL_UPE | WX_PSR_VM_L2CTL_MPE);
		vlnctrl &= ~WX_PSR_VLAN_CTL_VFE;
		/* receive bad packets */
		wr32m(wx, WX_RSC_CTL,
		      WX_RSC_CTL_SAVE_MAC_ERR,
		      WX_RSC_CTL_SAVE_MAC_ERR);
	} else {
		vmolr |= WX_PSR_VM_L2CTL_ROPE | WX_PSR_VM_L2CTL_ROMPE;
	}

	/* Write addresses to available RAR registers, if there is not
	 * sufficient space to store all the addresses then enable
	 * unicast promiscuous mode
	 */
	count = wx_write_uc_addr_list(netdev, 0);
	if (count < 0) {
		vmolr &= ~WX_PSR_VM_L2CTL_ROPE;
		vmolr |= WX_PSR_VM_L2CTL_UPE;
	}

	/* Write addresses to the MTA, if the attempt fails
	 * then we should just turn on promiscuous mode so
	 * that we can at least receive multicast traffic
	 */
	count = wx_write_mc_addr_list(netdev);
	if (count < 0) {
		vmolr &= ~WX_PSR_VM_L2CTL_ROMPE;
		vmolr |= WX_PSR_VM_L2CTL_MPE;
	}

	wr32(wx, WX_PSR_VLAN_CTL, vlnctrl);
	wr32(wx, WX_PSR_CTL, fctrl);
	wr32(wx, WX_PSR_VM_L2CTL(0), vmolr);

	if ((features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    (features & NETIF_F_HW_VLAN_STAG_RX))
		wx_vlan_strip_control(wx, true);
	else
		wx_vlan_strip_control(wx, false);
}
EXPORT_SYMBOL(wx_set_rx_mode);

static void wx_set_rx_buffer_len(struct wx *wx)
{
	struct net_device *netdev = wx->netdev;
	u32 mhadd, max_frame;

	max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
	/* adjust max frame to be at least the size of a standard frame */
	if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
		max_frame = (ETH_FRAME_LEN + ETH_FCS_LEN);

	mhadd = rd32(wx, WX_PSR_MAX_SZ);
	if (max_frame != mhadd)
		wr32(wx, WX_PSR_MAX_SZ, max_frame);
}

/**
 * wx_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
int wx_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct wx *wx = netdev_priv(netdev);

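	/* for ndo_change_mtu calls the core has already range-checked
	 * new_mtu against netdev->min_mtu and netdev->max_mtu
	 */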
	netdev->mtu = new_mtu;
	wx_set_rx_buffer_len(wx);

	return 0;
}
EXPORT_SYMBOL(wx_change_mtu);

/* Disable the specified rx queue */
void wx_disable_rx_queue(struct wx *wx, struct wx_ring *ring)
{
	u8 reg_idx = ring->reg_idx;
	u32 rxdctl;
	int ret;

	/* write value back with RRCFG.EN bit cleared */
	wr32m(wx, WX_PX_RR_CFG(reg_idx),
	      WX_PX_RR_CFG_RR_EN, 0);

	/* the hardware may take up to 100us to really disable the rx queue */
	ret = read_poll_timeout(rd32, rxdctl, !(rxdctl & WX_PX_RR_CFG_RR_EN),
				10, 100, true, wx, WX_PX_RR_CFG(reg_idx));

	if (ret == -ETIMEDOUT) {
		/* Just for information */
		wx_err(wx,
		       "RRCFG.EN on Rx queue %d not cleared within the polling period\n",
		       reg_idx);
	}
}
EXPORT_SYMBOL(wx_disable_rx_queue);

static void wx_enable_rx_queue(struct wx *wx, struct wx_ring *ring)
{
	u8 reg_idx = ring->reg_idx;
	u32 rxdctl;
	int ret;

	ret = read_poll_timeout(rd32, rxdctl, rxdctl & WX_PX_RR_CFG_RR_EN,
				1000, 10000, true, wx, WX_PX_RR_CFG(reg_idx));

	if (ret == -ETIMEDOUT) {
		/* Just for information */
		wx_err(wx,
		       "RRCFG.EN on Rx queue %d not set within the polling period\n",
		       reg_idx);
	}
}

static void wx_configure_srrctl(struct wx *wx,
				struct wx_ring *rx_ring)
{
	u16 reg_idx = rx_ring->reg_idx;
	u32 srrctl;

	srrctl = rd32(wx, WX_PX_RR_CFG(reg_idx));
	srrctl &= ~(WX_PX_RR_CFG_RR_HDR_SZ |
		    WX_PX_RR_CFG_RR_BUF_SZ |
		    WX_PX_RR_CFG_SPLIT_MODE);
	/* configure header buffer length, needed for RSC */
	srrctl |= WX_RXBUFFER_256 << WX_PX_RR_CFG_BHDRSIZE_SHIFT;

	/* configure the packet buffer length */
	srrctl |= WX_RX_BUFSZ >> WX_PX_RR_CFG_BSIZEPKT_SHIFT;

	wr32(wx, WX_PX_RR_CFG(reg_idx), srrctl);
}

static void wx_configure_tx_ring(struct wx *wx,
				 struct wx_ring *ring)
{
	u32 txdctl = WX_PX_TR_CFG_ENABLE;
	u8 reg_idx = ring->reg_idx;
	u64 tdba = ring->dma;
	int ret;

	/* disable queue to avoid issues while updating state */
	wr32(wx, WX_PX_TR_CFG(reg_idx), WX_PX_TR_CFG_SWFLSH);
	WX_WRITE_FLUSH(wx);

	wr32(wx, WX_PX_TR_BAL(reg_idx), tdba & DMA_BIT_MASK(32));
	wr32(wx, WX_PX_TR_BAH(reg_idx), upper_32_bits(tdba));

	/* reset head and tail pointers */
	wr32(wx, WX_PX_TR_RP(reg_idx), 0);
	wr32(wx, WX_PX_TR_WP(reg_idx), 0);
	ring->tail = wx->hw_addr + WX_PX_TR_WP(reg_idx);

	if (ring->count < WX_MAX_TXD)
		txdctl |= ring->count / 128 << WX_PX_TR_CFG_TR_SIZE_SHIFT;
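	/* write-back threshold of 0x20 (32 descriptors) */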
	txdctl |= 0x20 << WX_PX_TR_CFG_WTHRESH_SHIFT;

	/* reinitialize tx_buffer_info */
	memset(ring->tx_buffer_info, 0,
	       sizeof(struct wx_tx_buffer) * ring->count);

	/* enable queue */
	wr32(wx, WX_PX_TR_CFG(reg_idx), txdctl);

	/* poll to verify queue is enabled */
	ret = read_poll_timeout(rd32, txdctl, txdctl & WX_PX_TR_CFG_ENABLE,
				1000, 10000, true, wx, WX_PX_TR_CFG(reg_idx));
	if (ret == -ETIMEDOUT)
		wx_err(wx, "Could not enable Tx Queue %d\n", reg_idx);
}

static void wx_configure_rx_ring(struct wx *wx,
				 struct wx_ring *ring)
{
	u16 reg_idx = ring->reg_idx;
	union wx_rx_desc *rx_desc;
	u64 rdba = ring->dma;
	u32 rxdctl;

	/* disable queue to avoid issues while updating state */
	rxdctl = rd32(wx, WX_PX_RR_CFG(reg_idx));
	wx_disable_rx_queue(wx, ring);

	wr32(wx, WX_PX_RR_BAL(reg_idx), rdba & DMA_BIT_MASK(32));
	wr32(wx, WX_PX_RR_BAH(reg_idx), upper_32_bits(rdba));

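	/* the ring size field counts in units of 128 descriptors;
	 * 0 encodes the maximum ring size
	 */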
	if (ring->count == WX_MAX_RXD)
		rxdctl |= 0 << WX_PX_RR_CFG_RR_SIZE_SHIFT;
	else
		rxdctl |= (ring->count / 128) << WX_PX_RR_CFG_RR_SIZE_SHIFT;

	rxdctl |= 0x1 << WX_PX_RR_CFG_RR_THER_SHIFT;
	wr32(wx, WX_PX_RR_CFG(reg_idx), rxdctl);

	/* reset head and tail pointers */
	wr32(wx, WX_PX_RR_RP(reg_idx), 0);
	wr32(wx, WX_PX_RR_WP(reg_idx), 0);
	ring->tail = wx->hw_addr + WX_PX_RR_WP(reg_idx);

	wx_configure_srrctl(wx, ring);

	/* initialize rx_buffer_info */
	memset(ring->rx_buffer_info, 0,
	       sizeof(struct wx_rx_buffer) * ring->count);

	/* initialize Rx descriptor 0 */
	rx_desc = WX_RX_DESC(ring, 0);
	rx_desc->wb.upper.length = 0;

	/* enable receive descriptor ring */
	wr32m(wx, WX_PX_RR_CFG(reg_idx),
	      WX_PX_RR_CFG_RR_EN, WX_PX_RR_CFG_RR_EN);

	wx_enable_rx_queue(wx, ring);
	wx_alloc_rx_buffers(ring, wx_desc_unused(ring));
}

/**
 * wx_configure_tx - Configure Transmit Unit after Reset
 * @wx: pointer to private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void wx_configure_tx(struct wx *wx)
{
	u32 i;

	/* TDM_CTL.TE must be set before the Tx queues are enabled */
	wr32m(wx, WX_TDM_CTL,
	      WX_TDM_CTL_TE, WX_TDM_CTL_TE);

	/* Setup the HW Tx Head and Tail descriptor pointers */
	for (i = 0; i < wx->num_tx_queues; i++)
		wx_configure_tx_ring(wx, wx->tx_ring[i]);

	wr32m(wx, WX_TSC_BUF_AE, WX_TSC_BUF_AE_THR, 0x10);

	if (wx->mac.type == wx_mac_em)
		wr32m(wx, WX_TSC_CTL, WX_TSC_CTL_TX_DIS | WX_TSC_CTL_TSEC_DIS, 0x1);

	/* enable mac transmitter */
	wr32m(wx, WX_MAC_TX_CFG,
	      WX_MAC_TX_CFG_TE, WX_MAC_TX_CFG_TE);
}

static void wx_restore_vlan(struct wx *wx)
{
	u16 vid = 1;

	wx_vlan_rx_add_vid(wx->netdev, htons(ETH_P_8021Q), 0);

	for_each_set_bit_from(vid, wx->active_vlans, VLAN_N_VID)
		wx_vlan_rx_add_vid(wx->netdev, htons(ETH_P_8021Q), vid);
}

/**
 * wx_configure_rx - Configure Receive Unit after Reset
 * @wx: pointer to private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void wx_configure_rx(struct wx *wx)
{
	u32 psrtype, i;
	int ret;

	wx_disable_rx(wx);

	psrtype = WX_RDB_PL_CFG_L4HDR |
		  WX_RDB_PL_CFG_L3HDR |
		  WX_RDB_PL_CFG_L2HDR |
		  WX_RDB_PL_CFG_TUN_TUNHDR;
	wr32(wx, WX_RDB_PL_CFG(0), psrtype);

	/* enable hw crc stripping */
	wr32m(wx, WX_RSC_CTL, WX_RSC_CTL_CRC_STRIP, WX_RSC_CTL_CRC_STRIP);

	if (wx->mac.type == wx_mac_sp) {
		u32 psrctl;

		/* RSC Setup */
		psrctl = rd32(wx, WX_PSR_CTL);
		psrctl |= WX_PSR_CTL_RSC_ACK; /* Disable RSC for ACK packets */
		psrctl |= WX_PSR_CTL_RSC_DIS;
		wr32(wx, WX_PSR_CTL, psrctl);
	}

	/* set_rx_buffer_len must be called before ring initialization */
	wx_set_rx_buffer_len(wx);

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring
	 */
	for (i = 0; i < wx->num_rx_queues; i++)
		wx_configure_rx_ring(wx, wx->rx_ring[i]);

	/* Disable the security engine before enabling receives, so traffic
	 * is blocked until the Rx path is ready
	 */
	ret = wx_disable_sec_rx_path(wx);
	if (ret < 0)
		wx_err(wx, "The register status is abnormal, please check device.");

	wx_enable_rx(wx);
	wx_enable_sec_rx_path(wx);
}

static void wx_configure_isb(struct wx *wx)
{
	/* set ISB Address */
	wr32(wx, WX_PX_ISB_ADDR_L, wx->isb_dma & DMA_BIT_MASK(32));
	if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
		wr32(wx, WX_PX_ISB_ADDR_H, upper_32_bits(wx->isb_dma));
}

void wx_configure(struct wx *wx)
{
	wx_set_rxpba(wx);
	wx_configure_port(wx);

	wx_set_rx_mode(wx->netdev);
	wx_restore_vlan(wx);
	wx_enable_sec_rx_path(wx);

	wx_configure_tx(wx);
	wx_configure_rx(wx);
	wx_configure_isb(wx);
}
EXPORT_SYMBOL(wx_configure);

/**
 *  wx_disable_pcie_master - Disable PCI-express master access
 *  @wx: pointer to hardware structure
 *
 *  Disables PCI-Express master access and verifies there are no pending
 *  requests.
 **/
int wx_disable_pcie_master(struct wx *wx)
{
	int status = 0;
	u32 val;

	/* Always disable bus mastering so any future transactions are blocked */
	pci_clear_master(wx->pdev);

	/* Exit if master requests are blocked */
	if (!(rd32(wx, WX_PX_TRANSACTION_PENDING)))
		return 0;

	/* Poll for master request bit to clear */
	status = read_poll_timeout(rd32, val, !val, 100, WX_PCI_MASTER_DISABLE_TIMEOUT,
				   false, wx, WX_PX_TRANSACTION_PENDING);
	if (status < 0)
		wx_err(wx, "PCIe transaction pending bit did not clear.\n");

	return status;
}
EXPORT_SYMBOL(wx_disable_pcie_master);

/**
 *  wx_stop_adapter - Generic stop Tx/Rx units
 *  @wx: pointer to hardware structure
 *
 *  Sets the adapter_stopped flag within wx_hw struct. Clears interrupts,
 *  disables transmit and receive units. The adapter_stopped flag is used by
 *  the shared code and drivers to determine if the adapter is in a stopped
 *  state and should not touch the hardware.
 **/
int wx_stop_adapter(struct wx *wx)
{
	u16 i;

	/* Set the adapter_stopped flag so other driver functions stop touching
	 * the hardware
	 */
	wx->adapter_stopped = true;

	/* Disable the receive unit */
	wx_disable_rx(wx);

	/* Set interrupt mask to stop interrupts from being generated */
	wx_intr_disable(wx, WX_INTR_ALL);

	/* Clear any pending interrupts, flush previous writes */
	wr32(wx, WX_PX_MISC_IC, 0xffffffff);
	wr32(wx, WX_BME_CTL, 0x3);

	/* Disable the transmit unit.  Each queue must be disabled. */
	for (i = 0; i < wx->mac.max_tx_queues; i++) {
		wr32m(wx, WX_PX_TR_CFG(i),
		      WX_PX_TR_CFG_SWFLSH | WX_PX_TR_CFG_ENABLE,
		      WX_PX_TR_CFG_SWFLSH);
	}

	/* Disable the receive unit by stopping each queue */
	for (i = 0; i < wx->mac.max_rx_queues; i++) {
		wr32m(wx, WX_PX_RR_CFG(i),
		      WX_PX_RR_CFG_RR_EN, 0);
	}

	/* flush all queue disables */
	WX_WRITE_FLUSH(wx);

	/* Prevent the PCI-E bus from hanging by disabling PCI-E master
	 * access and verify no pending requests
	 */
	return wx_disable_pcie_master(wx);
}
EXPORT_SYMBOL(wx_stop_adapter);

void wx_reset_misc(struct wx *wx)
{
	int i;

	/* allow receiving packets larger than 2048 bytes (jumbo frames) */
	wr32m(wx, WX_MAC_RX_CFG, WX_MAC_RX_CFG_JE, WX_MAC_RX_CFG_JE);

	/* clear counters on read */
	wr32m(wx, WX_MMC_CONTROL,
	      WX_MMC_CONTROL_RSTONRD, WX_MMC_CONTROL_RSTONRD);

	wr32m(wx, WX_MAC_RX_FLOW_CTRL,
	      WX_MAC_RX_FLOW_CTRL_RFE, WX_MAC_RX_FLOW_CTRL_RFE);

	wr32(wx, WX_MAC_PKT_FLT, WX_MAC_PKT_FLT_PR);

	wr32m(wx, WX_MIS_RST_ST,
	      WX_MIS_RST_ST_RST_INIT, 0x1E00);

	/* errata 4: initialize mng flex tbl and wakeup flex tbl */
	wr32(wx, WX_PSR_MNG_FLEX_SEL, 0);
	for (i = 0; i < 16; i++) {
		wr32(wx, WX_PSR_MNG_FLEX_DW_L(i), 0);
		wr32(wx, WX_PSR_MNG_FLEX_DW_H(i), 0);
		wr32(wx, WX_PSR_MNG_FLEX_MSK(i), 0);
	}
	wr32(wx, WX_PSR_LAN_FLEX_SEL, 0);
	for (i = 0; i < 16; i++) {
		wr32(wx, WX_PSR_LAN_FLEX_DW_L(i), 0);
		wr32(wx, WX_PSR_LAN_FLEX_DW_H(i), 0);
		wr32(wx, WX_PSR_LAN_FLEX_MSK(i), 0);
	}

	/* set pause frame dst mac addr (01:80:C2:00:00:01) */
	wr32(wx, WX_RDB_PFCMACDAL, 0xC2000001);
	wr32(wx, WX_RDB_PFCMACDAH, 0x0180);
}
EXPORT_SYMBOL(wx_reset_misc);

/**
 *  wx_get_pcie_msix_counts - Gets MSI-X vector count
 *  @wx: pointer to hardware structure
 *  @msix_count: number of MSI-X interrupts that can be obtained
 *  @max_msix_count: maximum number of MSI-X interrupts that the MAC needs
 *
 *  Read PCIe configuration space, and get the MSI-X vector count from
 *  the capabilities table.
 **/
int wx_get_pcie_msix_counts(struct wx *wx, u16 *msix_count, u16 max_msix_count)
{
	struct pci_dev *pdev = wx->pdev;
	struct device *dev = &pdev->dev;
	int pos;

	*msix_count = 1;
	pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
	if (!pos) {
		dev_err(dev, "Unable to find MSI-X Capabilities\n");
		return -EINVAL;
	}
	pci_read_config_word(pdev,
			     pos + PCI_MSIX_FLAGS,
			     msix_count);
	*msix_count &= WX_PCIE_MSIX_TBL_SZ_MASK;
	/* MSI-X count is zero-based in HW */
	*msix_count += 1;

	if (*msix_count > max_msix_count)
		*msix_count = max_msix_count;

	return 0;
}
EXPORT_SYMBOL(wx_get_pcie_msix_counts);

int wx_sw_init(struct wx *wx)
{
	struct pci_dev *pdev = wx->pdev;
	u32 ssid = 0;
	int err = 0;

	wx->vendor_id = pdev->vendor;
	wx->device_id = pdev->device;
	wx->revision_id = pdev->revision;
	wx->oem_svid = pdev->subsystem_vendor;
	wx->oem_ssid = pdev->subsystem_device;
	wx->bus.device = PCI_SLOT(pdev->devfn);
	wx->bus.func = PCI_FUNC(pdev->devfn);

	if (wx->oem_svid == PCI_VENDOR_ID_WANGXUN) {
		wx->subsystem_vendor_id = pdev->subsystem_vendor;
		wx->subsystem_device_id = pdev->subsystem_device;
	} else {
		err = wx_flash_read_dword(wx, 0xfffdc, &ssid);
		if (err < 0) {
			wx_err(wx, "read of internal subsystem device id failed\n");
			return err;
		}

		/* fall through to allocate the mac_table on success */
		wx->subsystem_device_id = swab16((u16)ssid);
	}

	wx->mac_table = kcalloc(wx->mac.num_rar_entries,
				sizeof(struct wx_mac_addr),
				GFP_KERNEL);
	if (!wx->mac_table) {
		wx_err(wx, "mac_table allocation failed\n");
		return -ENOMEM;
	}

	return 0;
}
EXPORT_SYMBOL(wx_sw_init);

/**
 *  wx_find_vlvf_slot - find the vlanid or the first empty slot
 *  @wx: pointer to hardware structure
 *  @vlan: VLAN id to write to VLAN filter
 *
 *  return the VLVF index where this VLAN id should be placed
 *
 **/
static int wx_find_vlvf_slot(struct wx *wx, u32 vlan)
{
	u32 bits = 0, first_empty_slot = 0;
	int regindex;

	/* short cut the special case: VLVF entry 0 is reserved for VLAN 0 */
	if (vlan == 0)
		return 0;

	/* Search for the vlan id in the VLVF entries. Save off the first empty
	 * slot found along the way
	 */
	for (regindex = 1; regindex < WX_PSR_VLAN_SWC_ENTRIES; regindex++) {
		wr32(wx, WX_PSR_VLAN_SWC_IDX, regindex);
		bits = rd32(wx, WX_PSR_VLAN_SWC);
		if (!bits && !(first_empty_slot))
			first_empty_slot = regindex;
		else if ((bits & 0x0FFF) == vlan)
			break;
	}

	if (regindex >= WX_PSR_VLAN_SWC_ENTRIES) {
		if (first_empty_slot)
			regindex = first_empty_slot;
		else
			regindex = -ENOMEM;
	}

	return regindex;
}

/**
 *  wx_set_vlvf - Set VLAN Pool Filter
 *  @wx: pointer to hardware structure
 *  @vlan: VLAN id to write to VLAN filter
 *  @vind: VMDq output index that maps queue to VLAN id in VLVFB
 *  @vlan_on: boolean flag to turn on/off VLAN in VLVF
 *  @vfta_changed: pointer to boolean flag which indicates whether VFTA
 *                 should be changed
 *
 *  Turn on/off specified bit in VLVF table.
 **/
static int wx_set_vlvf(struct wx *wx, u32 vlan, u32 vind, bool vlan_on,
		       bool *vfta_changed)
{
	int vlvf_index;
	u32 vt, bits;

	/* If VT Mode is set
	 *   Either vlan_on
	 *     make sure the vlan is in VLVF
	 *     set the vind bit in the matching VLVFB
	 *   Or !vlan_on
	 *     clear the pool bit and possibly the vind
	 */
	vt = rd32(wx, WX_CFG_PORT_CTL);
	if (!(vt & WX_CFG_PORT_CTL_NUM_VT_MASK))
		return 0;

	vlvf_index = wx_find_vlvf_slot(wx, vlan);
	if (vlvf_index < 0)
		return vlvf_index;

	wr32(wx, WX_PSR_VLAN_SWC_IDX, vlvf_index);
	if (vlan_on) {
		/* set the pool bit */
		if (vind < 32) {
			bits = rd32(wx, WX_PSR_VLAN_SWC_VM_L);
			bits |= (1 << vind);
			wr32(wx, WX_PSR_VLAN_SWC_VM_L, bits);
		} else {
			bits = rd32(wx, WX_PSR_VLAN_SWC_VM_H);
			bits |= (1 << (vind - 32));
			wr32(wx, WX_PSR_VLAN_SWC_VM_H, bits);
		}
	} else {
		/* clear the pool bit */
		if (vind < 32) {
			bits = rd32(wx, WX_PSR_VLAN_SWC_VM_L);
			bits &= ~(1 << vind);
			wr32(wx, WX_PSR_VLAN_SWC_VM_L, bits);
			bits |= rd32(wx, WX_PSR_VLAN_SWC_VM_H);
		} else {
			bits = rd32(wx, WX_PSR_VLAN_SWC_VM_H);
			bits &= ~(1 << (vind - 32));
			wr32(wx, WX_PSR_VLAN_SWC_VM_H, bits);
			bits |= rd32(wx, WX_PSR_VLAN_SWC_VM_L);
		}
	}

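	/* keep the VLVF entry only while at least one pool still uses it */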
	if (bits) {
		wr32(wx, WX_PSR_VLAN_SWC, (WX_PSR_VLAN_SWC_VIEN | vlan));
		if (!vlan_on && vfta_changed)
			*vfta_changed = false;
	} else {
		wr32(wx, WX_PSR_VLAN_SWC, 0);
	}

	return 0;
}

/**
 *  wx_set_vfta - Set VLAN filter table
 *  @wx: pointer to hardware structure
 *  @vlan: VLAN id to write to VLAN filter
 *  @vind: VMDq output index that maps queue to VLAN id in VLVFB
 *  @vlan_on: boolean flag to turn on/off VLAN in VLVF
 *
 *  Turn on/off specified VLAN in the VLAN filter table.
 **/
static int wx_set_vfta(struct wx *wx, u32 vlan, u32 vind, bool vlan_on)
{
	u32 bitindex, vfta, targetbit;
	bool vfta_changed = false;
	int regindex, ret;

	/* this is a 2 part operation - first the VFTA, then the
	 * VLVF and VLVFB if VT Mode is set
	 * We don't write the VFTA until we know the VLVF part succeeded.
	 */

	/* Part 1
	 * The VFTA is a bitstring made up of 128 32-bit registers
	 * that enable the particular VLAN id, much like the MTA:
	 *    bits[11-5]: which register
	 *    bits[4-0]:  which bit in the register
	 */
	regindex = (vlan >> 5) & 0x7F;
	bitindex = vlan & 0x1F;
	targetbit = (1 << bitindex);
	/* errata 5: use the software shadow copy instead of reading VFTA back */
	vfta = wx->mac.vft_shadow[regindex];
	if (vlan_on) {
		if (!(vfta & targetbit)) {
			vfta |= targetbit;
			vfta_changed = true;
		}
	} else {
		if ((vfta & targetbit)) {
			vfta &= ~targetbit;
			vfta_changed = true;
		}
	}
	/* Part 2
	 * Call wx_set_vlvf to set VLVFB and VLVF
	 */
	ret = wx_set_vlvf(wx, vlan, vind, vlan_on, &vfta_changed);
	if (ret != 0)
		return ret;

	if (vfta_changed)
		wr32(wx, WX_PSR_VLAN_TBL(regindex), vfta);
	wx->mac.vft_shadow[regindex] = vfta;

	return 0;
}

/**
 *  wx_clear_vfta - Clear VLAN filter table
 *  @wx: pointer to hardware structure
 *
 *  Clears the VLAN filter table, and the VMDq index associated with the filter
 **/
static void wx_clear_vfta(struct wx *wx)
{
	u32 offset;

	for (offset = 0; offset < wx->mac.vft_size; offset++) {
		wr32(wx, WX_PSR_VLAN_TBL(offset), 0);
		wx->mac.vft_shadow[offset] = 0;
	}

	for (offset = 0; offset < WX_PSR_VLAN_SWC_ENTRIES; offset++) {
		wr32(wx, WX_PSR_VLAN_SWC_IDX, offset);
		wr32(wx, WX_PSR_VLAN_SWC, 0);
		wr32(wx, WX_PSR_VLAN_SWC_VM_L, 0);
		wr32(wx, WX_PSR_VLAN_SWC_VM_H, 0);
	}
}

int wx_vlan_rx_add_vid(struct net_device *netdev,
		       __be16 proto, u16 vid)
{
	struct wx *wx = netdev_priv(netdev);

	/* add VID to filter table */
	wx_set_vfta(wx, vid, VMDQ_P(0), true);
	set_bit(vid, wx->active_vlans);

	return 0;
}
EXPORT_SYMBOL(wx_vlan_rx_add_vid);

int wx_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct wx *wx = netdev_priv(netdev);

	/* remove VID from filter table */
	if (vid)
		wx_set_vfta(wx, vid, VMDQ_P(0), false);
	clear_bit(vid, wx->active_vlans);

	return 0;
}
EXPORT_SYMBOL(wx_vlan_rx_kill_vid);

/**
 *  wx_start_hw - Prepare hardware for Tx/Rx
 *  @wx: pointer to hardware structure
 *
 *  Starts the hardware by clearing the VLAN filter table and the
 *  Tx queue rate limiters.
 **/
void wx_start_hw(struct wx *wx)
{
	int i;

	/* Clear the VLAN filter table */
	wx_clear_vfta(wx);
	WX_WRITE_FLUSH(wx);
	/* Clear the rate limiters */
	for (i = 0; i < wx->mac.max_tx_queues; i++) {
		wr32(wx, WX_TDM_RP_IDX, i);
		wr32(wx, WX_TDM_RP_RATE, 0);
	}
}
EXPORT_SYMBOL(wx_start_hw);

MODULE_LICENSE("GPL");