1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd. */
3 
4 #include <linux/etherdevice.h>
5 #include <linux/netdevice.h>
6 #include <linux/if_ether.h>
7 #include <linux/if_vlan.h>
8 #include <linux/iopoll.h>
9 #include <linux/pci.h>
10 
11 #include "wx_type.h"
12 #include "wx_lib.h"
13 #include "wx_hw.h"
14 
15 static void wx_intr_disable(struct wx *wx, u64 qmask)
16 {
17 	u32 mask;
18 
19 	mask = (qmask & U32_MAX);
20 	if (mask)
21 		wr32(wx, WX_PX_IMS(0), mask);
22 
23 	if (wx->mac.type == wx_mac_sp) {
24 		mask = (qmask >> 32);
25 		if (mask)
26 			wr32(wx, WX_PX_IMS(1), mask);
27 	}
28 }
29 
30 void wx_intr_enable(struct wx *wx, u64 qmask)
31 {
32 	u32 mask;
33 
34 	mask = (qmask & U32_MAX);
35 	if (mask)
36 		wr32(wx, WX_PX_IMC(0), mask);
37 	if (wx->mac.type == wx_mac_sp) {
38 		mask = (qmask >> 32);
39 		if (mask)
40 			wr32(wx, WX_PX_IMC(1), mask);
41 	}
42 }
43 EXPORT_SYMBOL(wx_intr_enable);
44 
/**
 * wx_irq_disable - Mask off interrupt generation on the NIC
 * @wx: board private structure
 *
 * Masks the misc and all queue interrupt causes, then waits for any
 * in-flight interrupt handlers to finish before returning.
 **/
void wx_irq_disable(struct wx *wx)
{
	struct pci_dev *pdev = wx->pdev;

	/* mask misc causes first, then every queue cause */
	wr32(wx, WX_PX_MISC_IEN, 0);
	wx_intr_disable(wx, WX_INTR_ALL);

	if (pdev->msix_enabled) {
		int vector;

		for (vector = 0; vector < wx->num_q_vectors; vector++)
			synchronize_irq(wx->msix_entries[vector].vector);

		/* vector == num_q_vectors here: also wait on the entry
		 * following the queue vectors (the non-queue/misc vector)
		 */
		synchronize_irq(wx->msix_entries[vector].vector);
	} else {
		synchronize_irq(pdev->irq);
	}
}
EXPORT_SYMBOL(wx_irq_disable);
68 
69 /* cmd_addr is used for some special command:
70  * 1. to be sector address, when implemented erase sector command
71  * 2. to be flash address when implemented read, write flash address
72  */
73 static int wx_fmgr_cmd_op(struct wx *wx, u32 cmd, u32 cmd_addr)
74 {
75 	u32 cmd_val = 0, val = 0;
76 
77 	cmd_val = WX_SPI_CMD_CMD(cmd) |
78 		  WX_SPI_CMD_CLK(WX_SPI_CLK_DIV) |
79 		  cmd_addr;
80 	wr32(wx, WX_SPI_CMD, cmd_val);
81 
82 	return read_poll_timeout(rd32, val, (val & 0x1), 10, 100000,
83 				 false, wx, WX_SPI_STATUS);
84 }
85 
86 static int wx_flash_read_dword(struct wx *wx, u32 addr, u32 *data)
87 {
88 	int ret = 0;
89 
90 	ret = wx_fmgr_cmd_op(wx, WX_SPI_CMD_READ_DWORD, addr);
91 	if (ret < 0)
92 		return ret;
93 
94 	*data = rd32(wx, WX_SPI_DATA);
95 
96 	return ret;
97 }
98 
99 int wx_check_flash_load(struct wx *hw, u32 check_bit)
100 {
101 	u32 reg = 0;
102 	int err = 0;
103 
104 	/* if there's flash existing */
105 	if (!(rd32(hw, WX_SPI_STATUS) &
106 	      WX_SPI_STATUS_FLASH_BYPASS)) {
107 		/* wait hw load flash done */
108 		err = read_poll_timeout(rd32, reg, !(reg & check_bit), 20000, 2000000,
109 					false, hw, WX_SPI_ILDR_STATUS);
110 		if (err < 0)
111 			wx_err(hw, "Check flash load timeout.\n");
112 	}
113 
114 	return err;
115 }
116 EXPORT_SYMBOL(wx_check_flash_load);
117 
118 void wx_control_hw(struct wx *wx, bool drv)
119 {
120 	/* True : Let firmware know the driver has taken over
121 	 * False : Let firmware take over control of hw
122 	 */
123 	wr32m(wx, WX_CFG_PORT_CTL, WX_CFG_PORT_CTL_DRV_LOAD,
124 	      drv ? WX_CFG_PORT_CTL_DRV_LOAD : 0);
125 }
126 EXPORT_SYMBOL(wx_control_hw);
127 
128 /**
129  * wx_mng_present - returns 0 when management capability is present
130  * @wx: pointer to hardware structure
131  */
132 int wx_mng_present(struct wx *wx)
133 {
134 	u32 fwsm;
135 
136 	fwsm = rd32(wx, WX_MIS_ST);
137 	if (fwsm & WX_MIS_ST_MNG_INIT_DN)
138 		return 0;
139 	else
140 		return -EACCES;
141 }
142 EXPORT_SYMBOL(wx_mng_present);
143 
/* Software lock to be held while the software semaphore register
 * (WX_MNG_SWFW_SYNC) is being accessed; serializes acquire/release
 * read-modify-write sequences between driver contexts.
 */
static DEFINE_MUTEX(wx_sw_sync_lock);
146 
/**
 *  wx_release_sw_sync - Release SW semaphore
 *  @wx: pointer to hardware structure
 *  @mask: Mask to specify which semaphore to release
 *
 *  Releases the SW semaphore for the specified
 *  function (CSR, PHY0, PHY1, EEPROM, Flash)
 **/
static void wx_release_sw_sync(struct wx *wx, u32 mask)
{
	/* the mutex serializes this read-modify-write of the shared
	 * semaphore register against wx_acquire_sw_sync()
	 */
	mutex_lock(&wx_sw_sync_lock);
	wr32m(wx, WX_MNG_SWFW_SYNC, mask, 0);
	mutex_unlock(&wx_sw_sync_lock);
}
161 
162 /**
163  *  wx_acquire_sw_sync - Acquire SW semaphore
164  *  @wx: pointer to hardware structure
165  *  @mask: Mask to specify which semaphore to acquire
166  *
167  *  Acquires the SW semaphore for the specified
168  *  function (CSR, PHY0, PHY1, EEPROM, Flash)
169  **/
170 static int wx_acquire_sw_sync(struct wx *wx, u32 mask)
171 {
172 	u32 sem = 0;
173 	int ret = 0;
174 
175 	mutex_lock(&wx_sw_sync_lock);
176 	ret = read_poll_timeout(rd32, sem, !(sem & mask),
177 				5000, 2000000, false, wx, WX_MNG_SWFW_SYNC);
178 	if (!ret) {
179 		sem |= mask;
180 		wr32(wx, WX_MNG_SWFW_SYNC, sem);
181 	} else {
182 		wx_err(wx, "SW Semaphore not granted: 0x%x.\n", sem);
183 	}
184 	mutex_unlock(&wx_sw_sync_lock);
185 
186 	return ret;
187 }
188 
/**
 *  wx_host_interface_command - Issue command to manageability block
 *  @wx: pointer to the HW structure
 *  @buffer: contains the command to write and where the return status will
 *   be placed
 *  @length: length of buffer, must be multiple of 4 bytes
 *  @timeout: time in ms to wait for command completion
 *  @return_data: read and return data from the buffer (true) or not (false)
 *   Needed because FW structures are big endian and decoding of
 *   these fields can be 8 bit or 16 bit based on command. Decoding
 *   is not easily understood without making a table of commands.
 *   So we will leave this up to the caller to read back the data
 *   in these cases.
 *
 *  Return: 0 on success, negative errno on failure.
 **/
int wx_host_interface_command(struct wx *wx, u32 *buffer,
			      u32 length, u32 timeout, bool return_data)
{
	u32 hdr_size = sizeof(struct wx_hic_hdr);
	u32 hicr, i, bi, buf[64] = {};
	int status = 0;
	u32 dword_len;
	u16 buf_len;

	if (length == 0 || length > WX_HI_MAX_BLOCK_BYTE_LENGTH) {
		wx_err(wx, "Buffer length failure buffersize=%d.\n", length);
		return -EINVAL;
	}

	/* serialize mailbox access against firmware and other callers */
	status = wx_acquire_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_MB);
	if (status != 0)
		return status;

	/* Calculate length in DWORDs. We must be DWORD aligned */
	if ((length % (sizeof(u32))) != 0) {
		wx_err(wx, "Buffer length failure, not aligned to dword");
		status = -EINVAL;
		goto rel_out;
	}

	dword_len = length >> 2;

	/* The device driver writes the relevant command block
	 * into the ram area.
	 */
	for (i = 0; i < dword_len; i++) {
		wr32a(wx, WX_MNG_MBOX, i, (__force u32)cpu_to_le32(buffer[i]));
		/* write flush */
		buf[i] = rd32a(wx, WX_MNG_MBOX, i);
	}
	/* Setting this bit tells the ARC that a new command is pending. */
	wr32m(wx, WX_MNG_MBOX_CTL,
	      WX_MNG_MBOX_CTL_SWRDY, WX_MNG_MBOX_CTL_SWRDY);

	/* poll for FWRDY; @timeout is in ms, poll interval/limit in us */
	status = read_poll_timeout(rd32, hicr, hicr & WX_MNG_MBOX_CTL_FWRDY, 1000,
				   timeout * 1000, false, wx, WX_MNG_MBOX_CTL);

	/* Check command completion */
	if (status) {
		wx_dbg(wx, "Command has failed with no status valid.\n");

		buf[0] = rd32(wx, WX_MNG_MBOX);
		/* compare the sent command byte with the bitwise complement
		 * of the response's top byte; a mismatch means the command
		 * was never consumed
		 */
		if ((buffer[0] & 0xff) != (~buf[0] >> 24)) {
			status = -EINVAL;
			goto rel_out;
		}
		/* 0x80 in bits 23:16 of the response flags an unknown cmd */
		if ((buf[0] & 0xff0000) >> 16 == 0x80) {
			wx_dbg(wx, "It's unknown cmd.\n");
			status = -EINVAL;
			goto rel_out;
		}

		wx_dbg(wx, "write value:\n");
		for (i = 0; i < dword_len; i++)
			wx_dbg(wx, "%x ", buffer[i]);
		wx_dbg(wx, "read value:\n");
		for (i = 0; i < dword_len; i++)
			wx_dbg(wx, "%x ", buf[i]);
	}

	if (!return_data)
		goto rel_out;

	/* Calculate length in DWORDs */
	dword_len = hdr_size >> 2;

	/* first pull in the header so we know the buffer length */
	for (bi = 0; bi < dword_len; bi++) {
		buffer[bi] = rd32a(wx, WX_MNG_MBOX, bi);
		le32_to_cpus(&buffer[bi]);
	}

	/* If there is any thing in data position pull it in */
	buf_len = ((struct wx_hic_hdr *)buffer)->buf_len;
	if (buf_len == 0)
		goto rel_out;

	if (length < buf_len + hdr_size) {
		wx_err(wx, "Buffer not large enough for reply message.\n");
		status = -EFAULT;
		goto rel_out;
	}

	/* Calculate length in DWORDs, add 3 for odd lengths */
	dword_len = (buf_len + 3) >> 2;

	/* Pull in the rest of the buffer (bi is where we left off).
	 * NOTE(review): the inclusive "<=" bound assumes bi (header dwords
	 * already read) offsets the payload by exactly the dwords the bound
	 * omits - confirm against sizeof(struct wx_hic_hdr).
	 */
	for (; bi <= dword_len; bi++) {
		buffer[bi] = rd32a(wx, WX_MNG_MBOX, bi);
		le32_to_cpus(&buffer[bi]);
	}

rel_out:
	wx_release_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_MB);
	return status;
}
EXPORT_SYMBOL(wx_host_interface_command);
305 
306 /**
307  *  wx_read_ee_hostif_data - Read EEPROM word using a host interface cmd
308  *  assuming that the semaphore is already obtained.
309  *  @wx: pointer to hardware structure
310  *  @offset: offset of  word in the EEPROM to read
311  *  @data: word read from the EEPROM
312  *
313  *  Reads a 16 bit word from the EEPROM using the hostif.
314  **/
315 static int wx_read_ee_hostif_data(struct wx *wx, u16 offset, u16 *data)
316 {
317 	struct wx_hic_read_shadow_ram buffer;
318 	int status;
319 
320 	buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD;
321 	buffer.hdr.req.buf_lenh = 0;
322 	buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN;
323 	buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
324 
325 	/* convert offset from words to bytes */
326 	buffer.address = (__force u32)cpu_to_be32(offset * 2);
327 	/* one word */
328 	buffer.length = (__force u16)cpu_to_be16(sizeof(u16));
329 
330 	status = wx_host_interface_command(wx, (u32 *)&buffer, sizeof(buffer),
331 					   WX_HI_COMMAND_TIMEOUT, false);
332 
333 	if (status != 0)
334 		return status;
335 
336 	*data = (u16)rd32a(wx, WX_MNG_MBOX, FW_NVM_DATA_OFFSET);
337 
338 	return status;
339 }
340 
341 /**
342  *  wx_read_ee_hostif - Read EEPROM word using a host interface cmd
343  *  @wx: pointer to hardware structure
344  *  @offset: offset of  word in the EEPROM to read
345  *  @data: word read from the EEPROM
346  *
347  *  Reads a 16 bit word from the EEPROM using the hostif.
348  **/
349 int wx_read_ee_hostif(struct wx *wx, u16 offset, u16 *data)
350 {
351 	int status = 0;
352 
353 	status = wx_acquire_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_FLASH);
354 	if (status == 0) {
355 		status = wx_read_ee_hostif_data(wx, offset, data);
356 		wx_release_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_FLASH);
357 	}
358 
359 	return status;
360 }
361 EXPORT_SYMBOL(wx_read_ee_hostif);
362 
/**
 *  wx_read_ee_hostif_buffer- Read EEPROM word(s) using hostif
 *  @wx: pointer to hardware structure
 *  @offset: offset of  word in the EEPROM to read
 *  @words: number of words
 *  @data: word(s) read from the EEPROM
 *
 *  Reads a 16 bit word(s) from the EEPROM using the hostif.
 *
 *  Return: 0 on success, negative errno on failure.
 **/
int wx_read_ee_hostif_buffer(struct wx *wx,
			     u16 offset, u16 words, u16 *data)
{
	struct wx_hic_read_shadow_ram buffer;
	u32 current_word = 0;
	u16 words_to_read;
	u32 value = 0;
	int status;
	u32 i;

	/* Take semaphore for the entire operation. */
	status = wx_acquire_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_FLASH);
	if (status != 0)
		return status;

	while (words) {
		/* chunk the request to the mailbox read-buffer capacity */
		if (words > FW_MAX_READ_BUFFER_SIZE / 2)
			words_to_read = FW_MAX_READ_BUFFER_SIZE / 2;
		else
			words_to_read = words;

		buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD;
		buffer.hdr.req.buf_lenh = 0;
		buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN;
		buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;

		/* convert offset from words to bytes */
		buffer.address = (__force u32)cpu_to_be32((offset + current_word) * 2);
		buffer.length = (__force u16)cpu_to_be16(words_to_read * 2);

		status = wx_host_interface_command(wx, (u32 *)&buffer,
						   sizeof(buffer),
						   WX_HI_COMMAND_TIMEOUT,
						   false);

		if (status != 0) {
			wx_err(wx, "Host interface command failed\n");
			goto out;
		}

		/* Each 32-bit mailbox read yields two 16-bit words. Note i
		 * advances twice per pass: once in the body for the low
		 * half, once in the for-increment after the high half.
		 */
		for (i = 0; i < words_to_read; i++) {
			u32 reg = WX_MNG_MBOX + (FW_NVM_DATA_OFFSET << 2) + 2 * i;

			value = rd32(wx, reg);
			data[current_word] = (u16)(value & 0xffff);
			current_word++;
			i++;
			if (i < words_to_read) {
				/* high half of the same dword */
				value >>= 16;
				data[current_word] = (u16)(value & 0xffff);
				current_word++;
			}
		}
		words -= words_to_read;
	}

out:
	wx_release_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_FLASH);
	return status;
}
EXPORT_SYMBOL(wx_read_ee_hostif_buffer);
433 
434 /**
435  *  wx_calculate_checksum - Calculate checksum for buffer
436  *  @buffer: pointer to EEPROM
437  *  @length: size of EEPROM to calculate a checksum for
438  *  Calculates the checksum for some buffer on a specified length.  The
439  *  checksum calculated is returned.
440  **/
441 static u8 wx_calculate_checksum(u8 *buffer, u32 length)
442 {
443 	u8 sum = 0;
444 	u32 i;
445 
446 	if (!buffer)
447 		return 0;
448 
449 	for (i = 0; i < length; i++)
450 		sum += buffer[i];
451 
452 	return (u8)(0 - sum);
453 }
454 
/**
 *  wx_reset_hostif - send reset cmd to fw
 *  @wx: pointer to hardware structure
 *
 *  Sends reset cmd to firmware through the manageability
 *  block.
 *
 *  Return: 0 when firmware acknowledged success, -EFAULT when firmware
 *  reported failure, other negative errno when the command never went
 *  through.
 **/
int wx_reset_hostif(struct wx *wx)
{
	struct wx_hic_reset reset_cmd;
	int ret_val = 0;
	int i;

	/* NOTE(review): reset_cmd is not zero-initialized; if the
	 * checksummed span (FW_CEM_HDR_LEN + buf_len) covers any struct
	 * padding, uninitialized stack bytes enter the checksum - confirm
	 * the struct layout in wx_type.h.
	 */
	reset_cmd.hdr.cmd = FW_RESET_CMD;
	reset_cmd.hdr.buf_len = FW_RESET_LEN;
	reset_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
	reset_cmd.lan_id = wx->bus.func;
	reset_cmd.reset_type = (u16)wx->reset_type;
	/* checksum is computed with the checksum field itself zeroed */
	reset_cmd.hdr.checksum = 0;
	reset_cmd.hdr.checksum = wx_calculate_checksum((u8 *)&reset_cmd,
						       (FW_CEM_HDR_LEN +
							reset_cmd.hdr.buf_len));

	/* retry issuing the command; once it goes through, judge the
	 * firmware's response status exactly once and stop
	 */
	for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
		ret_val = wx_host_interface_command(wx, (u32 *)&reset_cmd,
						    sizeof(reset_cmd),
						    WX_HI_COMMAND_TIMEOUT,
						    true);
		if (ret_val != 0)
			continue;

		if (reset_cmd.hdr.cmd_or_resp.ret_status ==
		    FW_CEM_RESP_STATUS_SUCCESS)
			ret_val = 0;
		else
			ret_val = -EFAULT;

		break;
	}

	return ret_val;
}
EXPORT_SYMBOL(wx_reset_hostif);
498 
499 /**
500  *  wx_init_eeprom_params - Initialize EEPROM params
501  *  @wx: pointer to hardware structure
502  *
503  *  Initializes the EEPROM parameters wx_eeprom_info within the
504  *  wx_hw struct in order to set up EEPROM access.
505  **/
506 void wx_init_eeprom_params(struct wx *wx)
507 {
508 	struct wx_eeprom_info *eeprom = &wx->eeprom;
509 	u16 eeprom_size;
510 	u16 data = 0x80;
511 
512 	if (eeprom->type == wx_eeprom_uninitialized) {
513 		eeprom->semaphore_delay = 10;
514 		eeprom->type = wx_eeprom_none;
515 
516 		if (!(rd32(wx, WX_SPI_STATUS) &
517 		      WX_SPI_STATUS_FLASH_BYPASS)) {
518 			eeprom->type = wx_flash;
519 
520 			eeprom_size = 4096;
521 			eeprom->word_size = eeprom_size >> 1;
522 
523 			wx_dbg(wx, "Eeprom params: type = %d, size = %d\n",
524 			       eeprom->type, eeprom->word_size);
525 		}
526 	}
527 
528 	if (wx->mac.type == wx_mac_sp) {
529 		if (wx_read_ee_hostif(wx, WX_SW_REGION_PTR, &data)) {
530 			wx_err(wx, "NVM Read Error\n");
531 			return;
532 		}
533 		data = data >> 1;
534 	}
535 
536 	eeprom->sw_region_offset = data;
537 }
538 EXPORT_SYMBOL(wx_init_eeprom_params);
539 
540 /**
541  *  wx_get_mac_addr - Generic get MAC address
542  *  @wx: pointer to hardware structure
543  *  @mac_addr: Adapter MAC address
544  *
545  *  Reads the adapter's MAC address from first Receive Address Register (RAR0)
546  *  A reset of the adapter must be performed prior to calling this function
547  *  in order for the MAC address to have been loaded from the EEPROM into RAR0
548  **/
549 void wx_get_mac_addr(struct wx *wx, u8 *mac_addr)
550 {
551 	u32 rar_high;
552 	u32 rar_low;
553 	u16 i;
554 
555 	wr32(wx, WX_PSR_MAC_SWC_IDX, 0);
556 	rar_high = rd32(wx, WX_PSR_MAC_SWC_AD_H);
557 	rar_low = rd32(wx, WX_PSR_MAC_SWC_AD_L);
558 
559 	for (i = 0; i < 2; i++)
560 		mac_addr[i] = (u8)(rar_high >> (1 - i) * 8);
561 
562 	for (i = 0; i < 4; i++)
563 		mac_addr[i + 2] = (u8)(rar_low >> (3 - i) * 8);
564 }
565 EXPORT_SYMBOL(wx_get_mac_addr);
566 
567 /**
568  *  wx_set_rar - Set Rx address register
569  *  @wx: pointer to hardware structure
570  *  @index: Receive address register to write
571  *  @addr: Address to put into receive address register
572  *  @pools: VMDq "set" or "pool" index
573  *  @enable_addr: set flag that address is active
574  *
575  *  Puts an ethernet address into a receive address register.
576  **/
577 static int wx_set_rar(struct wx *wx, u32 index, u8 *addr, u64 pools,
578 		      u32 enable_addr)
579 {
580 	u32 rar_entries = wx->mac.num_rar_entries;
581 	u32 rar_low, rar_high;
582 
583 	/* Make sure we are using a valid rar index range */
584 	if (index >= rar_entries) {
585 		wx_err(wx, "RAR index %d is out of range.\n", index);
586 		return -EINVAL;
587 	}
588 
589 	/* select the MAC address */
590 	wr32(wx, WX_PSR_MAC_SWC_IDX, index);
591 
592 	/* setup VMDq pool mapping */
593 	wr32(wx, WX_PSR_MAC_SWC_VM_L, pools & 0xFFFFFFFF);
594 	if (wx->mac.type == wx_mac_sp)
595 		wr32(wx, WX_PSR_MAC_SWC_VM_H, pools >> 32);
596 
597 	/* HW expects these in little endian so we reverse the byte
598 	 * order from network order (big endian) to little endian
599 	 *
600 	 * Some parts put the VMDq setting in the extra RAH bits,
601 	 * so save everything except the lower 16 bits that hold part
602 	 * of the address and the address valid bit.
603 	 */
604 	rar_low = ((u32)addr[5] |
605 		  ((u32)addr[4] << 8) |
606 		  ((u32)addr[3] << 16) |
607 		  ((u32)addr[2] << 24));
608 	rar_high = ((u32)addr[1] |
609 		   ((u32)addr[0] << 8));
610 	if (enable_addr != 0)
611 		rar_high |= WX_PSR_MAC_SWC_AD_H_AV;
612 
613 	wr32(wx, WX_PSR_MAC_SWC_AD_L, rar_low);
614 	wr32m(wx, WX_PSR_MAC_SWC_AD_H,
615 	      (WX_PSR_MAC_SWC_AD_H_AD(U16_MAX) |
616 	       WX_PSR_MAC_SWC_AD_H_ADTYPE(1) |
617 	       WX_PSR_MAC_SWC_AD_H_AV),
618 	      rar_high);
619 
620 	return 0;
621 }
622 
623 /**
624  *  wx_clear_rar - Remove Rx address register
625  *  @wx: pointer to hardware structure
626  *  @index: Receive address register to write
627  *
628  *  Clears an ethernet address from a receive address register.
629  **/
630 static int wx_clear_rar(struct wx *wx, u32 index)
631 {
632 	u32 rar_entries = wx->mac.num_rar_entries;
633 
634 	/* Make sure we are using a valid rar index range */
635 	if (index >= rar_entries) {
636 		wx_err(wx, "RAR index %d is out of range.\n", index);
637 		return -EINVAL;
638 	}
639 
640 	/* Some parts put the VMDq setting in the extra RAH bits,
641 	 * so save everything except the lower 16 bits that hold part
642 	 * of the address and the address valid bit.
643 	 */
644 	wr32(wx, WX_PSR_MAC_SWC_IDX, index);
645 
646 	wr32(wx, WX_PSR_MAC_SWC_VM_L, 0);
647 	wr32(wx, WX_PSR_MAC_SWC_VM_H, 0);
648 
649 	wr32(wx, WX_PSR_MAC_SWC_AD_L, 0);
650 	wr32m(wx, WX_PSR_MAC_SWC_AD_H,
651 	      (WX_PSR_MAC_SWC_AD_H_AD(U16_MAX) |
652 	       WX_PSR_MAC_SWC_AD_H_ADTYPE(1) |
653 	       WX_PSR_MAC_SWC_AD_H_AV),
654 	      0);
655 
656 	return 0;
657 }
658 
/**
 *  wx_clear_vmdq - Disassociate a VMDq pool index from a rx address
 *  @wx: pointer to hardware struct
 *  @rar: receive address register index to disassociate
 *  @vmdq: VMDq pool index to remove from the rar (currently unused)
 **/
static int wx_clear_vmdq(struct wx *wx, u32 rar, u32 __maybe_unused vmdq)
{
	u32 rar_entries = wx->mac.num_rar_entries;
	u32 mpsar_lo, mpsar_hi;

	/* Make sure we are using a valid rar index range */
	if (rar >= rar_entries) {
		wx_err(wx, "RAR index %d is out of range.\n", rar);
		return -EINVAL;
	}

	/* read the pool-select bitmap for this rar */
	wr32(wx, WX_PSR_MAC_SWC_IDX, rar);
	mpsar_lo = rd32(wx, WX_PSR_MAC_SWC_VM_L);
	mpsar_hi = rd32(wx, WX_PSR_MAC_SWC_VM_H);

	if (!mpsar_lo && !mpsar_hi)
		return 0;

	/* was that the last pool using this rar? */
	/* NOTE(review): unreachable as written - the early return above
	 * already handled mpsar_lo == 0 && mpsar_hi == 0, and @vmdq's pool
	 * bit is never cleared from the bitmap first, so wx_clear_rar()
	 * can never fire here. Confirm intended behavior before relying on
	 * this helper to actually free a RAR entry.
	 */
	if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
		wx_clear_rar(wx, rar);

	return 0;
}
689 
/**
 *  wx_init_uta_tables - Initialize the Unicast Table Array
 *  @wx: pointer to hardware structure
 *
 *  Zeroes all 128 32-bit entries of the unicast table array.
 **/
static void wx_init_uta_tables(struct wx *wx)
{
	int i = 0;

	wx_dbg(wx, " Clearing UTA\n");

	while (i < 128) {
		wr32(wx, WX_PSR_UC_TBL(i), 0);
		i++;
	}
}
703 
704 /**
705  *  wx_init_rx_addrs - Initializes receive address filters.
706  *  @wx: pointer to hardware structure
707  *
708  *  Places the MAC address in receive address register 0 and clears the rest
709  *  of the receive address registers. Clears the multicast table. Assumes
710  *  the receiver is in reset when the routine is called.
711  **/
712 void wx_init_rx_addrs(struct wx *wx)
713 {
714 	u32 rar_entries = wx->mac.num_rar_entries;
715 	u32 psrctl;
716 	int i;
717 
718 	/* If the current mac address is valid, assume it is a software override
719 	 * to the permanent address.
720 	 * Otherwise, use the permanent address from the eeprom.
721 	 */
722 	if (!is_valid_ether_addr(wx->mac.addr)) {
723 		/* Get the MAC address from the RAR0 for later reference */
724 		wx_get_mac_addr(wx, wx->mac.addr);
725 		wx_dbg(wx, "Keeping Current RAR0 Addr = %pM\n", wx->mac.addr);
726 	} else {
727 		/* Setup the receive address. */
728 		wx_dbg(wx, "Overriding MAC Address in RAR[0]\n");
729 		wx_dbg(wx, "New MAC Addr = %pM\n", wx->mac.addr);
730 
731 		wx_set_rar(wx, 0, wx->mac.addr, 0, WX_PSR_MAC_SWC_AD_H_AV);
732 
733 		if (wx->mac.type == wx_mac_sp) {
734 			/* clear VMDq pool/queue selection for RAR 0 */
735 			wx_clear_vmdq(wx, 0, WX_CLEAR_VMDQ_ALL);
736 		}
737 	}
738 
739 	/* Zero out the other receive addresses. */
740 	wx_dbg(wx, "Clearing RAR[1-%d]\n", rar_entries - 1);
741 	for (i = 1; i < rar_entries; i++) {
742 		wr32(wx, WX_PSR_MAC_SWC_IDX, i);
743 		wr32(wx, WX_PSR_MAC_SWC_AD_L, 0);
744 		wr32(wx, WX_PSR_MAC_SWC_AD_H, 0);
745 	}
746 
747 	/* Clear the MTA */
748 	wx->addr_ctrl.mta_in_use = 0;
749 	psrctl = rd32(wx, WX_PSR_CTL);
750 	psrctl &= ~(WX_PSR_CTL_MO | WX_PSR_CTL_MFE);
751 	psrctl |= wx->mac.mc_filter_type << WX_PSR_CTL_MO_SHIFT;
752 	wr32(wx, WX_PSR_CTL, psrctl);
753 	wx_dbg(wx, " Clearing MTA\n");
754 	for (i = 0; i < wx->mac.mcft_size; i++)
755 		wr32(wx, WX_PSR_MC_TBL(i), 0);
756 
757 	wx_init_uta_tables(wx);
758 }
759 EXPORT_SYMBOL(wx_init_rx_addrs);
760 
761 static void wx_sync_mac_table(struct wx *wx)
762 {
763 	int i;
764 
765 	for (i = 0; i < wx->mac.num_rar_entries; i++) {
766 		if (wx->mac_table[i].state & WX_MAC_STATE_MODIFIED) {
767 			if (wx->mac_table[i].state & WX_MAC_STATE_IN_USE) {
768 				wx_set_rar(wx, i,
769 					   wx->mac_table[i].addr,
770 					   wx->mac_table[i].pools,
771 					   WX_PSR_MAC_SWC_AD_H_AV);
772 			} else {
773 				wx_clear_rar(wx, i);
774 			}
775 			wx->mac_table[i].state &= ~(WX_MAC_STATE_MODIFIED);
776 		}
777 	}
778 }
779 
780 /* this function destroys the first RAR entry */
781 void wx_mac_set_default_filter(struct wx *wx, u8 *addr)
782 {
783 	memcpy(&wx->mac_table[0].addr, addr, ETH_ALEN);
784 	wx->mac_table[0].pools = 1ULL;
785 	wx->mac_table[0].state = (WX_MAC_STATE_DEFAULT | WX_MAC_STATE_IN_USE);
786 	wx_set_rar(wx, 0, wx->mac_table[0].addr,
787 		   wx->mac_table[0].pools,
788 		   WX_PSR_MAC_SWC_AD_H_AV);
789 }
790 EXPORT_SYMBOL(wx_mac_set_default_filter);
791 
792 void wx_flush_sw_mac_table(struct wx *wx)
793 {
794 	u32 i;
795 
796 	for (i = 0; i < wx->mac.num_rar_entries; i++) {
797 		if (!(wx->mac_table[i].state & WX_MAC_STATE_IN_USE))
798 			continue;
799 
800 		wx->mac_table[i].state |= WX_MAC_STATE_MODIFIED;
801 		wx->mac_table[i].state &= ~WX_MAC_STATE_IN_USE;
802 		memset(wx->mac_table[i].addr, 0, ETH_ALEN);
803 		wx->mac_table[i].pools = 0;
804 	}
805 	wx_sync_mac_table(wx);
806 }
807 EXPORT_SYMBOL(wx_flush_sw_mac_table);
808 
809 static int wx_add_mac_filter(struct wx *wx, u8 *addr, u16 pool)
810 {
811 	u32 i;
812 
813 	if (is_zero_ether_addr(addr))
814 		return -EINVAL;
815 
816 	for (i = 0; i < wx->mac.num_rar_entries; i++) {
817 		if (wx->mac_table[i].state & WX_MAC_STATE_IN_USE) {
818 			if (ether_addr_equal(addr, wx->mac_table[i].addr)) {
819 				if (wx->mac_table[i].pools != (1ULL << pool)) {
820 					memcpy(wx->mac_table[i].addr, addr, ETH_ALEN);
821 					wx->mac_table[i].pools |= (1ULL << pool);
822 					wx_sync_mac_table(wx);
823 					return i;
824 				}
825 			}
826 		}
827 
828 		if (wx->mac_table[i].state & WX_MAC_STATE_IN_USE)
829 			continue;
830 		wx->mac_table[i].state |= (WX_MAC_STATE_MODIFIED |
831 					   WX_MAC_STATE_IN_USE);
832 		memcpy(wx->mac_table[i].addr, addr, ETH_ALEN);
833 		wx->mac_table[i].pools |= (1ULL << pool);
834 		wx_sync_mac_table(wx);
835 		return i;
836 	}
837 	return -ENOMEM;
838 }
839 
840 static int wx_del_mac_filter(struct wx *wx, u8 *addr, u16 pool)
841 {
842 	u32 i;
843 
844 	if (is_zero_ether_addr(addr))
845 		return -EINVAL;
846 
847 	/* search table for addr, if found, set to 0 and sync */
848 	for (i = 0; i < wx->mac.num_rar_entries; i++) {
849 		if (!ether_addr_equal(addr, wx->mac_table[i].addr))
850 			continue;
851 
852 		wx->mac_table[i].state |= WX_MAC_STATE_MODIFIED;
853 		wx->mac_table[i].pools &= ~(1ULL << pool);
854 		if (!wx->mac_table[i].pools) {
855 			wx->mac_table[i].state &= ~WX_MAC_STATE_IN_USE;
856 			memset(wx->mac_table[i].addr, 0, ETH_ALEN);
857 		}
858 		wx_sync_mac_table(wx);
859 		return 0;
860 	}
861 	return -ENOMEM;
862 }
863 
864 static int wx_available_rars(struct wx *wx)
865 {
866 	u32 i, count = 0;
867 
868 	for (i = 0; i < wx->mac.num_rar_entries; i++) {
869 		if (wx->mac_table[i].state == 0)
870 			count++;
871 	}
872 
873 	return count;
874 }
875 
876 /**
877  * wx_write_uc_addr_list - write unicast addresses to RAR table
878  * @netdev: network interface device structure
879  * @pool: index for mac table
880  *
881  * Writes unicast address list to the RAR table.
882  * Returns: -ENOMEM on failure/insufficient address space
883  *                0 on no addresses written
884  *                X on writing X addresses to the RAR table
885  **/
886 static int wx_write_uc_addr_list(struct net_device *netdev, int pool)
887 {
888 	struct wx *wx = netdev_priv(netdev);
889 	int count = 0;
890 
891 	/* return ENOMEM indicating insufficient memory for addresses */
892 	if (netdev_uc_count(netdev) > wx_available_rars(wx))
893 		return -ENOMEM;
894 
895 	if (!netdev_uc_empty(netdev)) {
896 		struct netdev_hw_addr *ha;
897 
898 		netdev_for_each_uc_addr(ha, netdev) {
899 			wx_del_mac_filter(wx, ha->addr, pool);
900 			wx_add_mac_filter(wx, ha->addr, pool);
901 			count++;
902 		}
903 	}
904 	return count;
905 }
906 
907 /**
908  *  wx_mta_vector - Determines bit-vector in multicast table to set
909  *  @wx: pointer to private structure
910  *  @mc_addr: the multicast address
911  *
912  *  Extracts the 12 bits, from a multicast address, to determine which
913  *  bit-vector to set in the multicast table. The hardware uses 12 bits, from
914  *  incoming rx multicast addresses, to determine the bit-vector to check in
915  *  the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set
916  *  by the MO field of the MCSTCTRL. The MO field is set during initialization
917  *  to mc_filter_type.
918  **/
919 static u32 wx_mta_vector(struct wx *wx, u8 *mc_addr)
920 {
921 	u32 vector = 0;
922 
923 	switch (wx->mac.mc_filter_type) {
924 	case 0:   /* use bits [47:36] of the address */
925 		vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
926 		break;
927 	case 1:   /* use bits [46:35] of the address */
928 		vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
929 		break;
930 	case 2:   /* use bits [45:34] of the address */
931 		vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
932 		break;
933 	case 3:   /* use bits [43:32] of the address */
934 		vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
935 		break;
936 	default:  /* Invalid mc_filter_type */
937 		wx_err(wx, "MC filter type param set incorrectly\n");
938 		break;
939 	}
940 
941 	/* vector can only be 12-bits or boundary will be exceeded */
942 	vector &= 0xFFF;
943 	return vector;
944 }
945 
946 /**
947  *  wx_set_mta - Set bit-vector in multicast table
948  *  @wx: pointer to private structure
949  *  @mc_addr: Multicast address
950  *
951  *  Sets the bit-vector in the multicast table.
952  **/
953 static void wx_set_mta(struct wx *wx, u8 *mc_addr)
954 {
955 	u32 vector, vector_bit, vector_reg;
956 
957 	wx->addr_ctrl.mta_in_use++;
958 
959 	vector = wx_mta_vector(wx, mc_addr);
960 	wx_dbg(wx, " bit-vector = 0x%03X\n", vector);
961 
962 	/* The MTA is a register array of 128 32-bit registers. It is treated
963 	 * like an array of 4096 bits.  We want to set bit
964 	 * BitArray[vector_value]. So we figure out what register the bit is
965 	 * in, read it, OR in the new bit, then write back the new value.  The
966 	 * register is determined by the upper 7 bits of the vector value and
967 	 * the bit within that register are determined by the lower 5 bits of
968 	 * the value.
969 	 */
970 	vector_reg = (vector >> 5) & 0x7F;
971 	vector_bit = vector & 0x1F;
972 	wx->mac.mta_shadow[vector_reg] |= (1 << vector_bit);
973 }
974 
975 /**
976  *  wx_update_mc_addr_list - Updates MAC list of multicast addresses
977  *  @wx: pointer to private structure
978  *  @netdev: pointer to net device structure
979  *
980  *  The given list replaces any existing list. Clears the MC addrs from receive
981  *  address registers and the multicast table. Uses unused receive address
982  *  registers for the first multicast addresses, and hashes the rest into the
983  *  multicast table.
984  **/
985 static void wx_update_mc_addr_list(struct wx *wx, struct net_device *netdev)
986 {
987 	struct netdev_hw_addr *ha;
988 	u32 i, psrctl;
989 
990 	/* Set the new number of MC addresses that we are being requested to
991 	 * use.
992 	 */
993 	wx->addr_ctrl.num_mc_addrs = netdev_mc_count(netdev);
994 	wx->addr_ctrl.mta_in_use = 0;
995 
996 	/* Clear mta_shadow */
997 	wx_dbg(wx, " Clearing MTA\n");
998 	memset(&wx->mac.mta_shadow, 0, sizeof(wx->mac.mta_shadow));
999 
1000 	/* Update mta_shadow */
1001 	netdev_for_each_mc_addr(ha, netdev) {
1002 		wx_dbg(wx, " Adding the multicast addresses:\n");
1003 		wx_set_mta(wx, ha->addr);
1004 	}
1005 
1006 	/* Enable mta */
1007 	for (i = 0; i < wx->mac.mcft_size; i++)
1008 		wr32a(wx, WX_PSR_MC_TBL(0), i,
1009 		      wx->mac.mta_shadow[i]);
1010 
1011 	if (wx->addr_ctrl.mta_in_use > 0) {
1012 		psrctl = rd32(wx, WX_PSR_CTL);
1013 		psrctl &= ~(WX_PSR_CTL_MO | WX_PSR_CTL_MFE);
1014 		psrctl |= WX_PSR_CTL_MFE |
1015 			  (wx->mac.mc_filter_type << WX_PSR_CTL_MO_SHIFT);
1016 		wr32(wx, WX_PSR_CTL, psrctl);
1017 	}
1018 
1019 	wx_dbg(wx, "Update mc addr list Complete\n");
1020 }
1021 
/**
 * wx_write_mc_addr_list - write multicast addresses to MTA
 * @netdev: network interface device structure
 *
 * Writes multicast address list to the MTA hash table.
 * Returns: 0 on no addresses written
 *          X on writing X addresses to MTA
 **/
static int wx_write_mc_addr_list(struct net_device *netdev)
{
	struct wx *wx = netdev_priv(netdev);

	/* nothing to program while the interface is down */
	if (!netif_running(netdev))
		return 0;

	wx_update_mc_addr_list(wx, netdev);

	return netdev_mc_count(netdev);
}
1041 
1042 /**
1043  * wx_set_mac - Change the Ethernet Address of the NIC
1044  * @netdev: network interface device structure
1045  * @p: pointer to an address structure
1046  *
1047  * Returns 0 on success, negative on failure
1048  **/
1049 int wx_set_mac(struct net_device *netdev, void *p)
1050 {
1051 	struct wx *wx = netdev_priv(netdev);
1052 	struct sockaddr *addr = p;
1053 	int retval;
1054 
1055 	retval = eth_prepare_mac_addr_change(netdev, addr);
1056 	if (retval)
1057 		return retval;
1058 
1059 	wx_del_mac_filter(wx, wx->mac.addr, 0);
1060 	eth_hw_addr_set(netdev, addr->sa_data);
1061 	memcpy(wx->mac.addr, addr->sa_data, netdev->addr_len);
1062 
1063 	wx_mac_set_default_filter(wx, wx->mac.addr);
1064 
1065 	return 0;
1066 }
1067 EXPORT_SYMBOL(wx_set_mac);
1068 
/* wx_disable_rx - stop the receive data path
 * @wx: pointer to private structure
 *
 * Clears the RDB receive enable.  The PSR loopback-switch enable state is
 * saved in mac.set_lben so wx_enable_rx() can restore it.  The MAC receiver
 * itself is only stopped when neither NCSI nor WOL needs it to stay up.
 */
void wx_disable_rx(struct wx *wx)
{
	u32 pfdtxgswc;
	u32 rxctrl;

	rxctrl = rd32(wx, WX_RDB_PB_CTL);
	if (rxctrl & WX_RDB_PB_CTL_RXEN) {
		/* turn off the loopback switch while rx is disabled and
		 * remember whether it must be re-enabled later
		 */
		pfdtxgswc = rd32(wx, WX_PSR_CTL);
		if (pfdtxgswc & WX_PSR_CTL_SW_EN) {
			pfdtxgswc &= ~WX_PSR_CTL_SW_EN;
			wr32(wx, WX_PSR_CTL, pfdtxgswc);
			wx->mac.set_lben = true;
		} else {
			wx->mac.set_lben = false;
		}
		rxctrl &= ~WX_RDB_PB_CTL_RXEN;
		wr32(wx, WX_RDB_PB_CTL, rxctrl);

		/* keep the MAC receiver running if the BMC (NCSI) or
		 * wake-on-LAN still needs to see traffic
		 */
		if (!(((wx->subsystem_device_id & WX_NCSI_MASK) == WX_NCSI_SUP) ||
		      ((wx->subsystem_device_id & WX_WOL_MASK) == WX_WOL_SUP))) {
			/* disable mac receiver */
			wr32m(wx, WX_MAC_RX_CFG,
			      WX_MAC_RX_CFG_RE, 0);
		}
	}
}
EXPORT_SYMBOL(wx_disable_rx);
1096 
/* wx_enable_rx - restart the receive data path
 * @wx: pointer to private structure
 *
 * Re-enables the MAC receiver and the RDB receive path, and restores the
 * PSR loopback-switch enable if wx_disable_rx() had cleared it.
 */
static void wx_enable_rx(struct wx *wx)
{
	u32 psrctl;

	/* enable mac receiver */
	wr32m(wx, WX_MAC_RX_CFG,
	      WX_MAC_RX_CFG_RE, WX_MAC_RX_CFG_RE);

	wr32m(wx, WX_RDB_PB_CTL,
	      WX_RDB_PB_CTL_RXEN, WX_RDB_PB_CTL_RXEN);

	/* restore loopback-switch enable saved by wx_disable_rx() */
	if (wx->mac.set_lben) {
		psrctl = rd32(wx, WX_PSR_CTL);
		psrctl |= WX_PSR_CTL_SW_EN;
		wr32(wx, WX_PSR_CTL, psrctl);
		wx->mac.set_lben = false;
	}
}
1115 
1116 /**
1117  * wx_set_rxpba - Initialize Rx packet buffer
1118  * @wx: pointer to private structure
1119  **/
1120 static void wx_set_rxpba(struct wx *wx)
1121 {
1122 	u32 rxpktsize, txpktsize, txpbthresh;
1123 
1124 	rxpktsize = wx->mac.rx_pb_size << WX_RDB_PB_SZ_SHIFT;
1125 	wr32(wx, WX_RDB_PB_SZ(0), rxpktsize);
1126 
1127 	/* Only support an equally distributed Tx packet buffer strategy. */
1128 	txpktsize = wx->mac.tx_pb_size;
1129 	txpbthresh = (txpktsize / 1024) - WX_TXPKT_SIZE_MAX;
1130 	wr32(wx, WX_TDB_PB_SZ(0), txpktsize);
1131 	wr32(wx, WX_TDM_PB_THRE(0), txpbthresh);
1132 }
1133 
/* wx_configure_port - set up port-level VLAN/QinQ and TPID registers
 * @wx: pointer to private structure
 *
 * Enables double-VLAN (QinQ) handling and programs the TPID registers.
 * Each WX_CFG_TAG_TPID register holds two 16-bit TPIDs (low | high << 16):
 * register 0 gets 802.1Q/802.1AD, the remaining three get 802.1Q in both
 * halves, and the software tpid[] cache mirrors that layout (entries 2..7
 * are therefore 802.1Q).
 */
static void wx_configure_port(struct wx *wx)
{
	u32 value, i;

	value = WX_CFG_PORT_CTL_D_VLAN | WX_CFG_PORT_CTL_QINQ;
	wr32m(wx, WX_CFG_PORT_CTL,
	      WX_CFG_PORT_CTL_D_VLAN |
	      WX_CFG_PORT_CTL_QINQ,
	      value);

	/* TPID pair 0: inner 802.1Q, outer 802.1AD */
	wr32(wx, WX_CFG_TAG_TPID(0),
	     ETH_P_8021Q | ETH_P_8021AD << 16);
	wx->tpid[0] = ETH_P_8021Q;
	wx->tpid[1] = ETH_P_8021AD;
	/* remaining TPID pairs default to 802.1Q in both halves */
	for (i = 1; i < 4; i++)
		wr32(wx, WX_CFG_TAG_TPID(i),
		     ETH_P_8021Q | ETH_P_8021Q << 16);
	for (i = 2; i < 8; i++)
		wx->tpid[i] = ETH_P_8021Q;
}
1154 
1155 /**
1156  *  wx_disable_sec_rx_path - Stops the receive data path
1157  *  @wx: pointer to private structure
1158  *
1159  *  Stops the receive data path and waits for the HW to internally empty
1160  *  the Rx security block
1161  **/
1162 static int wx_disable_sec_rx_path(struct wx *wx)
1163 {
1164 	u32 secrx;
1165 
1166 	wr32m(wx, WX_RSC_CTL,
1167 	      WX_RSC_CTL_RX_DIS, WX_RSC_CTL_RX_DIS);
1168 
1169 	return read_poll_timeout(rd32, secrx, secrx & WX_RSC_ST_RSEC_RDY,
1170 				 1000, 40000, false, wx, WX_RSC_ST);
1171 }
1172 
1173 /**
1174  *  wx_enable_sec_rx_path - Enables the receive data path
1175  *  @wx: pointer to private structure
1176  *
1177  *  Enables the receive data path.
1178  **/
1179 static void wx_enable_sec_rx_path(struct wx *wx)
1180 {
1181 	wr32m(wx, WX_RSC_CTL, WX_RSC_CTL_RX_DIS, 0);
1182 	WX_WRITE_FLUSH(wx);
1183 }
1184 
/* wx_set_rx_mode - program unicast/multicast/promiscuous receive filtering
 * @netdev: network interface device structure
 *
 * Builds the PSR control, per-VM L2 control and VLAN control register
 * values from the netdev flags (IFF_PROMISC/IFF_ALLMULTI/NETIF_F_RXALL),
 * writes the unicast and multicast address filters, and falls back to
 * (unicast/multicast) promiscuous mode when the filter tables overflow.
 */
void wx_set_rx_mode(struct net_device *netdev)
{
	struct wx *wx = netdev_priv(netdev);
	u32 fctrl, vmolr, vlnctrl;
	int count;

	/* Check for Promiscuous and All Multicast modes */
	fctrl = rd32(wx, WX_PSR_CTL);
	fctrl &= ~(WX_PSR_CTL_UPE | WX_PSR_CTL_MPE);
	vmolr = rd32(wx, WX_PSR_VM_L2CTL(0));
	vmolr &= ~(WX_PSR_VM_L2CTL_UPE |
		   WX_PSR_VM_L2CTL_MPE |
		   WX_PSR_VM_L2CTL_ROPE |
		   WX_PSR_VM_L2CTL_ROMPE);
	vlnctrl = rd32(wx, WX_PSR_VLAN_CTL);
	vlnctrl &= ~(WX_PSR_VLAN_CTL_VFE | WX_PSR_VLAN_CTL_CFIEN);

	/* set all bits that we expect to always be set */
	fctrl |= WX_PSR_CTL_BAM | WX_PSR_CTL_MFE;
	vmolr |= WX_PSR_VM_L2CTL_BAM |
		 WX_PSR_VM_L2CTL_AUPE |
		 WX_PSR_VM_L2CTL_VACC;
	vlnctrl |= WX_PSR_VLAN_CTL_VFE;

	wx->addr_ctrl.user_set_promisc = false;
	if (netdev->flags & IFF_PROMISC) {
		wx->addr_ctrl.user_set_promisc = true;
		fctrl |= WX_PSR_CTL_UPE | WX_PSR_CTL_MPE;
		/* pf don't want packets routing to vf, so clear UPE */
		vmolr |= WX_PSR_VM_L2CTL_MPE;
		/* VLAN filtering is off in promiscuous mode */
		vlnctrl &= ~WX_PSR_VLAN_CTL_VFE;
	}

	if (netdev->flags & IFF_ALLMULTI) {
		fctrl |= WX_PSR_CTL_MPE;
		vmolr |= WX_PSR_VM_L2CTL_MPE;
	}

	if (netdev->features & NETIF_F_RXALL) {
		vmolr |= (WX_PSR_VM_L2CTL_UPE | WX_PSR_VM_L2CTL_MPE);
		vlnctrl &= ~WX_PSR_VLAN_CTL_VFE;
		/* receive bad packets */
		wr32m(wx, WX_RSC_CTL,
		      WX_RSC_CTL_SAVE_MAC_ERR,
		      WX_RSC_CTL_SAVE_MAC_ERR);
	} else {
		/* normal mode: only accept frames matching the filters */
		vmolr |= WX_PSR_VM_L2CTL_ROPE | WX_PSR_VM_L2CTL_ROMPE;
	}

	/* Write addresses to available RAR registers, if there is not
	 * sufficient space to store all the addresses then enable
	 * unicast promiscuous mode
	 */
	count = wx_write_uc_addr_list(netdev, 0);
	if (count < 0) {
		vmolr &= ~WX_PSR_VM_L2CTL_ROPE;
		vmolr |= WX_PSR_VM_L2CTL_UPE;
	}

	/* Write addresses to the MTA, if the attempt fails
	 * then we should just turn on promiscuous mode so
	 * that we can at least receive multicast traffic
	 */
	count = wx_write_mc_addr_list(netdev);
	if (count < 0) {
		vmolr &= ~WX_PSR_VM_L2CTL_ROMPE;
		vmolr |= WX_PSR_VM_L2CTL_MPE;
	}

	wr32(wx, WX_PSR_VLAN_CTL, vlnctrl);
	wr32(wx, WX_PSR_CTL, fctrl);
	wr32(wx, WX_PSR_VM_L2CTL(0), vmolr);
}
EXPORT_SYMBOL(wx_set_rx_mode);
1259 
1260 static void wx_set_rx_buffer_len(struct wx *wx)
1261 {
1262 	struct net_device *netdev = wx->netdev;
1263 	u32 mhadd, max_frame;
1264 
1265 	max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
1266 	/* adjust max frame to be at least the size of a standard frame */
1267 	if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
1268 		max_frame = (ETH_FRAME_LEN + ETH_FCS_LEN);
1269 
1270 	mhadd = rd32(wx, WX_PSR_MAX_SZ);
1271 	if (max_frame != mhadd)
1272 		wr32(wx, WX_PSR_MAX_SZ, max_frame);
1273 }
1274 
1275 /**
1276  * wx_change_mtu - Change the Maximum Transfer Unit
1277  * @netdev: network interface device structure
1278  * @new_mtu: new value for maximum frame size
1279  *
1280  * Returns 0 on success, negative on failure
1281  **/
1282 int wx_change_mtu(struct net_device *netdev, int new_mtu)
1283 {
1284 	struct wx *wx = netdev_priv(netdev);
1285 
1286 	netdev->mtu = new_mtu;
1287 	wx_set_rx_buffer_len(wx);
1288 
1289 	return 0;
1290 }
1291 EXPORT_SYMBOL(wx_change_mtu);
1292 
/* Disable the specified rx queue */
void wx_disable_rx_queue(struct wx *wx, struct wx_ring *ring)
{
	u8 reg_idx = ring->reg_idx;
	u32 rxdctl;
	int ret;

	/* write value back with RRCFG.EN bit cleared */
	wr32m(wx, WX_PX_RR_CFG(reg_idx),
	      WX_PX_RR_CFG_RR_EN, 0);

	/* the hardware may take up to 100us to really disable the rx queue */
	ret = read_poll_timeout(rd32, rxdctl, !(rxdctl & WX_PX_RR_CFG_RR_EN),
				10, 100, true, wx, WX_PX_RR_CFG(reg_idx));

	if (ret == -ETIMEDOUT) {
		/* Just for information: the timeout is not treated as fatal */
		wx_err(wx,
		       "RRCFG.EN on Rx queue %d not cleared within the polling period\n",
		       reg_idx);
	}
}
EXPORT_SYMBOL(wx_disable_rx_queue);
1316 
/* wx_enable_rx_queue - wait for a previously-enabled rx queue to come up
 * @wx: pointer to private structure
 * @ring: ring whose RRCFG.EN bit should assert
 *
 * Polls (1ms interval, 10ms total) for the enable bit the caller has
 * already set; a timeout is logged but not treated as fatal.
 */
static void wx_enable_rx_queue(struct wx *wx, struct wx_ring *ring)
{
	u8 reg_idx = ring->reg_idx;
	u32 rxdctl;
	int ret;

	ret = read_poll_timeout(rd32, rxdctl, rxdctl & WX_PX_RR_CFG_RR_EN,
				1000, 10000, true, wx, WX_PX_RR_CFG(reg_idx));

	if (ret == -ETIMEDOUT) {
		/* Just for information */
		wx_err(wx,
		       "RRCFG.EN on Rx queue %d not set within the polling period\n",
		       reg_idx);
	}
}
1333 
/* wx_configure_srrctl - set header/packet buffer sizes for an rx ring
 * @wx: pointer to private structure
 * @rx_ring: ring whose receive-control register is programmed
 *
 * Clears split-mode and the size fields of WX_PX_RR_CFG, then programs the
 * header buffer length and packet buffer length for this ring.
 */
static void wx_configure_srrctl(struct wx *wx,
				struct wx_ring *rx_ring)
{
	u16 reg_idx = rx_ring->reg_idx;
	u32 srrctl;

	srrctl = rd32(wx, WX_PX_RR_CFG(reg_idx));
	srrctl &= ~(WX_PX_RR_CFG_RR_HDR_SZ |
		    WX_PX_RR_CFG_RR_BUF_SZ |
		    WX_PX_RR_CFG_SPLIT_MODE);
	/* configure header buffer length, needed for RSC */
	srrctl |= WX_RXBUFFER_256 << WX_PX_RR_CFG_BHDRSIZE_SHIFT;

	/* configure the packet buffer length */
	srrctl |= WX_RX_BUFSZ >> WX_PX_RR_CFG_BSIZEPKT_SHIFT;

	wr32(wx, WX_PX_RR_CFG(reg_idx), srrctl);
}
1352 
/* wx_configure_tx_ring - program one Tx descriptor ring and enable it
 * @wx: pointer to private structure
 * @ring: ring to configure
 *
 * Disables the queue, programs the descriptor base address, resets the
 * head/tail pointers, programs ring size and write-back threshold, clears
 * the software buffer-info array, then enables the queue and polls until
 * hardware reports it enabled.
 */
static void wx_configure_tx_ring(struct wx *wx,
				 struct wx_ring *ring)
{
	u32 txdctl = WX_PX_TR_CFG_ENABLE;
	u8 reg_idx = ring->reg_idx;
	u64 tdba = ring->dma;
	int ret;

	/* disable queue to avoid issues while updating state */
	wr32(wx, WX_PX_TR_CFG(reg_idx), WX_PX_TR_CFG_SWFLSH);
	WX_WRITE_FLUSH(wx);

	/* program the 64-bit descriptor base address in two halves */
	wr32(wx, WX_PX_TR_BAL(reg_idx), tdba & DMA_BIT_MASK(32));
	wr32(wx, WX_PX_TR_BAH(reg_idx), upper_32_bits(tdba));

	/* reset head and tail pointers */
	wr32(wx, WX_PX_TR_RP(reg_idx), 0);
	wr32(wx, WX_PX_TR_WP(reg_idx), 0);
	ring->tail = wx->hw_addr + WX_PX_TR_WP(reg_idx);

	/* ring size field is in units of 128 descriptors; 0 means max */
	if (ring->count < WX_MAX_TXD)
		txdctl |= ring->count / 128 << WX_PX_TR_CFG_TR_SIZE_SHIFT;
	txdctl |= 0x20 << WX_PX_TR_CFG_WTHRESH_SHIFT;

	/* reinitialize tx_buffer_info */
	memset(ring->tx_buffer_info, 0,
	       sizeof(struct wx_tx_buffer) * ring->count);

	/* enable queue */
	wr32(wx, WX_PX_TR_CFG(reg_idx), txdctl);

	/* poll to verify queue is enabled */
	ret = read_poll_timeout(rd32, txdctl, txdctl & WX_PX_TR_CFG_ENABLE,
				1000, 10000, true, wx, WX_PX_TR_CFG(reg_idx));
	if (ret == -ETIMEDOUT)
		wx_err(wx, "Could not enable Tx Queue %d\n", reg_idx);
}
1390 
/* wx_configure_rx_ring - program one Rx descriptor ring and enable it
 * @wx: pointer to private structure
 * @ring: ring to configure
 *
 * Disables the queue, programs the descriptor base address, ring size and
 * threshold, resets head/tail, configures buffer sizes, clears software
 * state, then re-enables the queue and refills its buffers.
 */
static void wx_configure_rx_ring(struct wx *wx,
				 struct wx_ring *ring)
{
	u16 reg_idx = ring->reg_idx;
	union wx_rx_desc *rx_desc;
	u64 rdba = ring->dma;
	u32 rxdctl;

	/* disable queue to avoid issues while updating state */
	rxdctl = rd32(wx, WX_PX_RR_CFG(reg_idx));
	wx_disable_rx_queue(wx, ring);

	/* program the 64-bit descriptor base address in two halves */
	wr32(wx, WX_PX_RR_BAL(reg_idx), rdba & DMA_BIT_MASK(32));
	wr32(wx, WX_PX_RR_BAH(reg_idx), upper_32_bits(rdba));

	/* ring size field is in units of 128 descriptors; 0 means max */
	if (ring->count == WX_MAX_RXD)
		rxdctl |= 0 << WX_PX_RR_CFG_RR_SIZE_SHIFT;
	else
		rxdctl |= (ring->count / 128) << WX_PX_RR_CFG_RR_SIZE_SHIFT;

	rxdctl |= 0x1 << WX_PX_RR_CFG_RR_THER_SHIFT;
	wr32(wx, WX_PX_RR_CFG(reg_idx), rxdctl);

	/* reset head and tail pointers */
	wr32(wx, WX_PX_RR_RP(reg_idx), 0);
	wr32(wx, WX_PX_RR_WP(reg_idx), 0);
	ring->tail = wx->hw_addr + WX_PX_RR_WP(reg_idx);

	wx_configure_srrctl(wx, ring);

	/* initialize rx_buffer_info */
	memset(ring->rx_buffer_info, 0,
	       sizeof(struct wx_rx_buffer) * ring->count);

	/* initialize Rx descriptor 0: zero length marks it not-yet-written */
	rx_desc = WX_RX_DESC(ring, 0);
	rx_desc->wb.upper.length = 0;

	/* enable receive descriptor ring */
	wr32m(wx, WX_PX_RR_CFG(reg_idx),
	      WX_PX_RR_CFG_RR_EN, WX_PX_RR_CFG_RR_EN);

	wx_enable_rx_queue(wx, ring);
	wx_alloc_rx_buffers(ring, wx_desc_unused(ring));
}
1436 
1437 /**
1438  * wx_configure_tx - Configure Transmit Unit after Reset
1439  * @wx: pointer to private structure
1440  *
1441  * Configure the Tx unit of the MAC after a reset.
1442  **/
1443 static void wx_configure_tx(struct wx *wx)
1444 {
1445 	u32 i;
1446 
1447 	/* TDM_CTL.TE must be before Tx queues are enabled */
1448 	wr32m(wx, WX_TDM_CTL,
1449 	      WX_TDM_CTL_TE, WX_TDM_CTL_TE);
1450 
1451 	/* Setup the HW Tx Head and Tail descriptor pointers */
1452 	for (i = 0; i < wx->num_tx_queues; i++)
1453 		wx_configure_tx_ring(wx, wx->tx_ring[i]);
1454 
1455 	wr32m(wx, WX_TSC_BUF_AE, WX_TSC_BUF_AE_THR, 0x10);
1456 
1457 	if (wx->mac.type == wx_mac_em)
1458 		wr32m(wx, WX_TSC_CTL, WX_TSC_CTL_TX_DIS | WX_TSC_CTL_TSEC_DIS, 0x1);
1459 
1460 	/* enable mac transmitter */
1461 	wr32m(wx, WX_MAC_TX_CFG,
1462 	      WX_MAC_TX_CFG_TE, WX_MAC_TX_CFG_TE);
1463 }
1464 
1465 /**
1466  * wx_configure_rx - Configure Receive Unit after Reset
1467  * @wx: pointer to private structure
1468  *
1469  * Configure the Rx unit of the MAC after a reset.
1470  **/
1471 static void wx_configure_rx(struct wx *wx)
1472 {
1473 	u32 psrtype, i;
1474 	int ret;
1475 
1476 	wx_disable_rx(wx);
1477 
1478 	psrtype = WX_RDB_PL_CFG_L4HDR |
1479 		  WX_RDB_PL_CFG_L3HDR |
1480 		  WX_RDB_PL_CFG_L2HDR |
1481 		  WX_RDB_PL_CFG_TUN_TUNHDR |
1482 		  WX_RDB_PL_CFG_TUN_TUNHDR;
1483 	wr32(wx, WX_RDB_PL_CFG(0), psrtype);
1484 
1485 	/* enable hw crc stripping */
1486 	wr32m(wx, WX_RSC_CTL, WX_RSC_CTL_CRC_STRIP, WX_RSC_CTL_CRC_STRIP);
1487 
1488 	if (wx->mac.type == wx_mac_sp) {
1489 		u32 psrctl;
1490 
1491 		/* RSC Setup */
1492 		psrctl = rd32(wx, WX_PSR_CTL);
1493 		psrctl |= WX_PSR_CTL_RSC_ACK; /* Disable RSC for ACK packets */
1494 		psrctl |= WX_PSR_CTL_RSC_DIS;
1495 		wr32(wx, WX_PSR_CTL, psrctl);
1496 	}
1497 
1498 	/* set_rx_buffer_len must be called before ring initialization */
1499 	wx_set_rx_buffer_len(wx);
1500 
1501 	/* Setup the HW Rx Head and Tail Descriptor Pointers and
1502 	 * the Base and Length of the Rx Descriptor Ring
1503 	 */
1504 	for (i = 0; i < wx->num_rx_queues; i++)
1505 		wx_configure_rx_ring(wx, wx->rx_ring[i]);
1506 
1507 	/* Enable all receives, disable security engine prior to block traffic */
1508 	ret = wx_disable_sec_rx_path(wx);
1509 	if (ret < 0)
1510 		wx_err(wx, "The register status is abnormal, please check device.");
1511 
1512 	wx_enable_rx(wx);
1513 	wx_enable_sec_rx_path(wx);
1514 }
1515 
/* wx_configure_isb - program the interrupt status block DMA address
 * @wx: pointer to private structure
 */
static void wx_configure_isb(struct wx *wx)
{
	/* set ISB Address */
	wr32(wx, WX_PX_ISB_ADDR_L, wx->isb_dma & DMA_BIT_MASK(32));
	if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
		wr32(wx, WX_PX_ISB_ADDR_H, upper_32_bits(wx->isb_dma));
}
1523 
/* wx_configure - full datapath configuration after reset
 * @wx: pointer to private structure
 *
 * Runs the individual configuration steps in the required order:
 * packet buffers and port settings first, then receive filtering,
 * then the Tx/Rx rings and the interrupt status block.
 */
void wx_configure(struct wx *wx)
{
	wx_set_rxpba(wx);
	wx_configure_port(wx);

	wx_set_rx_mode(wx->netdev);

	wx_enable_sec_rx_path(wx);

	wx_configure_tx(wx);
	wx_configure_rx(wx);
	wx_configure_isb(wx);
}
EXPORT_SYMBOL(wx_configure);
1538 
1539 /**
1540  *  wx_disable_pcie_master - Disable PCI-express master access
1541  *  @wx: pointer to hardware structure
1542  *
1543  *  Disables PCI-Express master access and verifies there are no pending
1544  *  requests.
1545  **/
1546 int wx_disable_pcie_master(struct wx *wx)
1547 {
1548 	int status = 0;
1549 	u32 val;
1550 
1551 	/* Always set this bit to ensure any future transactions are blocked */
1552 	pci_clear_master(wx->pdev);
1553 
1554 	/* Exit if master requests are blocked */
1555 	if (!(rd32(wx, WX_PX_TRANSACTION_PENDING)))
1556 		return 0;
1557 
1558 	/* Poll for master request bit to clear */
1559 	status = read_poll_timeout(rd32, val, !val, 100, WX_PCI_MASTER_DISABLE_TIMEOUT,
1560 				   false, wx, WX_PX_TRANSACTION_PENDING);
1561 	if (status < 0)
1562 		wx_err(wx, "PCIe transaction pending bit did not clear.\n");
1563 
1564 	return status;
1565 }
1566 EXPORT_SYMBOL(wx_disable_pcie_master);
1567 
1568 /**
1569  *  wx_stop_adapter - Generic stop Tx/Rx units
1570  *  @wx: pointer to hardware structure
1571  *
1572  *  Sets the adapter_stopped flag within wx_hw struct. Clears interrupts,
1573  *  disables transmit and receive units. The adapter_stopped flag is used by
1574  *  the shared code and drivers to determine if the adapter is in a stopped
1575  *  state and should not touch the hardware.
1576  **/
1577 int wx_stop_adapter(struct wx *wx)
1578 {
1579 	u16 i;
1580 
1581 	/* Set the adapter_stopped flag so other driver functions stop touching
1582 	 * the hardware
1583 	 */
1584 	wx->adapter_stopped = true;
1585 
1586 	/* Disable the receive unit */
1587 	wx_disable_rx(wx);
1588 
1589 	/* Set interrupt mask to stop interrupts from being generated */
1590 	wx_intr_disable(wx, WX_INTR_ALL);
1591 
1592 	/* Clear any pending interrupts, flush previous writes */
1593 	wr32(wx, WX_PX_MISC_IC, 0xffffffff);
1594 	wr32(wx, WX_BME_CTL, 0x3);
1595 
1596 	/* Disable the transmit unit.  Each queue must be disabled. */
1597 	for (i = 0; i < wx->mac.max_tx_queues; i++) {
1598 		wr32m(wx, WX_PX_TR_CFG(i),
1599 		      WX_PX_TR_CFG_SWFLSH | WX_PX_TR_CFG_ENABLE,
1600 		      WX_PX_TR_CFG_SWFLSH);
1601 	}
1602 
1603 	/* Disable the receive unit by stopping each queue */
1604 	for (i = 0; i < wx->mac.max_rx_queues; i++) {
1605 		wr32m(wx, WX_PX_RR_CFG(i),
1606 		      WX_PX_RR_CFG_RR_EN, 0);
1607 	}
1608 
1609 	/* flush all queues disables */
1610 	WX_WRITE_FLUSH(wx);
1611 
1612 	/* Prevent the PCI-E bus from hanging by disabling PCI-E master
1613 	 * access and verify no pending requests
1614 	 */
1615 	return wx_disable_pcie_master(wx);
1616 }
1617 EXPORT_SYMBOL(wx_stop_adapter);
1618 
/* wx_reset_misc - miscellaneous MAC/PSR setup after a reset
 * @wx: pointer to private structure
 *
 * Programs jumbo-frame acceptance, statistics behaviour, flow control,
 * errata workarounds for the flexible filter tables, and the pause-frame
 * destination MAC address.
 */
void wx_reset_misc(struct wx *wx)
{
	int i;

	/* receive packets that size > 2048 */
	wr32m(wx, WX_MAC_RX_CFG, WX_MAC_RX_CFG_JE, WX_MAC_RX_CFG_JE);

	/* clear counters on read */
	wr32m(wx, WX_MMC_CONTROL,
	      WX_MMC_CONTROL_RSTONRD, WX_MMC_CONTROL_RSTONRD);

	wr32m(wx, WX_MAC_RX_FLOW_CTRL,
	      WX_MAC_RX_FLOW_CTRL_RFE, WX_MAC_RX_FLOW_CTRL_RFE);

	wr32(wx, WX_MAC_PKT_FLT, WX_MAC_PKT_FLT_PR);

	wr32m(wx, WX_MIS_RST_ST,
	      WX_MIS_RST_ST_RST_INIT, 0x1E00);

	/* errata 4: initialize mng flex tbl and wakeup flex tbl*/
	wr32(wx, WX_PSR_MNG_FLEX_SEL, 0);
	for (i = 0; i < 16; i++) {
		wr32(wx, WX_PSR_MNG_FLEX_DW_L(i), 0);
		wr32(wx, WX_PSR_MNG_FLEX_DW_H(i), 0);
		wr32(wx, WX_PSR_MNG_FLEX_MSK(i), 0);
	}
	wr32(wx, WX_PSR_LAN_FLEX_SEL, 0);
	for (i = 0; i < 16; i++) {
		wr32(wx, WX_PSR_LAN_FLEX_DW_L(i), 0);
		wr32(wx, WX_PSR_LAN_FLEX_DW_H(i), 0);
		wr32(wx, WX_PSR_LAN_FLEX_MSK(i), 0);
	}

	/* set pause frame dst mac addr: 01:80:C2:00:00:01 */
	wr32(wx, WX_RDB_PFCMACDAL, 0xC2000001);
	wr32(wx, WX_RDB_PFCMACDAH, 0x0180);
}
EXPORT_SYMBOL(wx_reset_misc);
1657 
1658 /**
1659  *  wx_get_pcie_msix_counts - Gets MSI-X vector count
1660  *  @wx: pointer to hardware structure
1661  *  @msix_count: number of MSI interrupts that can be obtained
1662  *  @max_msix_count: number of MSI interrupts that mac need
1663  *
1664  *  Read PCIe configuration space, and get the MSI-X vector count from
1665  *  the capabilities table.
1666  **/
1667 int wx_get_pcie_msix_counts(struct wx *wx, u16 *msix_count, u16 max_msix_count)
1668 {
1669 	struct pci_dev *pdev = wx->pdev;
1670 	struct device *dev = &pdev->dev;
1671 	int pos;
1672 
1673 	*msix_count = 1;
1674 	pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
1675 	if (!pos) {
1676 		dev_err(dev, "Unable to find MSI-X Capabilities\n");
1677 		return -EINVAL;
1678 	}
1679 	pci_read_config_word(pdev,
1680 			     pos + PCI_MSIX_FLAGS,
1681 			     msix_count);
1682 	*msix_count &= WX_PCIE_MSIX_TBL_SZ_MASK;
1683 	/* MSI-X count is zero-based in HW */
1684 	*msix_count += 1;
1685 
1686 	if (*msix_count > max_msix_count)
1687 		*msix_count = max_msix_count;
1688 
1689 	return 0;
1690 }
1691 EXPORT_SYMBOL(wx_get_pcie_msix_counts);
1692 
1693 int wx_sw_init(struct wx *wx)
1694 {
1695 	struct pci_dev *pdev = wx->pdev;
1696 	u32 ssid = 0;
1697 	int err = 0;
1698 
1699 	wx->vendor_id = pdev->vendor;
1700 	wx->device_id = pdev->device;
1701 	wx->revision_id = pdev->revision;
1702 	wx->oem_svid = pdev->subsystem_vendor;
1703 	wx->oem_ssid = pdev->subsystem_device;
1704 	wx->bus.device = PCI_SLOT(pdev->devfn);
1705 	wx->bus.func = PCI_FUNC(pdev->devfn);
1706 
1707 	if (wx->oem_svid == PCI_VENDOR_ID_WANGXUN) {
1708 		wx->subsystem_vendor_id = pdev->subsystem_vendor;
1709 		wx->subsystem_device_id = pdev->subsystem_device;
1710 	} else {
1711 		err = wx_flash_read_dword(wx, 0xfffdc, &ssid);
1712 		if (!err)
1713 			wx->subsystem_device_id = swab16((u16)ssid);
1714 
1715 		return err;
1716 	}
1717 
1718 	wx->mac_table = kcalloc(wx->mac.num_rar_entries,
1719 				sizeof(struct wx_mac_addr),
1720 				GFP_KERNEL);
1721 	if (!wx->mac_table) {
1722 		wx_err(wx, "mac_table allocation failed\n");
1723 		return -ENOMEM;
1724 	}
1725 
1726 	return 0;
1727 }
1728 EXPORT_SYMBOL(wx_sw_init);
1729 
1730 MODULE_LICENSE("GPL");
1731