1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
4  * All rights reserved.
5  *
6  * Purpose: driver entry for initial, open, close, tx and rx.
7  *
8  * Author: Lyndon Chen
9  *
10  * Date: Jan 8, 2003
11  *
12  * Functions:
13  *
14  *   vt6655_probe - module initial (insmod) driver entry
15  *   vt6655_remove - module remove entry
16  *   device_free_info - device structure resource free function
17  *   device_print_info - print out resource
18  *   device_rx_srv - rx service function
19  *   device_alloc_rx_buf - rx buffer pre-allocated function
20  *   device_free_rx_buf - free rx buffer function
21  *   device_free_tx_buf - free tx buffer function
22  *   device_init_rd0_ring - initial rd dma0 ring
23  *   device_init_rd1_ring - initial rd dma1 ring
24  *   device_init_td0_ring - initial tx dma0 ring buffer
25  *   device_init_td1_ring - initial tx dma1 ring buffer
26  *   device_init_registers - initial MAC & BBP & RF internal registers.
27  *   device_init_rings - initial tx/rx ring buffer
28  *   device_free_rings - free all allocated ring buffer
29  *   device_tx_srv - tx interrupt service function
30  *
31  * Revision History:
32  */
33 
34 #include <linux/file.h>
35 #include "device.h"
36 #include "card.h"
37 #include "channel.h"
38 #include "baseband.h"
39 #include "mac.h"
40 #include "power.h"
41 #include "rxtx.h"
42 #include "dpc.h"
43 #include "rf.h"
44 #include <linux/delay.h>
45 #include <linux/kthread.h>
46 #include <linux/slab.h>
47 
48 /*---------------------  Static Definitions -------------------------*/
49 /*
50  * Define module options
51  */
52 MODULE_AUTHOR("VIA Networking Technologies, Inc., <lyndonchen@vntek.com.tw>");
53 MODULE_LICENSE("GPL");
54 MODULE_DESCRIPTION("VIA Networking Solomon-A/B/G Wireless LAN Adapter Driver");
55 
56 #define DEVICE_PARAM(N, D)
57 
58 #define RX_DESC_MIN0     16
59 #define RX_DESC_MAX0     128
60 #define RX_DESC_DEF0     32
61 DEVICE_PARAM(RxDescriptors0, "Number of receive descriptors0");
62 
63 #define RX_DESC_MIN1     16
64 #define RX_DESC_MAX1     128
65 #define RX_DESC_DEF1     32
66 DEVICE_PARAM(RxDescriptors1, "Number of receive descriptors1");
67 
68 #define TX_DESC_MIN0     16
69 #define TX_DESC_MAX0     128
70 #define TX_DESC_DEF0     32
71 DEVICE_PARAM(TxDescriptors0, "Number of transmit descriptors0");
72 
73 #define TX_DESC_MIN1     16
74 #define TX_DESC_MAX1     128
75 #define TX_DESC_DEF1     64
76 DEVICE_PARAM(TxDescriptors1, "Number of transmit descriptors1");
77 
78 #define INT_WORKS_DEF   20
79 #define INT_WORKS_MIN   10
80 #define INT_WORKS_MAX   64
81 
82 DEVICE_PARAM(int_works, "Number of packets per interrupt services");
83 
84 #define RTS_THRESH_DEF     2347
85 
86 #define FRAG_THRESH_DEF     2346
87 
88 #define SHORT_RETRY_MIN     0
89 #define SHORT_RETRY_MAX     31
90 #define SHORT_RETRY_DEF     8
91 
92 DEVICE_PARAM(ShortRetryLimit, "Short frame retry limits");
93 
94 #define LONG_RETRY_MIN     0
95 #define LONG_RETRY_MAX     15
96 #define LONG_RETRY_DEF     4
97 
98 DEVICE_PARAM(LongRetryLimit, "long frame retry limits");
99 
100 /* BasebandType[] baseband type selected
101  * 0: indicate 802.11a type
102  * 1: indicate 802.11b type
103  * 2: indicate 802.11g type
104  */
105 #define BBP_TYPE_MIN     0
106 #define BBP_TYPE_MAX     2
107 #define BBP_TYPE_DEF     2
108 
109 DEVICE_PARAM(BasebandType, "baseband type");
110 
111 /*
112  * Static vars definitions
113  */
/* PCI IDs this driver binds to: VIA vendor, device 0x3253 (VT6655). */
static const struct pci_device_id vt6655_pci_id_table[] = {
	{ PCI_VDEVICE(VIA, 0x3253) },
	{ 0, }		/* sentinel terminating the table */
};
118 
119 /*---------------------  Static Functions  --------------------------*/
120 
121 static int  vt6655_probe(struct pci_dev *pcid, const struct pci_device_id *ent);
122 static void device_free_info(struct vnt_private *priv);
123 static void device_print_info(struct vnt_private *priv);
124 
125 static void vt6655_mac_write_bssid_addr(void __iomem *iobase, const u8 *mac_addr);
126 static void vt6655_mac_read_ether_addr(void __iomem *iobase, u8 *mac_addr);
127 
128 static int device_init_rd0_ring(struct vnt_private *priv);
129 static int device_init_rd1_ring(struct vnt_private *priv);
130 static int device_init_td0_ring(struct vnt_private *priv);
131 static int device_init_td1_ring(struct vnt_private *priv);
132 
133 static int  device_rx_srv(struct vnt_private *priv, unsigned int idx);
134 static int  device_tx_srv(struct vnt_private *priv, unsigned int idx);
135 static bool device_alloc_rx_buf(struct vnt_private *, struct vnt_rx_desc *);
136 static void device_free_rx_buf(struct vnt_private *priv,
137 			       struct vnt_rx_desc *rd);
138 static void device_init_registers(struct vnt_private *priv);
139 static void device_free_tx_buf(struct vnt_private *, struct vnt_tx_desc *);
140 static void device_free_td0_ring(struct vnt_private *priv);
141 static void device_free_td1_ring(struct vnt_private *priv);
142 static void device_free_rd0_ring(struct vnt_private *priv);
143 static void device_free_rd1_ring(struct vnt_private *priv);
144 static void device_free_rings(struct vnt_private *priv);
145 
146 /*---------------------  Export Variables  --------------------------*/
147 
148 /*---------------------  Export Functions  --------------------------*/
149 
/* PCI remove callback: release everything owned by the device instance. */
static void vt6655_remove(struct pci_dev *pcid)
{
	struct vnt_private *priv = pci_get_drvdata(pcid);

	if (priv)
		device_free_info(priv);
}
158 
159 static void device_get_options(struct vnt_private *priv)
160 {
161 	struct vnt_options *opts = &priv->opts;
162 
163 	opts->rx_descs0 = RX_DESC_DEF0;
164 	opts->rx_descs1 = RX_DESC_DEF1;
165 	opts->tx_descs[0] = TX_DESC_DEF0;
166 	opts->tx_descs[1] = TX_DESC_DEF1;
167 	opts->int_works = INT_WORKS_DEF;
168 
169 	opts->short_retry = SHORT_RETRY_DEF;
170 	opts->long_retry = LONG_RETRY_DEF;
171 	opts->bbp_type = BBP_TYPE_DEF;
172 }
173 
174 static void
175 device_set_options(struct vnt_private *priv)
176 {
177 	priv->byShortRetryLimit = priv->opts.short_retry;
178 	priv->byLongRetryLimit = priv->opts.long_retry;
179 	priv->byBBType = priv->opts.bbp_type;
180 	priv->byPacketType = priv->byBBType;
181 	priv->byAutoFBCtrl = AUTO_FB_0;
182 	priv->bUpdateBBVGA = true;
183 	priv->preamble_type = 0;
184 
185 	pr_debug(" byShortRetryLimit= %d\n", (int)priv->byShortRetryLimit);
186 	pr_debug(" byLongRetryLimit= %d\n", (int)priv->byLongRetryLimit);
187 	pr_debug(" preamble_type= %d\n", (int)priv->preamble_type);
188 	pr_debug(" byShortPreamble= %d\n", (int)priv->byShortPreamble);
189 	pr_debug(" byBBType= %d\n", (int)priv->byBBType);
190 }
191 
192 static void vt6655_mac_write_bssid_addr(void __iomem *iobase, const u8 *mac_addr)
193 {
194 	iowrite8(1, iobase + MAC_REG_PAGE1SEL);
195 	for (int i = 0; i < 6; i++)
196 		iowrite8(mac_addr[i], iobase + MAC_REG_BSSID0 + i);
197 	iowrite8(0, iobase + MAC_REG_PAGE1SEL);
198 }
199 
200 static void vt6655_mac_read_ether_addr(void __iomem *iobase, u8 *mac_addr)
201 {
202 	iowrite8(1, iobase + MAC_REG_PAGE1SEL);
203 	for (int i = 0; i < 6; i++)
204 		mac_addr[i] = ioread8(iobase + MAC_REG_PAR0 + i);
205 	iowrite8(0, iobase + MAC_REG_PAGE1SEL);
206 }
207 
208 /*
209  * Initialisation of MAC & BBP registers
210  */
211 
/*
 * Bring the MAC, baseband and RF hardware from reset into an operational
 * state: read the EEPROM configuration (antenna, power tables, radio
 * control), program retry limits and the TSF counter, reset the rings
 * and finally enable the MAC with RX/TX on.  Sequence is order-sensitive.
 */
static void device_init_registers(struct vnt_private *priv)
{
	unsigned long flags;
	unsigned int ii;
	unsigned char byValue;
	unsigned char byCCKPwrdBm = 0;
	unsigned char byOFDMPwrdBm = 0;

	MACbShutdown(priv);
	bb_software_reset(priv);

	/* Do MACbSoftwareReset in MACvInitialize */
	MACbSoftwareReset(priv);

	priv->bAES = false;

	/* Only used in 11g type, sync with ERP IE */
	priv->bProtectMode = false;

	priv->bNonERPPresent = false;
	priv->bBarkerPreambleMd = false;
	priv->wCurrentRate = RATE_1M;
	priv->byTopOFDMBasicRate = RATE_24M;
	priv->byTopCCKBasicRate = RATE_1M;

	/* init MAC */
	MACvInitialize(priv);

	/* Get Local ID */
	priv->local_id = ioread8(priv->port_offset + MAC_REG_LOCALID);

	/* EEPROM is read under the device lock into the in-memory copy. */
	spin_lock_irqsave(&priv->lock, flags);

	SROMvReadAllContents(priv->port_offset, priv->abyEEPROM);

	spin_unlock_irqrestore(&priv->lock, flags);

	/* Get Channel range */
	priv->byMinChannel = 1;
	priv->byMaxChannel = CB_MAX_CHANNEL;

	/* Get antenna configuration from the EEPROM */
	byValue = SROMbyReadEmbedded(priv->port_offset, EEP_OFS_ANTENNA);
	if (byValue & EEP_ANTINV)
		priv->bTxRxAntInv = true;
	else
		priv->bTxRxAntInv = false;

	byValue &= (EEP_ANTENNA_AUX | EEP_ANTENNA_MAIN);
	/* if not set default is All */
	if (byValue == 0)
		byValue = (EEP_ANTENNA_AUX | EEP_ANTENNA_MAIN);

	/* Both antennas present: TX on B, RX on the other (or same if inverted). */
	if (byValue == (EEP_ANTENNA_AUX | EEP_ANTENNA_MAIN)) {
		priv->byAntennaCount = 2;
		priv->byTxAntennaMode = ANT_B;
		priv->dwTxAntennaSel = 1;
		priv->dwRxAntennaSel = 1;

		if (priv->bTxRxAntInv)
			priv->byRxAntennaMode = ANT_A;
		else
			priv->byRxAntennaMode = ANT_B;
	} else  {
		priv->byAntennaCount = 1;
		priv->dwTxAntennaSel = 0;
		priv->dwRxAntennaSel = 0;

		if (byValue & EEP_ANTENNA_AUX) {
			priv->byTxAntennaMode = ANT_A;

			if (priv->bTxRxAntInv)
				priv->byRxAntennaMode = ANT_B;
			else
				priv->byRxAntennaMode = ANT_A;
		} else {
			priv->byTxAntennaMode = ANT_B;

			if (priv->bTxRxAntInv)
				priv->byRxAntennaMode = ANT_A;
			else
				priv->byRxAntennaMode = ANT_B;
		}
	}

	/* Set initial antenna mode */
	bb_set_tx_antenna_mode(priv, priv->byTxAntennaMode);
	bb_set_rx_antenna_mode(priv, priv->byRxAntennaMode);

	/* zonetype initial */
	priv->byOriginalZonetype = priv->abyEEPROM[EEP_OFS_ZONETYPE];

	if (!priv->bZoneRegExist)
		priv->byZoneType = priv->abyEEPROM[EEP_OFS_ZONETYPE];

	pr_debug("priv->byZoneType = %x\n", priv->byZoneType);

	/* Init RF module */
	RFbInit(priv);

	/* Get Desire Power Value */
	priv->byCurPwr = 0xFF;
	priv->byCCKPwr = SROMbyReadEmbedded(priv->port_offset, EEP_OFS_PWR_CCK);
	priv->byOFDMPwrG = SROMbyReadEmbedded(priv->port_offset,
					      EEP_OFS_PWR_OFDMG);

	/*
	 * Load per-channel power tables (index 0 unused; channels start at 1).
	 * A zero entry in the EEPROM falls back to the global default power.
	 */
	for (ii = 0; ii < CB_MAX_CHANNEL_24G; ii++) {
		priv->abyCCKPwrTbl[ii + 1] =
			SROMbyReadEmbedded(priv->port_offset,
					   (unsigned char)(ii + EEP_OFS_CCK_PWR_TBL));
		if (priv->abyCCKPwrTbl[ii + 1] == 0)
			priv->abyCCKPwrTbl[ii + 1] = priv->byCCKPwr;

		priv->abyOFDMPwrTbl[ii + 1] =
			SROMbyReadEmbedded(priv->port_offset,
					   (unsigned char)(ii + EEP_OFS_OFDM_PWR_TBL));
		if (priv->abyOFDMPwrTbl[ii + 1] == 0)
			priv->abyOFDMPwrTbl[ii + 1] = priv->byOFDMPwrG;

		priv->abyCCKDefaultPwr[ii + 1] = byCCKPwrdBm;
		priv->abyOFDMDefaultPwr[ii + 1] = byOFDMPwrdBm;
	}

	/* recover 12,13 ,14channel for EUROPE by 11 channel */
	for (ii = 11; ii < 14; ii++) {
		priv->abyCCKPwrTbl[ii] = priv->abyCCKPwrTbl[10];
		priv->abyOFDMPwrTbl[ii] = priv->abyOFDMPwrTbl[10];
	}

	/* Load OFDM A Power Table */
	for (ii = 0; ii < CB_MAX_CHANNEL_5G; ii++) {
		priv->abyOFDMPwrTbl[ii + CB_MAX_CHANNEL_24G + 1] =
			SROMbyReadEmbedded(priv->port_offset,
					   (unsigned char)(ii + EEP_OFS_OFDMA_PWR_TBL));

		priv->abyOFDMDefaultPwr[ii + CB_MAX_CHANNEL_24G + 1] =
			SROMbyReadEmbedded(priv->port_offset,
					   (unsigned char)(ii + EEP_OFS_OFDMA_PWR_dBm));
	}

	/* Extra MSRCTL1 setup only for chip revisions after VT3253 B1. */
	if (priv->local_id > REV_ID_VT3253_B1) {
		MACvSelectPage1(priv->port_offset);

		iowrite8(MSRCTL1_TXPWR | MSRCTL1_CSAPAREN, priv->port_offset + MAC_REG_MSRCTL + 1);

		MACvSelectPage0(priv->port_offset);
	}

	/* use relative tx timeout and 802.11i D4 */
	vt6655_mac_word_reg_bits_on(priv->port_offset, MAC_REG_CFG,
				    (CFG_TKIPOPT | CFG_NOTXTIMEOUT));

	/* set performance parameter by registry */
	MACvSetShortRetryLimit(priv, priv->byShortRetryLimit);
	MACvSetLongRetryLimit(priv, priv->byLongRetryLimit);

	/* reset TSF counter */
	iowrite8(TFTCTL_TSFCNTRST, priv->port_offset + MAC_REG_TFTCTL);
	/* enable TSF counter */
	iowrite8(TFTCTL_TSFCNTREN, priv->port_offset + MAC_REG_TFTCTL);

	/* initialize BBP registers */
	bb_vt3253_init(priv);

	if (priv->bUpdateBBVGA) {
		priv->byBBVGACurrent = priv->abyBBVGA[0];
		priv->byBBVGANew = priv->byBBVGACurrent;
		bb_set_vga_gain_offset(priv, priv->abyBBVGA[0]);
	}

	bb_set_rx_antenna_mode(priv, priv->byRxAntennaMode);
	bb_set_tx_antenna_mode(priv, priv->byTxAntennaMode);

	/* Set BB and packet type at the same time. */
	/* Set Short Slot Time, xIFS, and RSPINF. */
	priv->wCurrentRate = RATE_54M;

	priv->radio_off = false;

	priv->byRadioCtl = SROMbyReadEmbedded(priv->port_offset,
					      EEP_OFS_RADIOCTL);
	priv->hw_radio_off = false;

	if (priv->byRadioCtl & EEP_RADIOCTL_ENABLE) {
		/* Get GPIO */
		priv->byGPIO = ioread8(priv->port_offset + MAC_REG_GPIOCTL1);

		/* GPIO0 level (optionally inverted per EEPROM) reports the HW kill switch. */
		if (((priv->byGPIO & GPIO0_DATA) &&
		     !(priv->byRadioCtl & EEP_RADIOCTL_INV)) ||
		     (!(priv->byGPIO & GPIO0_DATA) &&
		     (priv->byRadioCtl & EEP_RADIOCTL_INV)))
			priv->hw_radio_off = true;
	}

	if (priv->hw_radio_off || priv->bRadioControlOff)
		CARDbRadioPowerOff(priv);

	/* get Permanent network address */
	SROMvReadEtherAddress(priv->port_offset, priv->abyCurrentNetAddr);
	pr_debug("Network address = %pM\n", priv->abyCurrentNetAddr);

	/* reset Rx pointer */
	CARDvSafeResetRx(priv);
	/* reset Tx pointer */
	CARDvSafeResetTx(priv);

	if (priv->local_id <= REV_ID_VT3253_A1)
		vt6655_mac_reg_bits_on(priv->port_offset, MAC_REG_RCR, RCR_WPAERR);

	/* Turn On Rx DMA */
	MACvReceive0(priv->port_offset);
	MACvReceive1(priv->port_offset);

	/* start the adapter */
	iowrite8(HOSTCR_MACEN | HOSTCR_RXON | HOSTCR_TXON, priv->port_offset + MAC_REG_HOSTCR);
}
429 
/* Log the device's MAC address, I/O and memory bases, and IRQ line. */
static void device_print_info(struct vnt_private *priv)
{
	dev_info(&priv->pcid->dev, "MAC=%pM IO=0x%lx Mem=0x%lx IRQ=%d\n",
		 priv->abyCurrentNetAddr, (unsigned long)priv->ioaddr,
		 (unsigned long)priv->port_offset, priv->pcid->irq);
}
436 
/*
 * Release every resource attached to the device instance, in reverse
 * order of acquisition: mac80211 registration, MMIO mapping, PCI
 * regions, and finally the ieee80211_hw itself.  Safe to call with a
 * partially-initialised priv (each step is guarded).
 */
static void device_free_info(struct vnt_private *priv)
{
	if (!priv)
		return;

	if (priv->mac_hw)
		ieee80211_unregister_hw(priv->hw);

	if (priv->port_offset)
		iounmap(priv->port_offset);

	if (priv->pcid)
		pci_release_regions(priv->pcid);

	if (priv->hw)
		ieee80211_free_hw(priv->hw);
}
454 
/*
 * Allocate the DMA memory backing all four descriptor rings and the TX
 * packet buffers, then carve both pools into per-ring slices.
 *
 * Pool 1 (descriptors, one dma_alloc_coherent):
 *   [ RD0 ring | RD1 ring | TD0 ring | TD1 ring ]
 * Pool 2 (TX bounce buffers, one dma_alloc_coherent):
 *   [ TX0 bufs | TX1 bufs | beacon buf | tmp buf ]
 *
 * Returns true on success; on failure everything already allocated is
 * freed and false is returned.
 */
static bool device_init_rings(struct vnt_private *priv)
{
	void *vir_pool;

	/*allocate all RD/TD rings a single pool*/
	vir_pool = dma_alloc_coherent(&priv->pcid->dev,
				      priv->opts.rx_descs0 * sizeof(struct vnt_rx_desc) +
				      priv->opts.rx_descs1 * sizeof(struct vnt_rx_desc) +
				      priv->opts.tx_descs[0] * sizeof(struct vnt_tx_desc) +
				      priv->opts.tx_descs[1] * sizeof(struct vnt_tx_desc),
				      &priv->pool_dma, GFP_ATOMIC);
	if (!vir_pool) {
		dev_err(&priv->pcid->dev, "allocate desc dma memory failed\n");
		return false;
	}

	/* CPU-side slice offsets into the descriptor pool... */
	priv->aRD0Ring = vir_pool;
	priv->aRD1Ring = vir_pool +
		priv->opts.rx_descs0 * sizeof(struct vnt_rx_desc);

	/* ...and the matching bus addresses at the same offsets. */
	priv->rd0_pool_dma = priv->pool_dma;
	priv->rd1_pool_dma = priv->rd0_pool_dma +
		priv->opts.rx_descs0 * sizeof(struct vnt_rx_desc);

	priv->tx0_bufs = dma_alloc_coherent(&priv->pcid->dev,
					    priv->opts.tx_descs[0] * PKT_BUF_SZ +
					    priv->opts.tx_descs[1] * PKT_BUF_SZ +
					    CB_BEACON_BUF_SIZE +
					    CB_MAX_BUF_SIZE,
					    &priv->tx_bufs_dma0, GFP_ATOMIC);
	if (!priv->tx0_bufs) {
		dev_err(&priv->pcid->dev, "allocate buf dma memory failed\n");

		/* Undo the descriptor pool allocation before bailing out. */
		dma_free_coherent(&priv->pcid->dev,
				  priv->opts.rx_descs0 * sizeof(struct vnt_rx_desc) +
				  priv->opts.rx_descs1 * sizeof(struct vnt_rx_desc) +
				  priv->opts.tx_descs[0] * sizeof(struct vnt_tx_desc) +
				  priv->opts.tx_descs[1] * sizeof(struct vnt_tx_desc),
				  vir_pool, priv->pool_dma);
		return false;
	}

	priv->td0_pool_dma = priv->rd1_pool_dma +
		priv->opts.rx_descs1 * sizeof(struct vnt_rx_desc);

	priv->td1_pool_dma = priv->td0_pool_dma +
		priv->opts.tx_descs[0] * sizeof(struct vnt_tx_desc);

	/* vir_pool: pvoid type */
	priv->apTD0Rings = vir_pool
		+ priv->opts.rx_descs0 * sizeof(struct vnt_rx_desc)
		+ priv->opts.rx_descs1 * sizeof(struct vnt_rx_desc);

	priv->apTD1Rings = vir_pool
		+ priv->opts.rx_descs0 * sizeof(struct vnt_rx_desc)
		+ priv->opts.rx_descs1 * sizeof(struct vnt_rx_desc)
		+ priv->opts.tx_descs[0] * sizeof(struct vnt_tx_desc);

	/* Slice the TX buffer pool: tx0 | tx1 | beacon | tmp. */
	priv->tx1_bufs = priv->tx0_bufs +
		priv->opts.tx_descs[0] * PKT_BUF_SZ;

	priv->tx_beacon_bufs = priv->tx1_bufs +
		priv->opts.tx_descs[1] * PKT_BUF_SZ;

	priv->pbyTmpBuff = priv->tx_beacon_bufs +
		CB_BEACON_BUF_SIZE;

	priv->tx_bufs_dma1 = priv->tx_bufs_dma0 +
		priv->opts.tx_descs[0] * PKT_BUF_SZ;

	priv->tx_beacon_dma = priv->tx_bufs_dma1 +
		priv->opts.tx_descs[1] * PKT_BUF_SZ;

	return true;
}
530 
/*
 * Free the two coherent DMA pools created by device_init_rings():
 * the combined descriptor pool and (if allocated) the TX buffer pool.
 * Sizes must mirror the allocation arithmetic exactly.
 */
static void device_free_rings(struct vnt_private *priv)
{
	dma_free_coherent(&priv->pcid->dev,
			  priv->opts.rx_descs0 * sizeof(struct vnt_rx_desc) +
			  priv->opts.rx_descs1 * sizeof(struct vnt_rx_desc) +
			  priv->opts.tx_descs[0] * sizeof(struct vnt_tx_desc) +
			  priv->opts.tx_descs[1] * sizeof(struct vnt_tx_desc),
			  priv->aRD0Ring, priv->pool_dma);

	if (priv->tx0_bufs)
		dma_free_coherent(&priv->pcid->dev,
				  priv->opts.tx_descs[0] * PKT_BUF_SZ +
				  priv->opts.tx_descs[1] * PKT_BUF_SZ +
				  CB_BEACON_BUF_SIZE +
				  CB_MAX_BUF_SIZE,
				  priv->tx0_bufs, priv->tx_bufs_dma0);
}
548 
549 static int device_init_rd0_ring(struct vnt_private *priv)
550 {
551 	int i;
552 	dma_addr_t      curr = priv->rd0_pool_dma;
553 	struct vnt_rx_desc *desc;
554 	int ret;
555 
556 	/* Init the RD0 ring entries */
557 	for (i = 0; i < priv->opts.rx_descs0;
558 	     i ++, curr += sizeof(struct vnt_rx_desc)) {
559 		desc = &priv->aRD0Ring[i];
560 		desc->rd_info = kzalloc(sizeof(*desc->rd_info), GFP_KERNEL);
561 		if (!desc->rd_info) {
562 			ret = -ENOMEM;
563 			goto err_free_desc;
564 		}
565 
566 		if (!device_alloc_rx_buf(priv, desc)) {
567 			dev_err(&priv->pcid->dev, "can not alloc rx bufs\n");
568 			ret = -ENOMEM;
569 			goto err_free_rd;
570 		}
571 
572 		desc->next = &priv->aRD0Ring[(i + 1) % priv->opts.rx_descs0];
573 		desc->next_desc = cpu_to_le32(curr + sizeof(struct vnt_rx_desc));
574 	}
575 
576 	if (i > 0)
577 		priv->aRD0Ring[i - 1].next_desc = cpu_to_le32(priv->rd0_pool_dma);
578 	priv->pCurrRD[0] = &priv->aRD0Ring[0];
579 
580 	return 0;
581 
582 err_free_rd:
583 	kfree(desc->rd_info);
584 
585 err_free_desc:
586 	while (--i) {
587 		desc = &priv->aRD0Ring[i];
588 		device_free_rx_buf(priv, desc);
589 		kfree(desc->rd_info);
590 	}
591 
592 	return ret;
593 }
594 
595 static int device_init_rd1_ring(struct vnt_private *priv)
596 {
597 	int i;
598 	dma_addr_t      curr = priv->rd1_pool_dma;
599 	struct vnt_rx_desc *desc;
600 	int ret;
601 
602 	/* Init the RD1 ring entries */
603 	for (i = 0; i < priv->opts.rx_descs1;
604 	     i ++, curr += sizeof(struct vnt_rx_desc)) {
605 		desc = &priv->aRD1Ring[i];
606 		desc->rd_info = kzalloc(sizeof(*desc->rd_info), GFP_KERNEL);
607 		if (!desc->rd_info) {
608 			ret = -ENOMEM;
609 			goto err_free_desc;
610 		}
611 
612 		if (!device_alloc_rx_buf(priv, desc)) {
613 			dev_err(&priv->pcid->dev, "can not alloc rx bufs\n");
614 			ret = -ENOMEM;
615 			goto err_free_rd;
616 		}
617 
618 		desc->next = &priv->aRD1Ring[(i + 1) % priv->opts.rx_descs1];
619 		desc->next_desc = cpu_to_le32(curr + sizeof(struct vnt_rx_desc));
620 	}
621 
622 	if (i > 0)
623 		priv->aRD1Ring[i - 1].next_desc = cpu_to_le32(priv->rd1_pool_dma);
624 	priv->pCurrRD[1] = &priv->aRD1Ring[0];
625 
626 	return 0;
627 
628 err_free_rd:
629 	kfree(desc->rd_info);
630 
631 err_free_desc:
632 	while (--i) {
633 		desc = &priv->aRD1Ring[i];
634 		device_free_rx_buf(priv, desc);
635 		kfree(desc->rd_info);
636 	}
637 
638 	return ret;
639 }
640 
641 static void device_free_rd0_ring(struct vnt_private *priv)
642 {
643 	int i;
644 
645 	for (i = 0; i < priv->opts.rx_descs0; i++) {
646 		struct vnt_rx_desc *desc = &priv->aRD0Ring[i];
647 
648 		device_free_rx_buf(priv, desc);
649 		kfree(desc->rd_info);
650 	}
651 }
652 
653 static void device_free_rd1_ring(struct vnt_private *priv)
654 {
655 	int i;
656 
657 	for (i = 0; i < priv->opts.rx_descs1; i++) {
658 		struct vnt_rx_desc *desc = &priv->aRD1Ring[i];
659 
660 		device_free_rx_buf(priv, desc);
661 		kfree(desc->rd_info);
662 	}
663 }
664 
665 static int device_init_td0_ring(struct vnt_private *priv)
666 {
667 	int i;
668 	dma_addr_t  curr;
669 	struct vnt_tx_desc *desc;
670 	int ret;
671 
672 	curr = priv->td0_pool_dma;
673 	for (i = 0; i < priv->opts.tx_descs[0];
674 	     i++, curr += sizeof(struct vnt_tx_desc)) {
675 		desc = &priv->apTD0Rings[i];
676 		desc->td_info = kzalloc(sizeof(*desc->td_info), GFP_KERNEL);
677 		if (!desc->td_info) {
678 			ret = -ENOMEM;
679 			goto err_free_desc;
680 		}
681 
682 		desc->td_info->buf = priv->tx0_bufs + i * PKT_BUF_SZ;
683 		desc->td_info->buf_dma = priv->tx_bufs_dma0 + i * PKT_BUF_SZ;
684 
685 		desc->next = &(priv->apTD0Rings[(i + 1) % priv->opts.tx_descs[0]]);
686 		desc->next_desc = cpu_to_le32(curr +
687 					      sizeof(struct vnt_tx_desc));
688 	}
689 
690 	if (i > 0)
691 		priv->apTD0Rings[i - 1].next_desc = cpu_to_le32(priv->td0_pool_dma);
692 	priv->apTailTD[0] = priv->apCurrTD[0] = &priv->apTD0Rings[0];
693 
694 	return 0;
695 
696 err_free_desc:
697 	while (--i) {
698 		desc = &priv->apTD0Rings[i];
699 		kfree(desc->td_info);
700 	}
701 
702 	return ret;
703 }
704 
705 static int device_init_td1_ring(struct vnt_private *priv)
706 {
707 	int i;
708 	dma_addr_t  curr;
709 	struct vnt_tx_desc *desc;
710 	int ret;
711 
712 	/* Init the TD ring entries */
713 	curr = priv->td1_pool_dma;
714 	for (i = 0; i < priv->opts.tx_descs[1];
715 	     i++, curr += sizeof(struct vnt_tx_desc)) {
716 		desc = &priv->apTD1Rings[i];
717 		desc->td_info = kzalloc(sizeof(*desc->td_info), GFP_KERNEL);
718 		if (!desc->td_info) {
719 			ret = -ENOMEM;
720 			goto err_free_desc;
721 		}
722 
723 		desc->td_info->buf = priv->tx1_bufs + i * PKT_BUF_SZ;
724 		desc->td_info->buf_dma = priv->tx_bufs_dma1 + i * PKT_BUF_SZ;
725 
726 		desc->next = &(priv->apTD1Rings[(i + 1) % priv->opts.tx_descs[1]]);
727 		desc->next_desc = cpu_to_le32(curr + sizeof(struct vnt_tx_desc));
728 	}
729 
730 	if (i > 0)
731 		priv->apTD1Rings[i - 1].next_desc = cpu_to_le32(priv->td1_pool_dma);
732 	priv->apTailTD[1] = priv->apCurrTD[1] = &priv->apTD1Rings[0];
733 
734 	return 0;
735 
736 err_free_desc:
737 	while (--i) {
738 		desc = &priv->apTD1Rings[i];
739 		kfree(desc->td_info);
740 	}
741 
742 	return ret;
743 }
744 
745 static void device_free_td0_ring(struct vnt_private *priv)
746 {
747 	int i;
748 
749 	for (i = 0; i < priv->opts.tx_descs[0]; i++) {
750 		struct vnt_tx_desc *desc = &priv->apTD0Rings[i];
751 		struct vnt_td_info *td_info = desc->td_info;
752 
753 		dev_kfree_skb(td_info->skb);
754 		kfree(desc->td_info);
755 	}
756 }
757 
758 static void device_free_td1_ring(struct vnt_private *priv)
759 {
760 	int i;
761 
762 	for (i = 0; i < priv->opts.tx_descs[1]; i++) {
763 		struct vnt_tx_desc *desc = &priv->apTD1Rings[i];
764 		struct vnt_td_info *td_info = desc->td_info;
765 
766 		dev_kfree_skb(td_info->skb);
767 		kfree(desc->td_info);
768 	}
769 }
770 
771 /*-----------------------------------------------------------------*/
772 
/*
 * Service the RX ring @idx: walk host-owned descriptors from the current
 * position, hand each received frame to vnt_receive_frame(), replenish
 * the buffer, and return ownership to the NIC.  Work is bounded per call
 * to limit time spent in interrupt context.
 *
 * Returns the number of descriptors processed.
 */
static int device_rx_srv(struct vnt_private *priv, unsigned int idx)
{
	struct vnt_rx_desc *rd;
	int works = 0;

	for (rd = priv->pCurrRD[idx];
	     rd->rd0.owner == OWNED_BY_HOST;
	     rd = rd->next) {
		/* Cap the batch size so one IRQ cannot monopolise the CPU. */
		if (works++ > 15)
			break;

		if (!rd->rd_info->skb)
			break;

		/*
		 * If the frame was consumed, the skb now belongs to the
		 * stack; attach a fresh buffer before recycling the slot.
		 */
		if (vnt_receive_frame(priv, rd)) {
			if (!device_alloc_rx_buf(priv, rd)) {
				dev_err(&priv->pcid->dev,
					"can not allocate rx buf\n");
				break;
			}
		}
		rd->rd0.owner = OWNED_BY_NIC;
	}

	/* Remember where to resume on the next service pass. */
	priv->pCurrRD[idx] = rd;

	return works;
}
801 
/*
 * Attach a fresh receive buffer to descriptor @rd: allocate an skb of
 * rx_buf_sz, DMA-map it for device writes, and re-arm the descriptor
 * (owner back to the NIC).  Returns false if either the skb allocation
 * or the DMA mapping fails, leaving rd_info->skb NULL in the latter case.
 */
static bool device_alloc_rx_buf(struct vnt_private *priv,
				struct vnt_rx_desc *rd)
{
	struct vnt_rd_info *rd_info = rd->rd_info;

	rd_info->skb = dev_alloc_skb((int)priv->rx_buf_sz);
	if (!rd_info->skb)
		return false;

	/* Map the full tailroom so the device can DMA the whole frame. */
	rd_info->skb_dma =
		dma_map_single(&priv->pcid->dev,
			       skb_put(rd_info->skb, skb_tailroom(rd_info->skb)),
			       priv->rx_buf_sz, DMA_FROM_DEVICE);
	if (dma_mapping_error(&priv->pcid->dev, rd_info->skb_dma)) {
		dev_kfree_skb(rd_info->skb);
		rd_info->skb = NULL;
		return false;
	}

	/* Clear all rd0 status fields in one 32-bit store. */
	*((unsigned int *)&rd->rd0) = 0; /* FIX cast */

	rd->rd0.res_count = cpu_to_le16(priv->rx_buf_sz);
	rd->rd0.owner = OWNED_BY_NIC;
	rd->rd1.req_count = cpu_to_le16(priv->rx_buf_sz);
	rd->buff_addr = cpu_to_le32(rd_info->skb_dma);

	return true;
}
830 
831 static void device_free_rx_buf(struct vnt_private *priv,
832 			       struct vnt_rx_desc *rd)
833 {
834 	struct vnt_rd_info *rd_info = rd->rd_info;
835 
836 	dma_unmap_single(&priv->pcid->dev, rd_info->skb_dma,
837 			 priv->rx_buf_sz, DMA_FROM_DEVICE);
838 	dev_kfree_skb(rd_info->skb);
839 }
840 
/*
 * Auto-fallback table 0: indexed as [tx rate - RATE_18M][retry count,
 * clamped to 4] by vnt_int_report_rate() to pick the rate actually used
 * after hardware retries.
 */
static const u8 fallback_rate0[5][5] = {
	{RATE_18M, RATE_18M, RATE_12M, RATE_12M, RATE_12M},
	{RATE_24M, RATE_24M, RATE_18M, RATE_12M, RATE_12M},
	{RATE_36M, RATE_36M, RATE_24M, RATE_18M, RATE_18M},
	{RATE_48M, RATE_48M, RATE_36M, RATE_24M, RATE_24M},
	{RATE_54M, RATE_54M, RATE_48M, RATE_36M, RATE_36M}
};
848 
/*
 * Auto-fallback table 1: same indexing as fallback_rate0 but with a
 * steeper rate drop-off on repeated retries.
 */
static const u8 fallback_rate1[5][5] = {
	{RATE_18M, RATE_18M, RATE_12M, RATE_6M, RATE_6M},
	{RATE_24M, RATE_24M, RATE_18M, RATE_6M, RATE_6M},
	{RATE_36M, RATE_36M, RATE_24M, RATE_12M, RATE_12M},
	{RATE_48M, RATE_48M, RATE_24M, RATE_12M, RATE_12M},
	{RATE_54M, RATE_54M, RATE_36M, RATE_18M, RATE_18M}
};
856 
/*
 * Translate the hardware TX status (tsr0/tsr1) for a completed frame
 * into mac80211 tx_info status: retry count, the rate index actually
 * used after auto-fallback, and the ACK/no-ACK flags.
 *
 * Returns 0 on success, -ENOMEM if @context is NULL, -EINVAL if it
 * carries no skb.
 */
static int vnt_int_report_rate(struct vnt_private *priv,
			       struct vnt_td_info *context, u8 tsr0, u8 tsr1)
{
	struct vnt_tx_fifo_head *fifo_head;
	struct ieee80211_tx_info *info;
	struct ieee80211_rate *rate;
	u16 fb_option;
	u8 tx_retry = (tsr0 & TSR0_NCR);	/* retry count from TSR0 */
	s8 idx;

	if (!context)
		return -ENOMEM;

	if (!context->skb)
		return -EINVAL;

	/* Which auto-fallback scheme (if any) was requested for this frame. */
	fifo_head = (struct vnt_tx_fifo_head *)context->buf;
	fb_option = (le16_to_cpu(fifo_head->fifo_ctl) &
			(FIFOCTL_AUTO_FB_0 | FIFOCTL_AUTO_FB_1));

	info = IEEE80211_SKB_CB(context->skb);
	idx = info->control.rates[0].idx;

	/* On a successful auto-fallback TX, recompute the rate actually used. */
	if (fb_option && !(tsr1 & TSR1_TERR)) {
		u8 tx_rate;
		u8 retry = tx_retry;

		rate = ieee80211_get_tx_rate(priv->hw, info);
		/* Fallback tables start at RATE_18M. */
		tx_rate = rate->hw_value - RATE_18M;

		if (retry > 4)
			retry = 4;	/* clamp to the table's last column */

		if (fb_option & FIFOCTL_AUTO_FB_0)
			tx_rate = fallback_rate0[tx_rate][retry];
		else if (fb_option & FIFOCTL_AUTO_FB_1)
			tx_rate = fallback_rate1[tx_rate][retry];

		/* 5 GHz rate indices exclude the CCK rates below RATE_6M. */
		if (info->band == NL80211_BAND_5GHZ)
			idx = tx_rate - RATE_6M;
		else
			idx = tx_rate;
	}

	ieee80211_tx_info_clear_status(info);

	info->status.rates[0].count = tx_retry;

	if (!(tsr1 & TSR1_TERR)) {
		info->status.rates[0].idx = idx;

		if (info->flags & IEEE80211_TX_CTL_NO_ACK)
			info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
		else
			info->flags |= IEEE80211_TX_STAT_ACK;
	}

	return 0;
}
916 
/*
 * Service the TX ring @idx: reclaim completed descriptors from the tail,
 * report per-frame status to mac80211 and free the buffers.  Work is
 * bounded per call.  Returns the number of descriptors examined.
 */
static int device_tx_srv(struct vnt_private *priv, unsigned int idx)
{
	struct vnt_tx_desc *desc;
	int                      works = 0;
	unsigned char byTsr0;
	unsigned char byTsr1;

	for (desc = priv->apTailTD[idx]; priv->iTDUsed[idx] > 0; desc = desc->next) {
		/* Stop at the first descriptor the NIC still owns. */
		if (desc->td0.owner == OWNED_BY_NIC)
			break;
		/* Cap the batch size so one IRQ cannot monopolise the CPU. */
		if (works++ > 15)
			break;

		byTsr0 = desc->td0.tsr0;
		byTsr1 = desc->td0.tsr1;

		/* Only the status of first TD in the chain is correct */
		if (desc->td1.tcr & TCR_STP) {
			if ((desc->td_info->flags & TD_FLAGS_NETIF_SKB) != 0) {
				if (!(byTsr1 & TSR1_TERR)) {
					if (byTsr0 != 0) {
						pr_debug(" Tx[%d] OK but has error. tsr1[%02X] tsr0[%02X]\n",
							 (int)idx, byTsr1,
							 byTsr0);
					}
				} else {
					pr_debug(" Tx[%d] dropped & tsr1[%02X] tsr0[%02X]\n",
						 (int)idx, byTsr1, byTsr0);
				}
			}

			if (byTsr1 & TSR1_TERR) {
				if ((desc->td_info->flags & TD_FLAGS_PRIV_SKB) != 0) {
					pr_debug(" Tx[%d] fail has error. tsr1[%02X] tsr0[%02X]\n",
						 (int)idx, byTsr1, byTsr0);
				}
			}

			vnt_int_report_rate(priv, desc->td_info, byTsr0, byTsr1);

			device_free_tx_buf(priv, desc);
			priv->iTDUsed[idx]--;
		}
	}

	/* Remember where to resume reclaiming on the next pass. */
	priv->apTailTD[idx] = desc;

	return works;
}
966 
967 static void device_error(struct vnt_private *priv, unsigned short status)
968 {
969 	if (status & ISR_FETALERR) {
970 		dev_err(&priv->pcid->dev, "Hardware fatal error\n");
971 
972 		MACbShutdown(priv);
973 		return;
974 	}
975 }
976 
977 static void device_free_tx_buf(struct vnt_private *priv,
978 			       struct vnt_tx_desc *desc)
979 {
980 	struct vnt_td_info *td_info = desc->td_info;
981 	struct sk_buff *skb = td_info->skb;
982 
983 	if (skb)
984 		ieee80211_tx_status_irqsafe(priv->hw, skb);
985 
986 	td_info->skb = NULL;
987 	td_info->flags = 0;
988 }
989 
/*
 * Periodically adapt the baseband VGA gain to the measured RSSI.
 * Picks the gain level whose dBm threshold the current RSSI falls
 * under, applies it immediately on the first divergence, and re-applies
 * it once the new level has persisted for BB_VGA_CHANGE_THRESHOLD checks.
 * Skipped when VGA updates are disabled, the device is off-channel, or
 * there is no association/RSSI sample yet.
 */
static void vnt_check_bb_vga(struct vnt_private *priv)
{
	long dbm;
	int i;

	if (!priv->bUpdateBBVGA)
		return;

	if (priv->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
		return;

	if (!(priv->vif->cfg.assoc && priv->current_rssi))
		return;

	RFvRSSITodBm(priv, (u8)priv->current_rssi, &dbm);

	/* Thresholds are ordered: take the first level the RSSI fits under. */
	for (i = 0; i < BB_VGA_LEVEL; i++) {
		if (dbm < priv->dbm_threshold[i]) {
			priv->byBBVGANew = priv->abyBBVGA[i];
			break;
		}
	}

	/* No change wanted: reset the divergence counter. */
	if (priv->byBBVGANew == priv->byBBVGACurrent) {
		priv->uBBVGADiffCount = 1;
		return;
	}

	priv->uBBVGADiffCount++;

	if (priv->uBBVGADiffCount == 1) {
		/* first VGA diff gain */
		bb_set_vga_gain_offset(priv, priv->byBBVGANew);

		dev_dbg(&priv->pcid->dev,
			"First RSSI[%d] NewGain[%d] OldGain[%d] Count[%d]\n",
			(int)dbm, priv->byBBVGANew,
			priv->byBBVGACurrent,
			(int)priv->uBBVGADiffCount);
	}

	if (priv->uBBVGADiffCount >= BB_VGA_CHANGE_THRESHOLD) {
		dev_dbg(&priv->pcid->dev,
			"RSSI[%d] NewGain[%d] OldGain[%d] Count[%d]\n",
			(int)dbm, priv->byBBVGANew,
			priv->byBBVGACurrent,
			(int)priv->uBBVGADiffCount);

		bb_set_vga_gain_offset(priv, priv->byBBVGANew);
	}
}
1041 
1042 static void vnt_interrupt_process(struct vnt_private *priv)
1043 {
1044 	struct ieee80211_low_level_stats *low_stats = &priv->low_stats;
1045 	int             max_count = 0;
1046 	u32 mib_counter;
1047 	u32 isr;
1048 	unsigned long flags;
1049 
1050 	isr = ioread32(priv->port_offset + MAC_REG_ISR);
1051 
1052 	if (isr == 0)
1053 		return;
1054 
1055 	if (isr == 0xffffffff) {
1056 		pr_debug("isr = 0xffff\n");
1057 		return;
1058 	}
1059 
1060 	spin_lock_irqsave(&priv->lock, flags);
1061 
1062 	/* Read low level stats */
1063 	mib_counter = ioread32(priv->port_offset + MAC_REG_MIBCNTR);
1064 
1065 	low_stats->dot11RTSSuccessCount += mib_counter & 0xff;
1066 	low_stats->dot11RTSFailureCount += (mib_counter >> 8) & 0xff;
1067 	low_stats->dot11ACKFailureCount += (mib_counter >> 16) & 0xff;
1068 	low_stats->dot11FCSErrorCount += (mib_counter >> 24) & 0xff;
1069 
1070 	/*
1071 	 * TBD....
1072 	 * Must do this after doing rx/tx, cause ISR bit is slow
1073 	 * than RD/TD write back
1074 	 * update ISR counter
1075 	 */
1076 	while (isr && priv->vif) {
1077 		iowrite32(isr, priv->port_offset + MAC_REG_ISR);
1078 
1079 		if (isr & ISR_FETALERR) {
1080 			pr_debug(" ISR_FETALERR\n");
1081 			iowrite8(0, priv->port_offset + MAC_REG_SOFTPWRCTL);
1082 			iowrite16(SOFTPWRCTL_SWPECTI, priv->port_offset + MAC_REG_SOFTPWRCTL);
1083 			device_error(priv, isr);
1084 		}
1085 
1086 		if (isr & ISR_TBTT) {
1087 			if (priv->op_mode != NL80211_IFTYPE_ADHOC)
1088 				vnt_check_bb_vga(priv);
1089 
1090 			priv->bBeaconSent = false;
1091 			if (priv->bEnablePSMode)
1092 				PSbIsNextTBTTWakeUp((void *)priv);
1093 
1094 			if ((priv->op_mode == NL80211_IFTYPE_AP ||
1095 			    priv->op_mode == NL80211_IFTYPE_ADHOC) &&
1096 			    priv->vif->bss_conf.enable_beacon)
1097 				MACvOneShotTimer1MicroSec(priv,
1098 							  (priv->vif->bss_conf.beacon_int -
1099 							   MAKE_BEACON_RESERVED) << 10);
1100 
1101 			/* TODO: adhoc PS mode */
1102 		}
1103 
1104 		if (isr & ISR_BNTX) {
1105 			if (priv->op_mode == NL80211_IFTYPE_ADHOC) {
1106 				priv->bIsBeaconBufReadySet = false;
1107 				priv->cbBeaconBufReadySetCnt = 0;
1108 			}
1109 
1110 			priv->bBeaconSent = true;
1111 		}
1112 
1113 		if (isr & ISR_RXDMA0)
1114 			max_count += device_rx_srv(priv, TYPE_RXDMA0);
1115 
1116 		if (isr & ISR_RXDMA1)
1117 			max_count += device_rx_srv(priv, TYPE_RXDMA1);
1118 
1119 		if (isr & ISR_TXDMA0)
1120 			max_count += device_tx_srv(priv, TYPE_TXDMA0);
1121 
1122 		if (isr & ISR_AC0DMA)
1123 			max_count += device_tx_srv(priv, TYPE_AC0DMA);
1124 
1125 		if (isr & ISR_SOFTTIMER1) {
1126 			if (priv->vif->bss_conf.enable_beacon)
1127 				vnt_beacon_make(priv, priv->vif);
1128 		}
1129 
1130 		/* If both buffers available wake the queue */
1131 		if (AVAIL_TD(priv, TYPE_TXDMA0) &&
1132 		    AVAIL_TD(priv, TYPE_AC0DMA) &&
1133 		    ieee80211_queue_stopped(priv->hw, 0))
1134 			ieee80211_wake_queues(priv->hw);
1135 
1136 		isr = ioread32(priv->port_offset + MAC_REG_ISR);
1137 
1138 		MACvReceive0(priv->port_offset);
1139 		MACvReceive1(priv->port_offset);
1140 
1141 		if (max_count > priv->opts.int_works)
1142 			break;
1143 	}
1144 
1145 	spin_unlock_irqrestore(&priv->lock, flags);
1146 }
1147 
/*
 * Deferred interrupt handling scheduled from vnt_interrupt().
 * Processes pending interrupt sources (only while an interface is up),
 * then restores the interrupt mask that the hard IRQ handler cleared.
 * The IMR write must stay last so MAC interrupts remain masked for the
 * duration of the bottom half.
 */
static void vnt_interrupt_work(struct work_struct *work)
{
	struct vnt_private *priv =
		container_of(work, struct vnt_private, interrupt_work);

	if (priv->vif)
		vnt_interrupt_process(priv);

	iowrite32(IMR_MASK_VALUE, priv->port_offset + MAC_REG_IMR);
}
1158 
/*
 * Hard IRQ handler: defer all real work to process context and mask
 * further MAC interrupts until vnt_interrupt_work() re-enables them.
 *
 * NOTE(review): the IMR clear follows schedule_work(), so the work item
 * could in principle run before interrupts are masked — presumably
 * harmless since the bottom half re-reads ISR, but worth confirming.
 */
static irqreturn_t vnt_interrupt(int irq,  void *arg)
{
	struct vnt_private *priv = arg;

	schedule_work(&priv->interrupt_work);

	iowrite32(0, priv->port_offset + MAC_REG_IMR);

	return IRQ_HANDLED;
}
1169 
/*
 * Queue one frame on the appropriate TX DMA ring and kick the MAC.
 * Data frames go to the AC0 DMA ring; management/control frames go to
 * TX DMA ring 0.  Returns 0 on success, or -ENOMEM when the selected
 * ring has no free descriptor (queues are stopped in that case and
 * woken again from the interrupt path once descriptors free up).
 */
static int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct vnt_tx_desc *head_td;
	u32 dma_idx;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	if (ieee80211_is_data(hdr->frame_control))
		dma_idx = TYPE_AC0DMA;
	else
		dma_idx = TYPE_TXDMA0;

	if (AVAIL_TD(priv, dma_idx) < 1) {
		spin_unlock_irqrestore(&priv->lock, flags);
		ieee80211_stop_queues(priv->hw);
		return -ENOMEM;
	}

	/* Claim the current head descriptor and advance the ring head. */
	head_td = priv->apCurrTD[dma_idx];

	head_td->td1.tcr = 0;

	head_td->td_info->skb = skb;

	if (dma_idx == TYPE_AC0DMA)
		head_td->td_info->flags = TD_FLAGS_NETIF_SKB;

	priv->apCurrTD[dma_idx] = head_td->next;

	/* NOTE(review): the lock is dropped while building the FIFO
	 * header and head_td is used again after reacquiring it —
	 * presumably safe because the descriptor is not yet owned by
	 * the NIC, but worth confirming.
	 */
	spin_unlock_irqrestore(&priv->lock, flags);

	vnt_generate_fifo_header(priv, dma_idx, head_td, skb);

	spin_lock_irqsave(&priv->lock, flags);

	priv->bPWBitOn = false;

	/* Set TSR1 & ReqCount in TxDescHead */
	head_td->td1.tcr |= (TCR_STP | TCR_EDP | EDMSDU);
	head_td->td1.req_count = cpu_to_le16(head_td->td_info->req_count);

	head_td->buff_addr = cpu_to_le32(head_td->td_info->buf_dma);

	/* Poll Transmit the adapter */
	wmb();
	/* Descriptor fields must be visible before ownership transfers. */
	head_td->td0.owner = OWNED_BY_NIC;
	wmb(); /* second memory barrier */

	if (head_td->td_info->flags & TD_FLAGS_NETIF_SKB)
		MACvTransmitAC0(priv->port_offset);
	else
		MACvTransmit0(priv->port_offset);

	priv->iTDUsed[dma_idx]++;

	spin_unlock_irqrestore(&priv->lock, flags);

	return 0;
}
1231 
1232 static void vnt_tx_80211(struct ieee80211_hw *hw,
1233 			 struct ieee80211_tx_control *control,
1234 			 struct sk_buff *skb)
1235 {
1236 	struct vnt_private *priv = hw->priv;
1237 
1238 	if (vnt_tx_packet(priv, skb))
1239 		ieee80211_free_txskb(hw, skb);
1240 }
1241 
/*
 * mac80211 .start callback: allocate the shared ring memory, hook the
 * interrupt line, populate the four descriptor rings, program the
 * MAC/BBP/RF registers and finally unmask interrupts.  Resources are
 * released in reverse acquisition order on failure via the goto
 * unwind ladder; vnt_stop() performs the matching full teardown.
 */
static int vnt_start(struct ieee80211_hw *hw)
{
	struct vnt_private *priv = hw->priv;
	int ret;

	priv->rx_buf_sz = PKT_BUF_SZ;
	if (!device_init_rings(priv))
		return -ENOMEM;

	/* The IRQ line may be shared with other PCI devices. */
	ret = request_irq(priv->pcid->irq, vnt_interrupt,
			  IRQF_SHARED, "vt6655", priv);
	if (ret) {
		dev_dbg(&priv->pcid->dev, "failed to start irq\n");
		goto err_free_rings;
	}

	dev_dbg(&priv->pcid->dev, "call device init rd0 ring\n");
	ret = device_init_rd0_ring(priv);
	if (ret)
		goto err_free_irq;
	ret = device_init_rd1_ring(priv);
	if (ret)
		goto err_free_rd0_ring;
	ret = device_init_td0_ring(priv);
	if (ret)
		goto err_free_rd1_ring;
	ret = device_init_td1_ring(priv);
	if (ret)
		goto err_free_td0_ring;

	device_init_registers(priv);

	dev_dbg(&priv->pcid->dev, "enable MAC interrupt\n");
	iowrite32(IMR_MASK_VALUE, priv->port_offset + MAC_REG_IMR);

	ieee80211_wake_queues(hw);

	return 0;

err_free_td0_ring:
	device_free_td0_ring(priv);
err_free_rd1_ring:
	device_free_rd1_ring(priv);
err_free_rd0_ring:
	device_free_rd0_ring(priv);
err_free_irq:
	free_irq(priv->pcid->irq, priv);
err_free_rings:
	device_free_rings(priv);
	return ret;
}
1293 
/*
 * mac80211 .stop callback: quiesce TX, flush the deferred interrupt
 * work, power the hardware down, then free rings and the IRQ — the
 * reverse of vnt_start().
 */
static void vnt_stop(struct ieee80211_hw *hw)
{
	struct vnt_private *priv = hw->priv;

	ieee80211_stop_queues(hw);

	/* Ensure no bottom half runs while we tear the rings down. */
	cancel_work_sync(&priv->interrupt_work);

	MACbShutdown(priv);
	MACbSoftwareReset(priv);
	CARDbRadioPowerOff(priv);

	device_free_td0_ring(priv);
	device_free_td1_ring(priv);
	device_free_rd0_ring(priv);
	device_free_rd1_ring(priv);
	device_free_rings(priv);

	free_irq(priv->pcid->irq, priv);
}
1314 
1315 static int vnt_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
1316 {
1317 	struct vnt_private *priv = hw->priv;
1318 
1319 	priv->vif = vif;
1320 
1321 	switch (vif->type) {
1322 	case NL80211_IFTYPE_STATION:
1323 		break;
1324 	case NL80211_IFTYPE_ADHOC:
1325 		vt6655_mac_reg_bits_off(priv->port_offset, MAC_REG_RCR, RCR_UNICAST);
1326 
1327 		vt6655_mac_reg_bits_on(priv->port_offset, MAC_REG_HOSTCR, HOSTCR_ADHOC);
1328 
1329 		break;
1330 	case NL80211_IFTYPE_AP:
1331 		vt6655_mac_reg_bits_off(priv->port_offset, MAC_REG_RCR, RCR_UNICAST);
1332 
1333 		vt6655_mac_reg_bits_on(priv->port_offset, MAC_REG_HOSTCR, HOSTCR_AP);
1334 
1335 		break;
1336 	default:
1337 		return -EOPNOTSUPP;
1338 	}
1339 
1340 	priv->op_mode = vif->type;
1341 
1342 	return 0;
1343 }
1344 
1345 static void vnt_remove_interface(struct ieee80211_hw *hw,
1346 				 struct ieee80211_vif *vif)
1347 {
1348 	struct vnt_private *priv = hw->priv;
1349 
1350 	switch (vif->type) {
1351 	case NL80211_IFTYPE_STATION:
1352 		break;
1353 	case NL80211_IFTYPE_ADHOC:
1354 		vt6655_mac_reg_bits_off(priv->port_offset, MAC_REG_TCR, TCR_AUTOBCNTX);
1355 		vt6655_mac_reg_bits_off(priv->port_offset,
1356 					MAC_REG_TFTCTL, TFTCTL_TSFCNTREN);
1357 		vt6655_mac_reg_bits_off(priv->port_offset, MAC_REG_HOSTCR, HOSTCR_ADHOC);
1358 		break;
1359 	case NL80211_IFTYPE_AP:
1360 		vt6655_mac_reg_bits_off(priv->port_offset, MAC_REG_TCR, TCR_AUTOBCNTX);
1361 		vt6655_mac_reg_bits_off(priv->port_offset,
1362 					MAC_REG_TFTCTL, TFTCTL_TSFCNTREN);
1363 		vt6655_mac_reg_bits_off(priv->port_offset, MAC_REG_HOSTCR, HOSTCR_AP);
1364 		break;
1365 	default:
1366 		break;
1367 	}
1368 
1369 	priv->op_mode = NL80211_IFTYPE_UNSPECIFIED;
1370 }
1371 
1372 static int vnt_config(struct ieee80211_hw *hw, u32 changed)
1373 {
1374 	struct vnt_private *priv = hw->priv;
1375 	struct ieee80211_conf *conf = &hw->conf;
1376 	u8 bb_type;
1377 
1378 	if (changed & IEEE80211_CONF_CHANGE_PS) {
1379 		if (conf->flags & IEEE80211_CONF_PS)
1380 			PSvEnablePowerSaving(priv, conf->listen_interval);
1381 		else
1382 			PSvDisablePowerSaving(priv);
1383 	}
1384 
1385 	if ((changed & IEEE80211_CONF_CHANGE_CHANNEL) ||
1386 	    (conf->flags & IEEE80211_CONF_OFFCHANNEL)) {
1387 		set_channel(priv, conf->chandef.chan);
1388 
1389 		if (conf->chandef.chan->band == NL80211_BAND_5GHZ)
1390 			bb_type = BB_TYPE_11A;
1391 		else
1392 			bb_type = BB_TYPE_11G;
1393 
1394 		if (priv->byBBType != bb_type) {
1395 			priv->byBBType = bb_type;
1396 
1397 			CARDbSetPhyParameter(priv, priv->byBBType);
1398 		}
1399 	}
1400 
1401 	if (changed & IEEE80211_CONF_CHANGE_POWER) {
1402 		if (priv->byBBType == BB_TYPE_11B)
1403 			priv->wCurrentRate = RATE_1M;
1404 		else
1405 			priv->wCurrentRate = RATE_54M;
1406 
1407 		RFbSetPower(priv, priv->wCurrentRate,
1408 			    conf->chandef.chan->hw_value);
1409 	}
1410 
1411 	return 0;
1412 }
1413 
/*
 * mac80211 .bss_info_changed callback: mirror BSS configuration
 * changes (BSSID, basic rates, ERP settings, TX power, beaconing and
 * association/TSF state) into the hardware registers.
 */
static void vnt_bss_info_changed(struct ieee80211_hw *hw,
				 struct ieee80211_vif *vif,
				 struct ieee80211_bss_conf *conf, u64 changed)
{
	struct vnt_private *priv = hw->priv;

	priv->current_aid = vif->cfg.aid;

	if (changed & BSS_CHANGED_BSSID && conf->bssid) {
		unsigned long flags;

		spin_lock_irqsave(&priv->lock, flags);

		vt6655_mac_write_bssid_addr(priv->port_offset, conf->bssid);

		spin_unlock_irqrestore(&priv->lock, flags);
	}

	if (changed & BSS_CHANGED_BASIC_RATES) {
		priv->basic_rates = conf->basic_rates;

		CARDvUpdateBasicTopRate(priv);

		dev_dbg(&priv->pcid->dev,
			"basic rates %x\n", conf->basic_rates);
	}

	if (changed & BSS_CHANGED_ERP_PREAMBLE) {
		if (conf->use_short_preamble) {
			MACvEnableBarkerPreambleMd(priv->port_offset);
			priv->preamble_type = true;
		} else {
			MACvDisableBarkerPreambleMd(priv->port_offset);
			priv->preamble_type = false;
		}
	}

	if (changed & BSS_CHANGED_ERP_CTS_PROT) {
		if (conf->use_cts_prot)
			MACvEnableProtectMD(priv->port_offset);
		else
			MACvDisableProtectMD(priv->port_offset);
	}

	if (changed & BSS_CHANGED_ERP_SLOT) {
		if (conf->use_short_slot)
			priv->short_slot_time = true;
		else
			priv->short_slot_time = false;

		/* Slot time change requires reprogramming the PHY. */
		CARDbSetPhyParameter(priv, priv->byBBType);
		bb_set_vga_gain_offset(priv, priv->abyBBVGA[0]);
	}

	if (changed & BSS_CHANGED_TXPOWER)
		RFbSetPower(priv, priv->wCurrentRate,
			    conf->chandef.chan->hw_value);

	if (changed & BSS_CHANGED_BEACON_ENABLED) {
		dev_dbg(&priv->pcid->dev,
			"Beacon enable %d\n", conf->enable_beacon);

		if (conf->enable_beacon) {
			vnt_beacon_enable(priv, vif, conf);

			vt6655_mac_reg_bits_on(priv->port_offset, MAC_REG_TCR, TCR_AUTOBCNTX);
		} else {
			vt6655_mac_reg_bits_off(priv->port_offset, MAC_REG_TCR,
						TCR_AUTOBCNTX);
		}
	}

	if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_BEACON_INFO) &&
	    priv->op_mode != NL80211_IFTYPE_AP) {
		if (vif->cfg.assoc && conf->beacon_rate) {
			/* Sync local TSF/beacon timing to the AP. */
			CARDbUpdateTSF(priv, conf->beacon_rate->hw_value,
				       conf->sync_tsf);

			CARDbSetBeaconPeriod(priv, conf->beacon_int);

			CARDvSetFirstNextTBTT(priv, conf->beacon_int);
		} else {
			/* Not associated: reset and restart the TSF counter. */
			iowrite8(TFTCTL_TSFCNTRST, priv->port_offset + MAC_REG_TFTCTL);
			iowrite8(TFTCTL_TSFCNTREN, priv->port_offset + MAC_REG_TFTCTL);
		}
	}
}
1501 
1502 static u64 vnt_prepare_multicast(struct ieee80211_hw *hw,
1503 				 struct netdev_hw_addr_list *mc_list)
1504 {
1505 	struct vnt_private *priv = hw->priv;
1506 	struct netdev_hw_addr *ha;
1507 	u64 mc_filter = 0;
1508 	u32 bit_nr = 0;
1509 
1510 	netdev_hw_addr_list_for_each(ha, mc_list) {
1511 		bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
1512 
1513 		mc_filter |= 1ULL << (bit_nr & 0x3f);
1514 	}
1515 
1516 	priv->mc_list_count = mc_list->count;
1517 
1518 	return mc_filter;
1519 }
1520 
/*
 * mac80211 .configure_filter callback: program the multicast address
 * registers (MAR0/MAR1 on register page 1) and the receive control
 * register according to the requested filter flags.
 */
static void vnt_configure(struct ieee80211_hw *hw,
			  unsigned int changed_flags,
			  unsigned int *total_flags, u64 multicast)
{
	struct vnt_private *priv = hw->priv;
	u8 rx_mode = 0;

	/* Only these filter flags are supported by the hardware. */
	*total_flags &= FIF_ALLMULTI | FIF_OTHER_BSS | FIF_BCN_PRBRESP_PROMISC;

	rx_mode = ioread8(priv->port_offset + MAC_REG_RCR);

	dev_dbg(&priv->pcid->dev, "rx mode in = %x\n", rx_mode);

	if (changed_flags & FIF_ALLMULTI) {
		if (*total_flags & FIF_ALLMULTI) {
			unsigned long flags;

			spin_lock_irqsave(&priv->lock, flags);

			/* Many addresses: accept all multicast frames. */
			if (priv->mc_list_count > 2) {
				MACvSelectPage1(priv->port_offset);

				iowrite32(0xffffffff, priv->port_offset + MAC_REG_MAR0);
				iowrite32(0xffffffff, priv->port_offset + MAC_REG_MAR0 + 4);

				MACvSelectPage0(priv->port_offset);
			} else {
				MACvSelectPage1(priv->port_offset);

				/* NOTE(review): multicast arrives as a
				 * host-order u64 from vnt_prepare_multicast();
				 * le64_to_cpu() here is a no-op only on
				 * little-endian — verify intent on big-endian.
				 */
				multicast =  le64_to_cpu(multicast);
				iowrite32((u32)multicast, priv->port_offset +  MAC_REG_MAR0);
				iowrite32((u32)(multicast >> 32),
					  priv->port_offset + MAC_REG_MAR0 + 4);

				MACvSelectPage0(priv->port_offset);
			}

			spin_unlock_irqrestore(&priv->lock, flags);

			rx_mode |= RCR_MULTICAST | RCR_BROADCAST;
		} else {
			rx_mode &= ~(RCR_MULTICAST | RCR_BROADCAST);
		}
	}

	if (changed_flags & (FIF_OTHER_BSS | FIF_BCN_PRBRESP_PROMISC)) {
		rx_mode |= RCR_MULTICAST | RCR_BROADCAST;

		/* Promiscuous BSS modes disable BSSID filtering. */
		if (*total_flags & (FIF_OTHER_BSS | FIF_BCN_PRBRESP_PROMISC))
			rx_mode &= ~RCR_BSSID;
		else
			rx_mode |= RCR_BSSID;
	}

	iowrite8(rx_mode, priv->port_offset + MAC_REG_RCR);

	dev_dbg(&priv->pcid->dev, "rx mode out= %x\n", rx_mode);
}
1579 
1580 static int vnt_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
1581 		       struct ieee80211_vif *vif, struct ieee80211_sta *sta,
1582 		       struct ieee80211_key_conf *key)
1583 {
1584 	struct vnt_private *priv = hw->priv;
1585 
1586 	switch (cmd) {
1587 	case SET_KEY:
1588 		if (vnt_set_keys(hw, sta, vif, key))
1589 			return -EOPNOTSUPP;
1590 		break;
1591 	case DISABLE_KEY:
1592 		if (test_bit(key->hw_key_idx, &priv->key_entry_inuse))
1593 			clear_bit(key->hw_key_idx, &priv->key_entry_inuse);
1594 		break;
1595 	default:
1596 		break;
1597 	}
1598 
1599 	return 0;
1600 }
1601 
1602 static int vnt_get_stats(struct ieee80211_hw *hw,
1603 			 struct ieee80211_low_level_stats *stats)
1604 {
1605 	struct vnt_private *priv = hw->priv;
1606 
1607 	memcpy(stats, &priv->low_stats, sizeof(*stats));
1608 
1609 	return 0;
1610 }
1611 
1612 static u64 vnt_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
1613 {
1614 	struct vnt_private *priv = hw->priv;
1615 	u64 tsf;
1616 
1617 	tsf = vt6655_get_current_tsf(priv);
1618 
1619 	return tsf;
1620 }
1621 
/*
 * mac80211 .set_tsf callback: recompute the next TBTT from the given
 * TSF value and the interface's beacon interval.
 */
static void vnt_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			u64 tsf)
{
	struct vnt_private *priv = hw->priv;

	CARDvUpdateNextTBTT(priv, tsf, vif->bss_conf.beacon_int);
}
1629 
/*
 * mac80211 .reset_tsf callback: reset the hardware TSF counter via
 * the TSF-control register.
 */
static void vnt_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct vnt_private *priv = hw->priv;

	/* reset TSF counter */
	iowrite8(TFTCTL_TSFCNTRST, priv->port_offset + MAC_REG_TFTCTL);
}
1637 
/* mac80211 callback table implemented by this driver. */
static const struct ieee80211_ops vnt_mac_ops = {
	.tx			= vnt_tx_80211,
	.start			= vnt_start,
	.stop			= vnt_stop,
	.add_interface		= vnt_add_interface,
	.remove_interface	= vnt_remove_interface,
	.config			= vnt_config,
	.bss_info_changed	= vnt_bss_info_changed,
	.prepare_multicast	= vnt_prepare_multicast,
	.configure_filter	= vnt_configure,
	.set_key		= vnt_set_key,
	.get_stats		= vnt_get_stats,
	.get_tsf		= vnt_get_tsf,
	.set_tsf		= vnt_set_tsf,
	.reset_tsf		= vnt_reset_tsf,
};
1654 
/*
 * Final registration step of probe: publish the permanent MAC address
 * and supported bands, then register with mac80211.  The radio is
 * powered off afterwards; it is powered up again from the .start
 * callback.  Returns 0 on success or -ENODEV if registration fails.
 */
static int vnt_init(struct vnt_private *priv)
{
	SET_IEEE80211_PERM_ADDR(priv->hw, priv->abyCurrentNetAddr);

	vnt_init_bands(priv);

	if (ieee80211_register_hw(priv->hw))
		return -ENODEV;

	/* NOTE(review): presumably consumed by cleanup code to decide
	 * whether ieee80211_unregister_hw() is needed — confirm.
	 */
	priv->mac_hw = true;

	CARDbRadioPowerOff(priv);

	return 0;
}
1670 
/*
 * PCI probe entry point: allocate the ieee80211_hw, enable and map the
 * PCI device, reset the MAC, read the EEPROM-provided configuration
 * and register the device with mac80211.
 *
 * NOTE(review): all error paths delegate cleanup to device_free_info();
 * whether that helper also undoes pci_enable_device()/ioremap()/
 * pci_request_regions() is not visible here — confirm there are no
 * resource leaks on failure.
 */
static int
vt6655_probe(struct pci_dev *pcid, const struct pci_device_id *ent)
{
	struct vnt_private *priv;
	struct ieee80211_hw *hw;
	struct wiphy *wiphy;
	int         rc;

	dev_notice(&pcid->dev,
		   "%s Ver. %s\n", DEVICE_FULL_DRV_NAM, DEVICE_VERSION);

	dev_notice(&pcid->dev,
		   "Copyright (c) 2003 VIA Networking Technologies, Inc.\n");

	/* priv lives in the ieee80211_hw allocation. */
	hw = ieee80211_alloc_hw(sizeof(*priv), &vnt_mac_ops);
	if (!hw) {
		dev_err(&pcid->dev, "could not register ieee80211_hw\n");
		return -ENOMEM;
	}

	priv = hw->priv;
	priv->pcid = pcid;

	spin_lock_init(&priv->lock);

	priv->hw = hw;

	SET_IEEE80211_DEV(priv->hw, &pcid->dev);

	if (pci_enable_device(pcid)) {
		device_free_info(priv);
		return -ENODEV;
	}

	dev_dbg(&pcid->dev,
		"Before get pci_info memaddr is %x\n", priv->memaddr);

	pci_set_master(pcid);

	/* BAR0: memory-mapped registers (256 bytes); BAR1: I/O ports. */
	priv->memaddr = pci_resource_start(pcid, 0);
	priv->ioaddr = pci_resource_start(pcid, 1);
	priv->port_offset = ioremap(priv->memaddr & PCI_BASE_ADDRESS_MEM_MASK,
				   256);
	if (!priv->port_offset) {
		dev_err(&pcid->dev, ": Failed to IO remapping ..\n");
		device_free_info(priv);
		return -ENODEV;
	}

	rc = pci_request_regions(pcid, DEVICE_NAME);
	if (rc) {
		dev_err(&pcid->dev, ": Failed to find PCI device\n");
		device_free_info(priv);
		return -ENODEV;
	}

	/* The descriptor rings use 32-bit DMA addresses. */
	if (dma_set_mask(&pcid->dev, DMA_BIT_MASK(32))) {
		dev_err(&pcid->dev, ": Failed to set dma 32 bit mask\n");
		device_free_info(priv);
		return -ENODEV;
	}

	INIT_WORK(&priv->interrupt_work, vnt_interrupt_work);

	/* do reset */
	if (!MACbSoftwareReset(priv)) {
		dev_err(&pcid->dev, ": Failed to access MAC hardware..\n");
		device_free_info(priv);
		return -ENODEV;
	}
	/* initial to reload eeprom */
	MACvInitialize(priv);
	vt6655_mac_read_ether_addr(priv->port_offset, priv->abyCurrentNetAddr);

	/* Get RFType */
	priv->byRFType = SROMbyReadEmbedded(priv->port_offset, EEP_OFS_RFTYPE);
	priv->byRFType &= RF_MASK;

	dev_dbg(&pcid->dev, "RF Type = %x\n", priv->byRFType);

	device_get_options(priv);
	device_set_options(priv);

	wiphy = priv->hw->wiphy;

	wiphy->frag_threshold = FRAG_THRESH_DEF;
	wiphy->rts_threshold = RTS_THRESH_DEF;
	wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP);

	ieee80211_hw_set(priv->hw, TIMING_BEACON_ONLY);
	ieee80211_hw_set(priv->hw, SIGNAL_DBM);
	ieee80211_hw_set(priv->hw, RX_INCLUDES_FCS);
	ieee80211_hw_set(priv->hw, REPORTS_TX_ACK_STATUS);
	ieee80211_hw_set(priv->hw, SUPPORTS_PS);

	priv->hw->max_signal = 100;

	if (vnt_init(priv)) {
		device_free_info(priv);
		return -ENODEV;
	}

	device_print_info(priv);
	pci_set_drvdata(pcid, priv);

	return 0;
}
1779 
1780 /*------------------------------------------------------------------*/
1781 
/*
 * PM suspend: shut the MAC down under the driver lock.
 * NOTE(review): no explicit re-init happens in vt6655_resume();
 * presumably mac80211 reconfigures the device through its callbacks
 * after resume — confirm.
 */
static int __maybe_unused vt6655_suspend(struct device *dev_d)
{
	struct vnt_private *priv = dev_get_drvdata(dev_d);
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	MACbShutdown(priv);

	spin_unlock_irqrestore(&priv->lock, flags);

	return 0;
}
1795 
/*
 * PM resume: only clears any wakeup configuration left armed across
 * suspend; hardware state is re-established elsewhere.
 */
static int __maybe_unused vt6655_resume(struct device *dev_d)
{
	device_wakeup_disable(dev_d);

	return 0;
}
1802 
MODULE_DEVICE_TABLE(pci, vt6655_pci_id_table);

/* Suspend/resume hooks (system sleep only, no runtime PM). */
static SIMPLE_DEV_PM_OPS(vt6655_pm_ops, vt6655_suspend, vt6655_resume);

/* PCI driver glue; module init/exit generated by module_pci_driver(). */
static struct pci_driver device_driver = {
	.name = DEVICE_NAME,
	.id_table = vt6655_pci_id_table,
	.probe = vt6655_probe,
	.remove = vt6655_remove,
	.driver.pm = &vt6655_pm_ops,
};

module_pci_driver(device_driver);
1816