1 /******************************************************************************
2  *
3  * Copyright(c) 2003 - 2004 Intel Corporation. All rights reserved.
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms of version 2 of the GNU General Public License as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
12  * more details.
13  *
14  * The full GNU General Public License is included in this distribution in the
15  * file called LICENSE.
16  *
17  * Contact Information:
18  * James P. Ketrenos <ipw2100-admin@linux.intel.com>
19  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
20  *
21  *****************************************************************************
22  *
 * A few modifications for Realtek's Wi-Fi drivers by
 * Andrea Merello <andrea.merello@gmail.com>
 *
 * A special thanks goes to Realtek for their support!
27  *
28  *****************************************************************************/
29 
30 #include <linux/compiler.h>
31 #include <linux/errno.h>
32 #include <linux/if_arp.h>
33 #include <linux/in6.h>
34 #include <linux/in.h>
35 #include <linux/ip.h>
36 #include <linux/kernel.h>
37 #include <linux/module.h>
38 #include <linux/netdevice.h>
39 #include <linux/pci.h>
40 #include <linux/proc_fs.h>
41 #include <linux/skbuff.h>
42 #include <linux/slab.h>
43 #include <linux/tcp.h>
44 #include <linux/types.h>
45 #include <linux/wireless.h>
46 #include <linux/etherdevice.h>
47 #include <linux/uaccess.h>
48 #include <linux/if_vlan.h>
49 
50 #include "rtllib.h"
51 
52 /* 802.11 Data Frame
53  *
54  *
55  * 802.11 frame_control for data frames - 2 bytes
56  *      ,--------------------------------------------------------------------.
57  * bits | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |  9 |  a |  b  |  c  |  d  | e  |
58  *      |---|---|---|---|---|---|---|---|---|----|----|-----|-----|-----|----|
59  * val  | 0 | 0 | 0 | 1 | x | 0 | 0 | 0 | 1 |  0 |  x |  x  |  x  |  x  | x  |
60  *      |---|---|---|---|---|---|---|---|---|----|----|-----|-----|-----|----|
61  * desc |  ver  | type  |  ^-subtype-^  |to |from|more|retry| pwr |more |wep |
62  *      |       |       | x=0 data      |DS | DS |frag|     | mgm |data |    |
63  *      |       |       | x=1 data+ack  |   |    |    |     |     |     |    |
64  *      '--------------------------------------------------------------------'
65  *                                           /\
66  *                                           |
67  * 802.11 Data Frame                         |
68  *          ,--------- 'ctrl' expands to >---'
69  *          |
70  *       ,--'---,-------------------------------------------------------------.
71  * Bytes |  2   |  2   |    6    |    6    |    6    |  2   | 0..2312 |   4  |
72  *       |------|------|---------|---------|---------|------|---------|------|
73  * Desc. | ctrl | dura |  DA/RA  |   TA    |    SA   | Sequ |  Frame  |  fcs |
74  *       |      | tion | (BSSID) |         |         | ence |  data   |      |
75  *       `--------------------------------------------------|         |------'
76  * Total: 28 non-data bytes                                 `----.----'
77  *                                                               |
78  *        .- 'Frame data' expands to <---------------------------'
79  *        |
80  *        V
81  *       ,---------------------------------------------------.
82  * Bytes |  1   |  1   |    1    |    3     |  2   |  0-2304 |
83  *       |------|------|---------|----------|------|---------|
84  * Desc. | SNAP | SNAP | Control |Eth Tunnel| Type | IP      |
85  *       | DSAP | SSAP |         |          |      | Packet  |
86  *       | 0xAA | 0xAA |0x03 (UI)|0x00-00-F8|      |         |
87  *       `-----------------------------------------|         |
88  * Total: 8 non-data bytes                         `----.----'
89  *                                                      |
90  *        .- 'IP Packet' expands, if WEP enabled, to <--'
91  *        |
92  *        V
93  *       ,-----------------------.
94  * Bytes |  4  |   0-2296  |  4  |
95  *       |-----|-----------|-----|
96  * Desc. | IV  | Encrypted | ICV |
97  *       |     | IP Packet |     |
98  *       `-----------------------'
99  * Total: 8 non-data bytes
100  *
101  *
102  * 802.3 Ethernet Data Frame
103  *
104  *       ,-----------------------------------------.
105  * Bytes |   6   |   6   |  2   |  Variable |   4  |
106  *       |-------|-------|------|-----------|------|
107  * Desc. | Dest. | Source| Type | IP Packet |  fcs |
 *       |  MAC  |  MAC  |      |           |      |
109  *       `-----------------------------------------'
110  * Total: 18 non-data bytes
111  *
112  * In the event that fragmentation is required, the incoming payload is split
113  * into N parts of size ieee->fts.  The first fragment contains the SNAP header
114  * and the remaining packets are just data.
115  *
 * If encryption is enabled, each fragment payload size is reduced by enough
 * space to add the prefix and postfix (IV and ICV, totalling 8 bytes in
 * the case of WEP).  So 1500 bytes of payload with ieee->fts set to 500
 * takes 3 frames without encryption; with WEP it takes 4 frames, as the
 * payload of each frame is reduced to 492 bytes.
121  *
122  * SKB visualization
123  *
124  * ,- skb->data
125  * |
126  * |    ETHERNET HEADER        ,-<-- PAYLOAD
127  * |                           |     14 bytes from skb->data
128  * |  2 bytes for Type --> ,T. |     (sizeof ethhdr)
129  * |                       | | |
130  * |,-Dest.--. ,--Src.---. | | |
131  * |  6 bytes| | 6 bytes | | | |
132  * v         | |         | | | |
133  * 0         | v       1 | v | v           2
134  * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
135  *     ^     | ^         | ^ |
136  *     |     | |         | | |
137  *     |     | |         | `T' <---- 2 bytes for Type
138  *     |     | |         |
139  *     |     | '---SNAP--' <-------- 6 bytes for SNAP
140  *     |     |
141  *     `-IV--' <-------------------- 4 bytes for IV (WEP)
142  *
143  *      SNAP HEADER
144  *
145  */
146 
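/* SNAP OUIs: EtherTypes 0x8137 (IPX) and 0x80f3 (AppleTalk AARP) are
 * encapsulated with the IEEE 802.1H bridge-tunnel OUI (00-00-F8); all other
 * protocols use the RFC 1042 OUI (00-00-00).
 */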
147 static u8 P802_1H_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0xf8 };
148 static u8 RFC1042_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0x00 };
149 
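/* Write an LLC/SNAP header followed by the 16-bit EtherType at @data and
 * return the number of bytes written (SNAP_SIZE + 2).
 */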
150 static int rtllib_put_snap(u8 *data, u16 h_proto)
151 {
152 	struct rtllib_snap_hdr *snap;
153 	u8 *oui;
154 
155 	snap = (struct rtllib_snap_hdr *)data;
156 	snap->dsap = 0xaa;
157 	snap->ssap = 0xaa;
158 	snap->ctrl = 0x03;
159 
160 	if (h_proto == 0x8137 || h_proto == 0x80f3)
161 		oui = P802_1H_OUI;
162 	else
163 		oui = RFC1042_OUI;
164 	snap->oui[0] = oui[0];
165 	snap->oui[1] = oui[1];
166 	snap->oui[2] = oui[2];
167 
168 	*(__be16 *)(data + SNAP_SIZE) = htons(h_proto);
169 
170 	return SNAP_SIZE + sizeof(u16);
171 }
172 
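/* Encrypt a single fragment in place with the current TX key.  Both the MSDU
 * and MPDU encryption hooks are called here because host-based fragmentation
 * is not supported.  Returns 0 on success, -1 on failure.
 */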
173 int rtllib_encrypt_fragment(struct rtllib_device *ieee, struct sk_buff *frag,
174 			    int hdr_len)
175 {
176 	struct lib80211_crypt_data *crypt = NULL;
177 	int res;
178 
179 	crypt = ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx];
180 
181 	if (!(crypt && crypt->ops)) {
182 		netdev_info(ieee->dev, "=========>%s(), crypt is null\n",
183 			    __func__);
184 		return -1;
185 	}
186 	/* To encrypt, frame format is:
187 	 * IV (4 bytes), clear payload (including SNAP), ICV (4 bytes)
188 	 */
189 
190 	/* Host-based IEEE 802.11 fragmentation for TX is not yet supported, so
191 	 * call both MSDU and MPDU encryption functions from here.
192 	 */
193 	atomic_inc(&crypt->refcnt);
194 	res = 0;
195 	if (crypt->ops->encrypt_msdu)
196 		res = crypt->ops->encrypt_msdu(frag, hdr_len, crypt->priv);
197 	if (res == 0 && crypt->ops->encrypt_mpdu)
198 		res = crypt->ops->encrypt_mpdu(frag, hdr_len, crypt->priv);
199 
200 	atomic_dec(&crypt->refcnt);
201 	if (res < 0) {
202 		netdev_info(ieee->dev, "%s: Encryption failed: len=%d.\n",
203 			    ieee->dev->name, frag->len);
204 		return -1;
205 	}
206 
207 	return 0;
208 }
209 
210 
211 void rtllib_txb_free(struct rtllib_txb *txb)
212 {
213 	if (unlikely(!txb))
214 		return;
215 	kfree(txb);
216 }
217 
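/* Allocate a TX block holding @nr_frags skbs of @txb_size bytes each.
 * Returns NULL on failure, releasing any fragments already allocated.
 */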
218 static struct rtllib_txb *rtllib_alloc_txb(int nr_frags, int txb_size,
219 					   gfp_t gfp_mask)
220 {
221 	struct rtllib_txb *txb;
222 	int i;
223 
	txb = kzalloc(sizeof(struct rtllib_txb) + (sizeof(u8 *) * nr_frags),
		      gfp_mask);
	if (!txb)
		return NULL;

230 	txb->nr_frags = nr_frags;
231 	txb->frag_size = cpu_to_le16(txb_size);
232 
233 	for (i = 0; i < nr_frags; i++) {
234 		txb->fragments[i] = dev_alloc_skb(txb_size);
235 		if (unlikely(!txb->fragments[i])) {
236 			i--;
237 			break;
238 		}
239 		memset(txb->fragments[i]->cb, 0, sizeof(txb->fragments[i]->cb));
240 	}
241 	if (unlikely(i != nr_frags)) {
242 		while (i >= 0)
243 			dev_kfree_skb_any(txb->fragments[i--]);
244 		kfree(txb);
245 		return NULL;
246 	}
247 	return txb;
248 }
249 
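/* Map the precedence bits of the IP TOS field to a QoS user priority (0-7).
 * Non-IP frames always get best-effort priority 0.
 */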
250 static int rtllib_classify(struct sk_buff *skb, u8 bIsAmsdu)
251 {
252 	struct ethhdr *eth;
253 	struct iphdr *ip;
254 
255 	eth = (struct ethhdr *)skb->data;
256 	if (eth->h_proto != htons(ETH_P_IP))
257 		return 0;
258 
259 #ifdef VERBOSE_DEBUG
260 	print_hex_dump_bytes("rtllib_classify(): ", DUMP_PREFIX_NONE, skb->data,
261 			     skb->len);
262 #endif
263 	ip = ip_hdr(skb);
264 	switch (ip->tos & 0xfc) {
265 	case 0x20:
266 		return 2;
267 	case 0x40:
268 		return 1;
269 	case 0x60:
270 		return 3;
271 	case 0x80:
272 		return 4;
273 	case 0xa0:
274 		return 5;
275 	case 0xc0:
276 		return 6;
277 	case 0xe0:
278 		return 7;
279 	default:
280 		return 0;
281 	}
282 }
283 
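/* Decide whether this QoS data frame may be aggregated into an A-MPDU and,
 * if no block ack agreement exists yet for its traffic stream, start the
 * ADDBA handshake.  A forced aggregation mode overrides the automatic
 * decision.
 */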
284 static void rtllib_tx_query_agg_cap(struct rtllib_device *ieee,
285 				    struct sk_buff *skb,
286 				    struct cb_desc *tcb_desc)
287 {
288 	struct rt_hi_throughput *pHTInfo = ieee->pHTInfo;
289 	struct tx_ts_record *pTxTs = NULL;
290 	struct rtllib_hdr_1addr *hdr = (struct rtllib_hdr_1addr *)skb->data;
291 
292 	if (rtllib_act_scanning(ieee, false))
293 		return;
294 
295 	if (!pHTInfo->bCurrentHTSupport || !pHTInfo->bEnableHT)
296 		return;
297 	if (!IsQoSDataFrame(skb->data))
298 		return;
299 	if (is_multicast_ether_addr(hdr->addr1))
300 		return;
301 
302 	if (tcb_desc->bdhcp || ieee->CntAfterLink < 2)
303 		return;
304 
305 	if (pHTInfo->IOTAction & HT_IOT_ACT_TX_NO_AGGREGATION)
306 		return;
307 
308 	if (!ieee->GetNmodeSupportBySecCfg(ieee->dev))
309 		return;
310 	if (pHTInfo->bCurrentAMPDUEnable) {
311 		if (!GetTs(ieee, (struct ts_common_info **)(&pTxTs), hdr->addr1,
312 		    skb->priority, TX_DIR, true)) {
313 			netdev_info(ieee->dev, "%s: can't get TS\n", __func__);
314 			return;
315 		}
		if (!pTxTs->TxAdmittedBARecord.bValid) {
			if (!(ieee->wpa_ie_len &&
			      ieee->pairwise_key_type == KEY_TYPE_NA) &&
			    tcb_desc->bdhcp != 1 &&
			    !pTxTs->bDisable_AddBa)
				TsStartAddBaProcess(ieee, pTxTs);
			goto FORCED_AGG_SETTING;
		} else if (!pTxTs->bUsingBa) {
			if (SN_LESS(pTxTs->TxAdmittedBARecord.BaStartSeqCtrl.field.SeqNum,
				    (pTxTs->TxCurSeq + 1) % 4096))
				pTxTs->bUsingBa = true;
330 			else
331 				goto FORCED_AGG_SETTING;
332 		}
333 		if (ieee->iw_mode == IW_MODE_INFRA) {
334 			tcb_desc->bAMPDUEnable = true;
335 			tcb_desc->ampdu_factor = pHTInfo->CurrentAMPDUFactor;
336 			tcb_desc->ampdu_density = pHTInfo->CurrentMPDUDensity;
337 		}
338 	}
339 FORCED_AGG_SETTING:
340 	switch (pHTInfo->ForcedAMPDUMode) {
341 	case HT_AGG_AUTO:
342 		break;
343 
344 	case HT_AGG_FORCE_ENABLE:
345 		tcb_desc->bAMPDUEnable = true;
346 		tcb_desc->ampdu_density = pHTInfo->ForcedMPDUDensity;
347 		tcb_desc->ampdu_factor = pHTInfo->ForcedAMPDUFactor;
348 		break;
349 
350 	case HT_AGG_FORCE_DISABLE:
351 		tcb_desc->bAMPDUEnable = false;
352 		tcb_desc->ampdu_density = 0;
353 		tcb_desc->ampdu_factor = 0;
354 		break;
355 	}
356 }
357 
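/* Use the short PLCP preamble when the network advertises support for it.
 * The lowest CCK rate (data_rate == 2, i.e. 1 Mb/s) always keeps the long
 * preamble.
 */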
static void rtllib_query_ShortPreambleMode(struct rtllib_device *ieee,
359 					   struct cb_desc *tcb_desc)
360 {
361 	tcb_desc->bUseShortPreamble = false;
	if (tcb_desc->data_rate == 2)
		return;
	if (ieee->current_network.capability &
	    WLAN_CAPABILITY_SHORT_PREAMBLE)
		tcb_desc->bUseShortPreamble = true;
367 }
368 
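/* Select the short guard interval when HT is active and the current channel
 * width (20 or 40 MHz) supports it, or when short GI is forced.
 */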
369 static void rtllib_query_HTCapShortGI(struct rtllib_device *ieee,
370 				      struct cb_desc *tcb_desc)
371 {
372 	struct rt_hi_throughput *pHTInfo = ieee->pHTInfo;
373 
374 	tcb_desc->bUseShortGI		= false;
375 
376 	if (!pHTInfo->bCurrentHTSupport || !pHTInfo->bEnableHT)
377 		return;
378 
379 	if (pHTInfo->bForcedShortGI) {
380 		tcb_desc->bUseShortGI = true;
381 		return;
382 	}
383 
	if (pHTInfo->bCurBW40MHz && pHTInfo->bCurShortGI40MHz)
		tcb_desc->bUseShortGI = true;
	else if (!pHTInfo->bCurBW40MHz && pHTInfo->bCurShortGI20MHz)
		tcb_desc->bUseShortGI = true;
388 }
389 
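/* Use a 40 MHz packet bandwidth only for unicast HT (MCS) rates while the
 * link is operating at 40 MHz and no forced fallback to 20 MHz TX is in
 * effect.
 */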
390 static void rtllib_query_BandwidthMode(struct rtllib_device *ieee,
391 				       struct cb_desc *tcb_desc)
392 {
393 	struct rt_hi_throughput *pHTInfo = ieee->pHTInfo;
394 
395 	tcb_desc->bPacketBW = false;
396 
397 	if (!pHTInfo->bCurrentHTSupport || !pHTInfo->bEnableHT)
398 		return;
399 
400 	if (tcb_desc->bMulticast || tcb_desc->bBroadcast)
401 		return;
402 
403 	if ((tcb_desc->data_rate & 0x80) == 0)
404 		return;
405 	if (pHTInfo->bCurBW40MHz && pHTInfo->bCurTxBW40MHz &&
406 	    !ieee->bandwidth_auto_switch.bforced_tx20Mhz)
407 		tcb_desc->bPacketBW = true;
408 }
409 
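/* Choose the RTS/CTS protection scheme for a unicast frame.  Legacy (non-N)
 * links use RTS above the RTS threshold or RTS/CTS when the network requests
 * protection; HT links additionally honour IOT workarounds, the current HT
 * operating mode and A-MPDU transmissions.  Broadcast and multicast frames
 * are never protected.
 */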
410 static void rtllib_query_protectionmode(struct rtllib_device *ieee,
411 					struct cb_desc *tcb_desc,
412 					struct sk_buff *skb)
413 {
414 	struct rt_hi_throughput *pHTInfo;
415 
416 	tcb_desc->bRTSSTBC			= false;
417 	tcb_desc->bRTSUseShortGI		= false;
418 	tcb_desc->bCTSEnable			= false;
419 	tcb_desc->RTSSC				= 0;
420 	tcb_desc->bRTSBW			= false;
421 
422 	if (tcb_desc->bBroadcast || tcb_desc->bMulticast)
423 		return;
424 
425 	if (is_broadcast_ether_addr(skb->data+16))
426 		return;
427 
428 	if (ieee->mode < IEEE_N_24G) {
429 		if (skb->len > ieee->rts) {
430 			tcb_desc->bRTSEnable = true;
431 			tcb_desc->rts_rate = MGN_24M;
432 		} else if (ieee->current_network.buseprotection) {
433 			tcb_desc->bRTSEnable = true;
434 			tcb_desc->bCTSEnable = true;
435 			tcb_desc->rts_rate = MGN_24M;
436 		}
437 		return;
438 	}
439 
440 	pHTInfo = ieee->pHTInfo;
441 
442 	while (true) {
443 		if (pHTInfo->IOTAction & HT_IOT_ACT_FORCED_CTS2SELF) {
			tcb_desc->bCTSEnable = true;
			tcb_desc->rts_rate = MGN_24M;
			tcb_desc->bRTSEnable = true;
447 			break;
448 		} else if (pHTInfo->IOTAction & (HT_IOT_ACT_FORCED_RTS |
449 			   HT_IOT_ACT_PURE_N_MODE)) {
			tcb_desc->bRTSEnable = true;
			tcb_desc->rts_rate = MGN_24M;
452 			break;
453 		}
454 		if (ieee->current_network.buseprotection) {
455 			tcb_desc->bRTSEnable = true;
456 			tcb_desc->bCTSEnable = true;
457 			tcb_desc->rts_rate = MGN_24M;
458 			break;
459 		}
		if (pHTInfo->bCurrentHTSupport && pHTInfo->bEnableHT) {
461 			u8 HTOpMode = pHTInfo->CurrentOpMode;
462 
463 			if ((pHTInfo->bCurBW40MHz && (HTOpMode == 2 ||
464 			     HTOpMode == 3)) ||
465 			     (!pHTInfo->bCurBW40MHz && HTOpMode == 3)) {
466 				tcb_desc->rts_rate = MGN_24M;
467 				tcb_desc->bRTSEnable = true;
468 				break;
469 			}
470 		}
471 		if (skb->len > ieee->rts) {
472 			tcb_desc->rts_rate = MGN_24M;
473 			tcb_desc->bRTSEnable = true;
474 			break;
475 		}
476 		if (tcb_desc->bAMPDUEnable) {
477 			tcb_desc->rts_rate = MGN_24M;
478 			tcb_desc->bRTSEnable = false;
479 			break;
480 		}
481 		goto NO_PROTECTION;
482 	}
483 	if (ieee->current_network.capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
484 		tcb_desc->bUseShortPreamble = true;
485 	if (ieee->iw_mode == IW_MODE_MASTER)
486 		goto NO_PROTECTION;
487 	return;
488 NO_PROTECTION:
489 	tcb_desc->bRTSEnable	= false;
490 	tcb_desc->bCTSEnable	= false;
491 	tcb_desc->rts_rate	= 0;
492 	tcb_desc->RTSSC		= 0;
493 	tcb_desc->bRTSBW	= false;
494 }
495 
496 
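/* Copy the device-wide rate-fallback and driver-assigned-rate settings into
 * the TX descriptor and select the rate adaptive (RATR) table index for
 * infrastructure and ad-hoc links.
 */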
497 static void rtllib_txrate_selectmode(struct rtllib_device *ieee,
498 				     struct cb_desc *tcb_desc)
499 {
500 	if (ieee->bTxDisableRateFallBack)
501 		tcb_desc->bTxDisableRateFallBack = true;
502 
503 	if (ieee->bTxUseDriverAssingedRate)
504 		tcb_desc->bTxUseDriverAssingedRate = true;
505 	if (!tcb_desc->bTxDisableRateFallBack ||
506 	    !tcb_desc->bTxUseDriverAssingedRate) {
507 		if (ieee->iw_mode == IW_MODE_INFRA ||
508 		    ieee->iw_mode == IW_MODE_ADHOC)
509 			tcb_desc->RATRIndex = 0;
510 	}
511 }
512 
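/* Return the next per-TID sequence number for a unicast QoS data frame and
 * advance the counter (modulo 4096) of its traffic stream.  Multicast and
 * non-QoS frames return 0.
 */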
513 static u16 rtllib_query_seqnum(struct rtllib_device *ieee, struct sk_buff *skb,
514 			       u8 *dst)
515 {
516 	u16 seqnum = 0;
517 
518 	if (is_multicast_ether_addr(dst))
519 		return 0;
520 	if (IsQoSDataFrame(skb->data)) {
521 		struct tx_ts_record *pTS = NULL;
522 
523 		if (!GetTs(ieee, (struct ts_common_info **)(&pTS), dst,
524 		    skb->priority, TX_DIR, true))
525 			return 0;
526 		seqnum = pTS->TxCurSeq;
		pTS->TxCurSeq = (pTS->TxCurSeq + 1) % 4096;
528 		return seqnum;
529 	}
530 	return 0;
531 }
532 
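/* Downgrade the WMM access category of a frame by one step
 * (VO -> VI -> BE -> BK).  Returns 0 on success or -1 when the priority is
 * already at the lowest category.
 */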
533 static int wme_downgrade_ac(struct sk_buff *skb)
534 {
535 	switch (skb->priority) {
536 	case 6:
537 	case 7:
538 		skb->priority = 5; /* VO -> VI */
539 		return 0;
540 	case 4:
541 	case 5:
542 		skb->priority = 3; /* VI -> BE */
543 		return 0;
544 	case 0:
545 	case 3:
546 		skb->priority = 1; /* BE -> BK */
547 		return 0;
548 	default:
549 		return -1;
550 	}
551 }
552 
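/* Return the current TX rate: the legacy rate in non-HT modes, otherwise the
 * HT operational rate, falling back to the legacy rate with bit 7 (the MCS
 * flag) masked off.
 */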
553 static u8 rtllib_current_rate(struct rtllib_device *ieee)
554 {
555 	if (ieee->mode & IEEE_MODE_MASK)
556 		return ieee->rate;
557 
558 	if (ieee->HTCurrentOperaRate)
559 		return ieee->HTCurrentOperaRate;
560 	else
561 		return ieee->rate & 0x7F;
562 }
563 
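/* Core TX path: classify an outgoing Ethernet frame, build the 802.11
 * header, split the payload into (optionally encrypted) fragments inside a
 * freshly allocated rtllib_txb and hand it to the softmac queue or the
 * driver's hard_start_xmit handler.  In monitor or raw-TX mode the frame is
 * passed through unmodified.
 */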
564 static int rtllib_xmit_inter(struct sk_buff *skb, struct net_device *dev)
565 {
566 	struct rtllib_device *ieee = (struct rtllib_device *)
567 				     netdev_priv_rsl(dev);
568 	struct rtllib_txb *txb = NULL;
569 	struct rtllib_hdr_3addrqos *frag_hdr;
570 	int i, bytes_per_frag, nr_frags, bytes_last_frag, frag_size;
571 	unsigned long flags;
572 	struct net_device_stats *stats = &ieee->stats;
573 	int ether_type = 0, encrypt;
574 	int bytes, fc, qos_ctl = 0, hdr_len;
575 	struct sk_buff *skb_frag;
576 	struct rtllib_hdr_3addrqos header = { /* Ensure zero initialized */
577 		.duration_id = 0,
578 		.seq_ctl = 0,
579 		.qos_ctl = 0
580 	};
581 	int qos_activated = ieee->current_network.qos_data.active;
582 	u8 dest[ETH_ALEN];
583 	u8 src[ETH_ALEN];
584 	struct lib80211_crypt_data *crypt = NULL;
585 	struct cb_desc *tcb_desc;
586 	u8 bIsMulticast = false;
587 	u8 IsAmsdu = false;
588 	bool	bdhcp = false;
589 
590 	spin_lock_irqsave(&ieee->lock, flags);
591 
592 	/* If there is no driver handler to take the TXB, don't bother
593 	 * creating it...
594 	 */
595 	if ((!ieee->hard_start_xmit && !(ieee->softmac_features &
596 	   IEEE_SOFTMAC_TX_QUEUE)) ||
597 	   ((!ieee->softmac_data_hard_start_xmit &&
598 	   (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE)))) {
599 		netdev_warn(ieee->dev, "No xmit handler.\n");
600 		goto success;
601 	}
602 
603 
604 	if (likely(ieee->raw_tx == 0)) {
605 		if (unlikely(skb->len < SNAP_SIZE + sizeof(u16))) {
606 			netdev_warn(ieee->dev, "skb too small (%d).\n",
607 				    skb->len);
608 			goto success;
609 		}
610 		/* Save source and destination addresses */
611 		ether_addr_copy(dest, skb->data);
612 		ether_addr_copy(src, skb->data + ETH_ALEN);
613 
614 		memset(skb->cb, 0, sizeof(skb->cb));
615 		ether_type = ntohs(((struct ethhdr *)skb->data)->h_proto);
616 
617 		if (ieee->iw_mode == IW_MODE_MONITOR) {
618 			txb = rtllib_alloc_txb(1, skb->len, GFP_ATOMIC);
619 			if (unlikely(!txb)) {
620 				netdev_warn(ieee->dev,
621 					    "Could not allocate TXB\n");
622 				goto failed;
623 			}
624 
625 			txb->encrypted = 0;
626 			txb->payload_size = cpu_to_le16(skb->len);
627 			skb_put_data(txb->fragments[0], skb->data, skb->len);
628 
629 			goto success;
630 		}
631 
632 		if (skb->len > 282) {
633 			if (ether_type == ETH_P_IP) {
634 				const struct iphdr *ip = (struct iphdr *)
635 					((u8 *)skb->data+14);
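				/* The low bytes of the UDP source and
				 * destination ports are compared against
				 * 68/67, i.e. DHCP client/server traffic;
				 * entering power save is delayed so the DHCP
				 * exchange can complete.
				 */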
636 				if (ip->protocol == IPPROTO_UDP) {
637 					struct udphdr *udp;
638 
639 					udp = (struct udphdr *)((u8 *)ip +
640 					      (ip->ihl << 2));
641 					if (((((u8 *)udp)[1] == 68) &&
642 					   (((u8 *)udp)[3] == 67)) ||
643 					   ((((u8 *)udp)[1] == 67) &&
644 					   (((u8 *)udp)[3] == 68))) {
645 						bdhcp = true;
646 						ieee->LPSDelayCnt = 200;
647 					}
648 				}
649 			} else if (ether_type == ETH_P_ARP) {
				netdev_info(ieee->dev,
					    "TX ARP packet, extending LPS delay\n");
652 				bdhcp = true;
653 				ieee->LPSDelayCnt =
654 					 ieee->current_network.tim.tim_count;
655 			}
656 		}
657 
658 		skb->priority = rtllib_classify(skb, IsAmsdu);
659 		crypt = ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx];
660 		encrypt = !(ether_type == ETH_P_PAE && ieee->ieee802_1x) &&
661 			ieee->host_encrypt && crypt && crypt->ops;
662 		if (!encrypt && ieee->ieee802_1x &&
663 		    ieee->drop_unencrypted && ether_type != ETH_P_PAE) {
664 			stats->tx_dropped++;
665 			goto success;
666 		}
667 		if (crypt && !encrypt && ether_type == ETH_P_PAE) {
668 			struct eapol *eap = (struct eapol *)(skb->data +
669 				sizeof(struct ethhdr) - SNAP_SIZE -
670 				sizeof(u16));
671 			netdev_dbg(ieee->dev,
672 				   "TX: IEEE 802.11 EAPOL frame: %s\n",
673 				   eap_get_type(eap->type));
674 		}
675 
676 		/* Advance the SKB to the start of the payload */
677 		skb_pull(skb, sizeof(struct ethhdr));
678 
679 		/* Determine total amount of storage required for TXB packets */
680 		bytes = skb->len + SNAP_SIZE + sizeof(u16);
681 
682 		if (encrypt)
683 			fc = RTLLIB_FTYPE_DATA | RTLLIB_FCTL_WEP;
684 		else
685 			fc = RTLLIB_FTYPE_DATA;
686 
687 		if (qos_activated)
688 			fc |= RTLLIB_STYPE_QOS_DATA;
689 		else
690 			fc |= RTLLIB_STYPE_DATA;
691 
692 		if (ieee->iw_mode == IW_MODE_INFRA) {
693 			fc |= RTLLIB_FCTL_TODS;
694 			/* To DS: Addr1 = BSSID, Addr2 = SA,
695 			 * Addr3 = DA
696 			 */
697 			ether_addr_copy(header.addr1,
698 					ieee->current_network.bssid);
699 			ether_addr_copy(header.addr2, src);
700 			if (IsAmsdu)
701 				ether_addr_copy(header.addr3,
702 						ieee->current_network.bssid);
703 			else
704 				ether_addr_copy(header.addr3, dest);
705 		} else if (ieee->iw_mode == IW_MODE_ADHOC) {
706 			/* not From/To DS: Addr1 = DA, Addr2 = SA,
707 			 * Addr3 = BSSID
708 			 */
709 			ether_addr_copy(header.addr1, dest);
710 			ether_addr_copy(header.addr2, src);
711 			ether_addr_copy(header.addr3,
712 					ieee->current_network.bssid);
713 		}
714 
715 		bIsMulticast = is_multicast_ether_addr(header.addr1);
716 
717 		header.frame_ctl = cpu_to_le16(fc);
718 
719 		/* Determine fragmentation size based on destination (multicast
720 		 * and broadcast are not fragmented)
721 		 */
722 		if (bIsMulticast) {
723 			frag_size = MAX_FRAG_THRESHOLD;
724 			qos_ctl |= QOS_CTL_NOTCONTAIN_ACK;
725 		} else {
726 			frag_size = ieee->fts;
727 			qos_ctl = 0;
728 		}
729 
730 		if (qos_activated) {
731 			hdr_len = RTLLIB_3ADDR_LEN + 2;
732 
			/* In case we are a client, verify that ACM is not
			 * set for this AC.
			 */
			while (unlikely(ieee->wmm_acm & (0x01 << skb->priority))) {
				netdev_info(ieee->dev, "skb->priority = %x\n",
					    skb->priority);
				if (wme_downgrade_ac(skb))
					break;
				netdev_info(ieee->dev, "converted skb->priority = %x\n",
					    skb->priority);
			}
741 			}
742 
743 			qos_ctl |= skb->priority;
744 			header.qos_ctl = cpu_to_le16(qos_ctl & RTLLIB_QOS_TID);
745 
746 		} else {
747 			hdr_len = RTLLIB_3ADDR_LEN;
748 		}
749 		/* Determine amount of payload per fragment.  Regardless of if
750 		 * this stack is providing the full 802.11 header, one will
751 		 * eventually be affixed to this fragment -- so we must account
752 		 * for it when determining the amount of payload space.
753 		 */
754 		bytes_per_frag = frag_size - hdr_len;
755 		if (ieee->config &
756 		   (CFG_RTLLIB_COMPUTE_FCS | CFG_RTLLIB_RESERVE_FCS))
757 			bytes_per_frag -= RTLLIB_FCS_LEN;
758 
759 		/* Each fragment may need to have room for encrypting
760 		 * pre/postfix
761 		 */
762 		if (encrypt) {
763 			bytes_per_frag -= crypt->ops->extra_mpdu_prefix_len +
764 				crypt->ops->extra_mpdu_postfix_len +
765 				crypt->ops->extra_msdu_prefix_len +
766 				crypt->ops->extra_msdu_postfix_len;
767 		}
768 		/* Number of fragments is the total bytes_per_frag /
769 		 * payload_per_fragment
770 		 */
771 		nr_frags = bytes / bytes_per_frag;
772 		bytes_last_frag = bytes % bytes_per_frag;
773 		if (bytes_last_frag)
774 			nr_frags++;
775 		else
776 			bytes_last_frag = bytes_per_frag;
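		/* For example, bytes = 1200 with bytes_per_frag = 492 gives
		 * nr_frags = 3 and bytes_last_frag = 216.
		 */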
777 
778 		/* When we allocate the TXB we allocate enough space for the
779 		 * reserve and full fragment bytes (bytes_per_frag doesn't
780 		 * include prefix, postfix, header, FCS, etc.)
781 		 */
782 		txb = rtllib_alloc_txb(nr_frags, frag_size +
783 				       ieee->tx_headroom, GFP_ATOMIC);
784 		if (unlikely(!txb)) {
785 			netdev_warn(ieee->dev, "Could not allocate TXB\n");
786 			goto failed;
787 		}
788 		txb->encrypted = encrypt;
789 		txb->payload_size = cpu_to_le16(bytes);
790 
791 		if (qos_activated)
792 			txb->queue_index = UP2AC(skb->priority);
793 		else
794 			txb->queue_index = WME_AC_BE;
795 
796 		for (i = 0; i < nr_frags; i++) {
797 			skb_frag = txb->fragments[i];
798 			tcb_desc = (struct cb_desc *)(skb_frag->cb +
799 				    MAX_DEV_ADDR_SIZE);
800 			if (qos_activated) {
801 				skb_frag->priority = skb->priority;
802 				tcb_desc->queue_index =  UP2AC(skb->priority);
803 			} else {
804 				skb_frag->priority = WME_AC_BE;
805 				tcb_desc->queue_index = WME_AC_BE;
806 			}
807 			skb_reserve(skb_frag, ieee->tx_headroom);
808 
809 			if (encrypt) {
810 				if (ieee->hwsec_active)
811 					tcb_desc->bHwSec = 1;
812 				else
813 					tcb_desc->bHwSec = 0;
814 				skb_reserve(skb_frag,
815 					    crypt->ops->extra_mpdu_prefix_len +
816 					    crypt->ops->extra_msdu_prefix_len);
817 			} else {
818 				tcb_desc->bHwSec = 0;
819 			}
820 			frag_hdr = skb_put_data(skb_frag, &header, hdr_len);
821 
822 			/* If this is not the last fragment, then add the
823 			 * MOREFRAGS bit to the frame control
824 			 */
825 			if (i != nr_frags - 1) {
826 				frag_hdr->frame_ctl = cpu_to_le16(
827 					fc | RTLLIB_FCTL_MOREFRAGS);
828 				bytes = bytes_per_frag;
829 
830 			} else {
831 				/* The last fragment has the remaining length */
832 				bytes = bytes_last_frag;
833 			}
834 			if ((qos_activated) && (!bIsMulticast)) {
835 				frag_hdr->seq_ctl =
836 					 cpu_to_le16(rtllib_query_seqnum(ieee, skb_frag,
837 							     header.addr1));
838 				frag_hdr->seq_ctl =
839 					 cpu_to_le16(le16_to_cpu(frag_hdr->seq_ctl)<<4 | i);
840 			} else {
841 				frag_hdr->seq_ctl =
842 					 cpu_to_le16(ieee->seq_ctrl[0]<<4 | i);
843 			}
844 			/* Put a SNAP header on the first fragment */
845 			if (i == 0) {
846 				rtllib_put_snap(
847 					skb_put(skb_frag, SNAP_SIZE +
848 					sizeof(u16)), ether_type);
849 				bytes -= SNAP_SIZE + sizeof(u16);
850 			}
851 
852 			skb_put_data(skb_frag, skb->data, bytes);
853 
854 			/* Advance the SKB... */
855 			skb_pull(skb, bytes);
856 
857 			/* Encryption routine will move the header forward in
858 			 * order to insert the IV between the header and the
859 			 * payload
860 			 */
861 			if (encrypt)
862 				rtllib_encrypt_fragment(ieee, skb_frag,
863 							hdr_len);
864 			if (ieee->config &
865 			   (CFG_RTLLIB_COMPUTE_FCS | CFG_RTLLIB_RESERVE_FCS))
866 				skb_put(skb_frag, 4);
867 		}
868 
869 		if ((qos_activated) && (!bIsMulticast)) {
870 			if (ieee->seq_ctrl[UP2AC(skb->priority) + 1] == 0xFFF)
871 				ieee->seq_ctrl[UP2AC(skb->priority) + 1] = 0;
872 			else
873 				ieee->seq_ctrl[UP2AC(skb->priority) + 1]++;
874 		} else {
875 			if (ieee->seq_ctrl[0] == 0xFFF)
876 				ieee->seq_ctrl[0] = 0;
877 			else
				ieee->seq_ctrl[0]++;
879 		}
880 	} else {
881 		if (unlikely(skb->len < sizeof(struct rtllib_hdr_3addr))) {
882 			netdev_warn(ieee->dev, "skb too small (%d).\n",
883 				    skb->len);
884 			goto success;
885 		}
886 
887 		txb = rtllib_alloc_txb(1, skb->len, GFP_ATOMIC);
888 		if (!txb) {
889 			netdev_warn(ieee->dev, "Could not allocate TXB\n");
890 			goto failed;
891 		}
892 
893 		txb->encrypted = 0;
894 		txb->payload_size = cpu_to_le16(skb->len);
895 		skb_put_data(txb->fragments[0], skb->data, skb->len);
896 	}
897 
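	/* Fill in the control-block descriptor of the first fragment.  EAPOL
	 * and DHCP frames are pinned to a basic/low rate with rate fallback
	 * disabled (presumably to keep those handshakes robust), while other
	 * frames go through the normal rate, aggregation, preamble, bandwidth
	 * and protection queries.
	 */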
898  success:
899 	if (txb) {
900 		struct cb_desc *tcb_desc = (struct cb_desc *)
901 				(txb->fragments[0]->cb + MAX_DEV_ADDR_SIZE);
902 		tcb_desc->bTxEnableFwCalcDur = 1;
903 		tcb_desc->priority = skb->priority;
904 
905 		if (ether_type == ETH_P_PAE) {
906 			if (ieee->pHTInfo->IOTAction &
907 			    HT_IOT_ACT_WA_IOT_Broadcom) {
908 				tcb_desc->data_rate =
909 					 MgntQuery_TxRateExcludeCCKRates(ieee);
910 				tcb_desc->bTxDisableRateFallBack = false;
911 			} else {
912 				tcb_desc->data_rate = ieee->basic_rate;
913 				tcb_desc->bTxDisableRateFallBack = 1;
914 			}
915 
916 
917 			tcb_desc->RATRIndex = 7;
918 			tcb_desc->bTxUseDriverAssingedRate = 1;
919 		} else {
920 			if (is_multicast_ether_addr(header.addr1))
921 				tcb_desc->bMulticast = 1;
922 			if (is_broadcast_ether_addr(header.addr1))
923 				tcb_desc->bBroadcast = 1;
924 			rtllib_txrate_selectmode(ieee, tcb_desc);
			if (tcb_desc->bMulticast || tcb_desc->bBroadcast)
926 				tcb_desc->data_rate = ieee->basic_rate;
927 			else
928 				tcb_desc->data_rate = rtllib_current_rate(ieee);
929 
930 			if (bdhcp) {
931 				if (ieee->pHTInfo->IOTAction &
932 				    HT_IOT_ACT_WA_IOT_Broadcom) {
933 					tcb_desc->data_rate =
934 					   MgntQuery_TxRateExcludeCCKRates(ieee);
935 					tcb_desc->bTxDisableRateFallBack = false;
936 				} else {
937 					tcb_desc->data_rate = MGN_1M;
938 					tcb_desc->bTxDisableRateFallBack = 1;
939 				}
940 
941 
942 				tcb_desc->RATRIndex = 7;
943 				tcb_desc->bTxUseDriverAssingedRate = 1;
944 				tcb_desc->bdhcp = 1;
945 			}
946 
			rtllib_query_ShortPreambleMode(ieee, tcb_desc);
948 			rtllib_tx_query_agg_cap(ieee, txb->fragments[0],
949 						tcb_desc);
950 			rtllib_query_HTCapShortGI(ieee, tcb_desc);
951 			rtllib_query_BandwidthMode(ieee, tcb_desc);
952 			rtllib_query_protectionmode(ieee, tcb_desc,
953 						    txb->fragments[0]);
954 		}
955 	}
956 	spin_unlock_irqrestore(&ieee->lock, flags);
957 	dev_kfree_skb_any(skb);
958 	if (txb) {
959 		if (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE) {
960 			dev->stats.tx_packets++;
961 			dev->stats.tx_bytes += le16_to_cpu(txb->payload_size);
962 			rtllib_softmac_xmit(txb, ieee);
963 		} else {
964 			if ((*ieee->hard_start_xmit)(txb, dev) == 0) {
965 				stats->tx_packets++;
966 				stats->tx_bytes += le16_to_cpu(txb->payload_size);
967 				return 0;
968 			}
969 			rtllib_txb_free(txb);
970 		}
971 	}
972 
973 	return 0;
974 
975  failed:
976 	spin_unlock_irqrestore(&ieee->lock, flags);
977 	netif_stop_queue(dev);
978 	stats->tx_errors++;
979 	return 1;
980 
981 }
982 
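/* Exported entry point for the driver's transmit path: clear the skb control
 * block and run the frame through rtllib_xmit_inter().
 */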
983 int rtllib_xmit(struct sk_buff *skb, struct net_device *dev)
984 {
985 	memset(skb->cb, 0, sizeof(skb->cb));
986 	return rtllib_xmit_inter(skb, dev);
987 }
988 EXPORT_SYMBOL(rtllib_xmit);
989