1 /******************************************************************************
2  *
3  * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms of version 2 of the GNU General Public License as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12  * more details.
13  *
14  ******************************************************************************/
15 #ifndef _RTW_XMIT_H_
16 #define _RTW_XMIT_H_
17 
18 
/* Normal TX buffer pool: size of each buffer and number of buffers. */
#define MAX_XMITBUF_SZ	(20480)	/*  20k */

#define NR_XMITBUFF	(16)

#define XMITBUF_ALIGN_SZ 512

/*  xmit extension buff definition (small buffers, e.g. for mgmt frames) */
#define MAX_XMIT_EXTBUF_SZ	(1536)
#define NR_XMIT_EXTBUFF	(32)

#define MAX_CMDBUF_SZ	(5120)	/* 4096) */

#define MAX_NUMBLKS		(1)

/* Software AC (access category) queue selectors. */
#define XMIT_VO_QUEUE (0)
#define XMIT_VI_QUEUE (1)
#define XMIT_BE_QUEUE (2)
#define XMIT_BK_QUEUE (3)

/* Hardware queue indices (VO/VI/BE/BK plus beacon, mgmt, high, txcmd). */
#define VO_QUEUE_INX		0
#define VI_QUEUE_INX		1
#define BE_QUEUE_INX		2
#define BK_QUEUE_INX		3
#define BCN_QUEUE_INX		4
#define MGT_QUEUE_INX		5
#define HIGH_QUEUE_INX		6
#define TXCMD_QUEUE_INX	7

/* Total number of hardware queues listed above. */
#define HW_QUEUE_ENTRY	8
48 
/*
 * Build the 4-byte WEP IV into pattrib_iv[] from the 24-bit TX sequence
 * counter, placing the 2-bit key index in the top bits of byte 3, then
 * advance the counter (wraps to 0 after 0xffffff).
 * NOTE(review): assumes dot11txpn is a union with a ._byte_.TSC0..TSC2
 * view and a .val integer view — defined elsewhere; confirm layout.
 */
#define WEP_IV(pattrib_iv, dot11txpn, keyidx)\
do{\
	pattrib_iv[0] = dot11txpn._byte_.TSC0;\
	pattrib_iv[1] = dot11txpn._byte_.TSC1;\
	pattrib_iv[2] = dot11txpn._byte_.TSC2;\
	pattrib_iv[3] = ((keyidx & 0x3)<<6);\
	dot11txpn.val = (dot11txpn.val == 0xffffff) ? 0: (dot11txpn.val+1);\
}while (0)
57 
58 
/*
 * Build the 8-byte TKIP IV/ExtIV header into pattrib_iv[]:
 * bytes 0-2 are TSC1, WEPSeed ((TSC1|0x20)&0x7f, avoids weak-key IVs),
 * TSC0; byte 3 carries the ExtIV flag (BIT(5)) and 2-bit key index;
 * bytes 4-7 are TSC2..TSC5. The 48-bit counter then advances, wrapping
 * to 0 after 0xffffffffffff.
 * NOTE(review): byte layout matches the 802.11 TKIP header format —
 * confirm dot11txpn._byte_ union layout elsewhere in the driver.
 */
#define TKIP_IV(pattrib_iv, dot11txpn, keyidx)\
do{\
	pattrib_iv[0] = dot11txpn._byte_.TSC1;\
	pattrib_iv[1] = (dot11txpn._byte_.TSC1 | 0x20) & 0x7f;\
	pattrib_iv[2] = dot11txpn._byte_.TSC0;\
	pattrib_iv[3] = BIT(5) | ((keyidx & 0x3)<<6);\
	pattrib_iv[4] = dot11txpn._byte_.TSC2;\
	pattrib_iv[5] = dot11txpn._byte_.TSC3;\
	pattrib_iv[6] = dot11txpn._byte_.TSC4;\
	pattrib_iv[7] = dot11txpn._byte_.TSC5;\
	dot11txpn.val = dot11txpn.val == 0xffffffffffffULL ? 0: (dot11txpn.val+1);\
}while (0)
71 
/*
 * Build the 8-byte CCMP (AES) header into pattrib_iv[]:
 * PN0, PN1, reserved (0), ExtIV flag (BIT(5)) | key index, PN2..PN5.
 * The 48-bit packet number then advances, wrapping to 0 after
 * 0xffffffffffff.
 */
#define AES_IV(pattrib_iv, dot11txpn, keyidx)\
do{\
	pattrib_iv[0] = dot11txpn._byte_.TSC0;\
	pattrib_iv[1] = dot11txpn._byte_.TSC1;\
	pattrib_iv[2] = 0;\
	pattrib_iv[3] = BIT(5) | ((keyidx & 0x3)<<6);\
	pattrib_iv[4] = dot11txpn._byte_.TSC2;\
	pattrib_iv[5] = dot11txpn._byte_.TSC3;\
	pattrib_iv[6] = dot11txpn._byte_.TSC4;\
	pattrib_iv[7] = dot11txpn._byte_.TSC5;\
	dot11txpn.val = dot11txpn.val == 0xffffffffffffULL ? 0: (dot11txpn.val+1);\
}while (0)
84 
85 
/* Number of hw_xmit entries (one per data AC: VO/VI/BE/BK). */
#define HWXMIT_ENTRY	4

/*  For Buffer Descriptor ring architecture */
#define TXDESC_SIZE 40

/* Payload starts immediately after the descriptor. */
#define TXDESC_OFFSET TXDESC_SIZE
92 
/* TX descriptor sub-channel setting for 40 MHz operation.
 * NOTE(review): names suggest upper/lower 20 MHz half vs. duplicate
 * transmission — confirm against the HAL descriptor-fill code. */
enum TXDESC_SC{
	SC_DONT_CARE = 0x00,
	SC_UPPER = 0x01,
	SC_LOWER = 0x02,
	SC_DUPLICATE = 0x03
};
99 
#define TXDESC_40_BYTES

/* Hardware TX descriptor: little-endian 32-bit words consumed by the
 * NIC DMA engine. The word count (8, 10, or 16) is selected by the
 * TXDESC_40_BYTES / TXDESC_64_BYTES configuration macros and must match
 * TXDESC_SIZE above. */
struct tx_desc {
	__le32 txdw0;
	__le32 txdw1;
	__le32 txdw2;
	__le32 txdw3;
	__le32 txdw4;
	__le32 txdw5;
	__le32 txdw6;
	__le32 txdw7;

#if defined(TXDESC_40_BYTES) || defined(TXDESC_64_BYTES)
	__le32 txdw8;
	__le32 txdw9;
#endif /*  TXDESC_40_BYTES */

#ifdef TXDESC_64_BYTES
	__le32 txdw10;
	__le32 txdw11;

	/*  2008/05/15 MH Because PCIE HW memory R/W 4K limit. And now,  our descriptor */
	/*  size is 40 bytes. If you use more than 102 descriptor(103*40>4096), HW will execute */
	/*  memoryR/W CRC error. And then all DMA fetch will fail. We must decrease descriptor */
	/*  number or enlarge descriptor size as 64 bytes. */
	__le32 txdw12;
	__le32 txdw13;
	__le32 txdw14;
	__le32 txdw15;
#endif
};
131 
/* Word-addressable view of a tx_desc; TXDESC_SIZE>>2 gives the number
 * of 32-bit words, so TXDESC_SIZE must be a multiple of 4. */
union txdesc {
	struct tx_desc txdesc;
	unsigned int value[TXDESC_SIZE>>2];
};
136 
/* Per-AC scheduling entry: the queue of stations that have frames
 * pending on this access category, plus an activity counter. */
struct	hw_xmit	{
	/* _lock xmit_lock; */
	/* struct list_head	pending; */
	struct __queue *sta_queue;
	/* struct hw_txqueue *phwtxqueue; */
	/* sint	txcmdcnt; */
	int	accnt;	/* NOTE(review): presumably frames pending on this AC — confirm in rtw_xmit.c */
};
145 
146 /* reduce size */
/* reduce size */
/* Per-packet TX attributes, filled during classification/coalescing and
 * consumed when building the WLAN header and TX descriptor. */
struct pkt_attrib
{
	u8 type;		/* 802.11 frame type */
	u8 subtype;		/* 802.11 frame subtype */
	u8 bswenc;		/* use software encryption for this frame */
	u8 dhcp_pkt;		/* frame is DHCP (may get special rate/queue handling) */
	u16 ether_type;
	u16 seqnum;
	u16 pkt_hdrlen;	/* the original 802.3 pkt header len */
	u16 hdrlen;		/* the WLAN Header Len */
	u32 pktlen;		/* the original 802.3 pkt raw_data len (not include ether_hdr data) */
	u32 last_txcmdsz;
	u8 nr_frags;		/* number of fragments this packet was split into */
	u8 encrypt;	/* when 0 indicate no encrypt. when non-zero, indicate the encrypt algorith */
	u8 iv_len;
	u8 icv_len;
	u8 iv[18];		/* pre-built IV bytes (see WEP_IV/TKIP_IV/AES_IV) */
	u8 icv[16];
	u8 priority;		/* user priority (TID) */
	u8 ack_policy;
	u8 mac_id;
	u8 vcs_mode;	/* virtual carrier sense method */
	u8 dst[ETH_ALEN];	/* destination address */
	u8 src[ETH_ALEN];	/* source address */
	u8 ta[ETH_ALEN];	/* transmitter address */
	u8 ra[ETH_ALEN];	/* receiver address */
	u8 key_idx;
	u8 qos_en;		/* QoS (WMM) data frame */
	u8 ht_en;		/* HT (802.11n) enabled for this peer */
	u8 raid;/* rate adpative id */
	u8 bwmode;
	u8 ch_offset;/* PRIME_CHNL_OFFSET */
	u8 sgi;/* short GI */
	u8 ampdu_en;/* tx ampdu enable */
	u8 ampdu_spacing; /* ampdu_min_spacing for peer sta's rx */
	u8 mdata;/* more data bit */
	u8 pctrl;/* per packet txdesc control enable */
	u8 triggered;/* for ap mode handling Power Saving sta */
	u8 qsel;		/* hardware queue selection */
	u8 order;/* order bit */
	u8 eosp;
	u8 rate;
	u8 intel_proxim;
	u8 retry_ctrl;
	u8   mbssid;
	u8 ldpc;
	u8 stbc;
	struct sta_info * psta;	/* destination station entry */

	u8 rtsen;		/* RTS/CTS protection enabled */
	u8 cts2self;
	union Keytype	dot11tkiptxmickey;	/* TKIP TX MIC key */
	/* union Keytype	dot11tkiprxmickey; */
	union Keytype	dot118021x_UncstKey;	/* 802.1x pairwise (unicast) key */

	u8 icmp_pkt;

};
205 
/* Reserved headroom for building the WLAN header in front of the payload. */
#define WLANHDR_OFFSET	64

/* Values stored in struct xmit_frame.frame_tag to classify the frame. */
#define NULL_FRAMETAG		(0x0)
#define DATA_FRAMETAG		0x01
#define L2_FRAMETAG		0x02
#define MGNT_FRAMETAG		0x03
#define AMSDU_FRAMETAG	0x04

#define EII_FRAMETAG		0x05
#define IEEE8023_FRAMETAG  0x06

#define MP_FRAMETAG		0x07

#define TXAGG_FRAMETAG	0x08
220 
/* Values for struct xmit_buf.buf_tag: which pool the buffer came from. */
enum {
	XMITBUF_DATA = 0,
	XMITBUF_MGNT = 1,
	XMITBUF_CMD = 2,
};
226 
/* Completion context for a submitted TX operation: the submitter waits
 * on 'done' (see rtw_sctx_wait) and the completer stores an RTW_SCTX_*
 * code in 'status' (see rtw_sctx_done_err). */
struct  submit_ctx{
	unsigned long submit_time; /* time of submission — presumably jiffies; TODO confirm */
	u32 timeout_ms; /* 0: wait forever, >0: up to ms waiting
			 * (NOTE(review): field is unsigned, so the historical
			 * "<0: not synchronous" case is unrepresentable here) */
	int status; /* status for operation, one of the RTW_SCTX_* codes */
	struct completion done;
};
233 
/* Status codes stored in submit_ctx.status. RTW_SCTX_SUBMITTED means the
 * operation is still in flight; 0 is success; the rest are failure causes. */
enum {
	RTW_SCTX_SUBMITTED = -1,
	RTW_SCTX_DONE_SUCCESS = 0,
	RTW_SCTX_DONE_UNKNOWN,
	RTW_SCTX_DONE_TIMEOUT,
	RTW_SCTX_DONE_BUF_ALLOC,
	RTW_SCTX_DONE_BUF_FREE,
	RTW_SCTX_DONE_WRITE_PORT_ERR,
	RTW_SCTX_DONE_TX_DESC_NA,
	RTW_SCTX_DONE_TX_DENY,
	RTW_SCTX_DONE_CCX_PKT_FAIL,
	RTW_SCTX_DONE_DRV_STOP,
	RTW_SCTX_DONE_DEV_REMOVE,
	RTW_SCTX_DONE_CMD_ERROR,
};
249 
250 
/* submit_ctx helpers: initialize, block until completion (or timeout),
 * and signal completion with an explicit or default status.
 * NOTE(review): the done/done_err variants take submit_ctx** — presumably
 * so the completer can detach the context; confirm in rtw_xmit.c. */
void rtw_sctx_init(struct submit_ctx *sctx, int timeout_ms);
int rtw_sctx_wait(struct submit_ctx *sctx, const char *msg);
void rtw_sctx_done_err(struct submit_ctx **sctx, int status);
void rtw_sctx_done(struct submit_ctx **sctx);
255 
/* A DMA-able transmit buffer drawn from one of three pools (normal,
 * extension, or command — see buf_tag). Tracks both the raw allocation
 * (pallocated_buf) and the aligned usable area (pbuf). */
struct xmit_buf
{
	struct list_head	list;

	struct adapter *padapter;

	u8 *pallocated_buf;	/* raw allocation (unaligned) */

	u8 *pbuf;		/* aligned data area within pallocated_buf */

	void *priv_data;

	u16 buf_tag; /*  0: Normal xmitbuf, 1: extension xmitbuf, 2:cmd xmitbuf */
	u16 flags;
	u32 alloc_sz;	/* total allocated size */

	u32  len;	/* bytes of valid data currently in the buffer */

	struct submit_ctx *sctx;	/* optional completion context for this submission */

	/* head/data/tail/end window pointers into the buffer */
	u8 *phead;
	u8 *pdata;
	u8 *ptail;
	u8 *pend;
	u32 ff_hwaddr;	/* hardware FIFO address the buffer is written to */
	u8 pg_num;
	u8 agg_num;	/* number of frames aggregated in this buffer */

#if defined(DBG_XMIT_BUF)|| defined(DBG_XMIT_BUF_EXT)
	u8 no;	/* debug: buffer index within its pool */
#endif

};
289 
290 
/* One frame in flight through the TX path: the original packet, its
 * per-packet attributes, and the xmit_buf holding the coalesced
 * on-air representation. */
struct xmit_frame
{
	struct list_head	list;

	struct pkt_attrib attrib;	/* per-packet TX attributes */

	_pkt *pkt;	/* original OS packet (skb) */

	int	frame_tag;	/* one of the *_FRAMETAG values */

	struct adapter *padapter;

	u8 *buf_addr;	/* data area used to build the frame */

	struct xmit_buf *pxmitbuf;	/* backing transmit buffer, if attached */

	u8 pg_num;
	u8 agg_num;

	u8 ack_report;	/* request TX-ack status report for this frame */

	u8 *alloc_addr; /* the actual address this xmitframe allocated */
	u8 ext_tag; /* 0:data, 1:mgmt */

};
316 
/* Per-station, per-AC service queue: list node for the AC's pending
 * list, the station's own queue of frames, and a frame count. */
struct tx_servq {
	struct list_head	tx_pending;
	struct __queue	sta_pending;
	int qcnt;	/* number of frames queued in sta_pending */
};
322 
323 
/* Per-station TX state: one service queue per access category plus
 * power-save delivery lists and per-TID sequence numbers. */
struct sta_xmit_priv
{
	_lock	lock;
	sint	option;
	sint	apsd_setting;	/* When bit mask is on, the associated edca queue supports APSD. */


	/* struct tx_servq blk_q[MAX_NUMBLKS]; */
	struct tx_servq	be_q;			/* priority == 0, 3 */
	struct tx_servq	bk_q;			/* priority == 1, 2 */
	struct tx_servq	vi_q;			/* priority == 4, 5 */
	struct tx_servq	vo_q;			/* priority == 6, 7 */
	struct list_head	legacy_dz;
	struct list_head  apsd;

	u16 txseq_tid[16];	/* per-TID TX sequence numbers */

	/* uint	sta_tx_bytes; */
	/* u64	sta_tx_pkts; */
	/* uint	sta_tx_fail; */


};
347 
348 
/* Hardware TX FIFO bookkeeping (ring head/tail and free space).
 * NOTE(review): fields are volatile, suggesting they mirror state
 * updated from interrupt context — confirm usage before relying on it. */
struct	hw_txqueue	{
	volatile sint	head;
	volatile sint	tail;
	volatile sint	free_sz;	/* in units of 64 bytes */
	volatile sint      free_cmdsz;
	volatile sint	 txsz[8];
	uint	ff_hwaddr;	/* hardware FIFO address */
	uint	cmd_hwaddr;
	sint	ac_tag;
};
359 
/* Offset/length of one packet inside an aggregated transmit buffer. */
struct agg_pkt_info{
	u16 offset;
	u16 pkt_len;
};
364 
/* Index into xmit_priv.pcmd_xmitbuf selecting a reserved command buffer. */
enum cmdbuf_type {
	CMDBUF_BEACON = 0x00,
	CMDBUF_RSVD,
	CMDBUF_MAX	/* array size; not a valid buffer index */
};
370 
/* Top-level TX state for one adapter: per-AC pending queues, the
 * xmit_frame and xmit_buf pools (normal, extension, command), TX
 * statistics, and the SDIO xmit thread/tasklet machinery. */
struct	xmit_priv {

	_lock	lock;

	_sema	xmit_sema;
	_sema	terminate_xmitthread_sema;

	/* struct __queue	blk_strms[MAX_NUMBLKS]; */
	/* per-AC pending frame queues (bm = broadcast/multicast) */
	struct __queue	be_pending;
	struct __queue	bk_pending;
	struct __queue	vi_pending;
	struct __queue	vo_pending;
	struct __queue	bm_pending;

	/* struct __queue	legacy_dz_queue; */
	/* struct __queue	apsd_queue; */

	/* xmit_frame pool: raw allocation, aligned base, free count/queue */
	u8 *pallocated_frame_buf;
	u8 *pxmit_frame_buf;
	uint free_xmitframe_cnt;
	struct __queue	free_xmit_queue;

	/* uint mapping_addr; */
	/* uint pkt_sz; */

	/* extension xmit_frame pool (mgmt frames) */
	u8 *xframe_ext_alloc_addr;
	u8 *xframe_ext;
	uint free_xframe_ext_cnt;
	struct __queue free_xframe_ext_queue;

	/* struct	hw_txqueue	be_txqueue; */
	/* struct	hw_txqueue	bk_txqueue; */
	/* struct	hw_txqueue	vi_txqueue; */
	/* struct	hw_txqueue	vo_txqueue; */
	/* struct	hw_txqueue	bmc_txqueue; */

	uint	frag_len;	/* fragmentation threshold */

	struct adapter	*adapter;

	/* virtual carrier sense (RTS/CTS, CTS-to-self) configuration */
	u8   vcs_setting;
	u8 vcs;
	u8 vcs_type;
	/* u16  rts_thresh; */

	/* TX statistics */
	u64	tx_bytes;
	u64	tx_pkts;
	u64	tx_drop;
	u64	last_tx_pkts;

	struct hw_xmit *hwxmits;	/* array of HWXMIT_ENTRY per-AC scheduler entries */
	u8 hwxmit_entry;

	u8 wmm_para_seq[4];/* sequence for wmm ac parameter strength from large to small. it's value is 0->vo, 1->vi, 2->be, 3->bk. */

#ifdef CONFIG_SDIO_TX_TASKLET
	struct tasklet_struct xmit_tasklet;
#else
	void *SdioXmitThread;
	_sema		SdioXmitSema;
	_sema		SdioXmitTerminateSema;
#endif /* CONFIG_SDIO_TX_TASKLET */

	/* normal xmit_buf pool */
	struct __queue free_xmitbuf_queue;
	struct __queue pending_xmitbuf_queue;
	u8 *pallocated_xmitbuf;
	u8 *pxmitbuf;
	uint free_xmitbuf_cnt;

	/* extension xmit_buf pool */
	struct __queue free_xmit_extbuf_queue;
	u8 *pallocated_xmit_extbuf;
	u8 *pxmit_extbuf;
	uint free_xmit_extbuf_cnt;

	struct xmit_buf	pcmd_xmitbuf[CMDBUF_MAX];	/* reserved command/beacon buffers */

	u16 nqos_ssn;	/* sequence number for non-QoS frames */

	/* TX-ack reporting: serialized by ack_tx_mutex, completed via ack_tx_ops */
	int	ack_tx;
	_mutex ack_tx_mutex;
	struct submit_ctx ack_tx_ops;
	u8 seq_no;
	_lock lock_sctx;
};
455 
/* Allocate a command xmit_frame bound to one of the reserved command
 * buffers (see enum cmdbuf_type). Convenience wrappers select the
 * generic reserved buffer or the beacon buffer. */
extern struct xmit_frame *__rtw_alloc_cmdxmitframe(struct xmit_priv *pxmitpriv,
		enum cmdbuf_type buf_type);
#define rtw_alloc_cmdxmitframe(p) __rtw_alloc_cmdxmitframe(p, CMDBUF_RSVD)
#define rtw_alloc_bcnxmitframe(p) __rtw_alloc_cmdxmitframe(p, CMDBUF_BEACON)
460 
/* xmit_buf pool management: allocate/free from the extension and normal
 * buffer pools. */
extern struct xmit_buf *rtw_alloc_xmitbuf_ext(struct xmit_priv *pxmitpriv);
extern s32 rtw_free_xmitbuf_ext(struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf);

extern struct xmit_buf *rtw_alloc_xmitbuf(struct xmit_priv *pxmitpriv);
extern s32 rtw_free_xmitbuf(struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf);

/* TX accounting, protection (RTS/CTS) update from IEs, WLAN header build,
 * and SNAP header emission. */
void rtw_count_tx_stats(struct adapter *padapter, struct xmit_frame *pxmitframe, int sz);
extern void rtw_update_protection(struct adapter *padapter, u8 *ie, uint ie_len);
extern s32 rtw_make_wlanhdr(struct adapter *padapter, u8 *hdr, struct pkt_attrib *pattrib);
extern s32 rtw_put_snap(u8 *data, u16 h_proto);
471 
/* xmit_frame pool management, classification into per-station/per-AC
 * service queues, and frame coalescing (802.3 -> 802.11). */
extern struct xmit_frame *rtw_alloc_xmitframe(struct xmit_priv *pxmitpriv);
struct xmit_frame *rtw_alloc_xmitframe_ext(struct xmit_priv *pxmitpriv);
struct xmit_frame *rtw_alloc_xmitframe_once(struct xmit_priv *pxmitpriv);
extern s32 rtw_free_xmitframe(struct xmit_priv *pxmitpriv, struct xmit_frame *pxmitframe);
extern void rtw_free_xmitframe_queue(struct xmit_priv *pxmitpriv, struct __queue *pframequeue);
struct tx_servq *rtw_get_sta_pending(struct adapter *padapter, struct sta_info *psta, sint up, u8 *ac);
extern s32 rtw_xmitframe_enqueue(struct adapter *padapter, struct xmit_frame *pxmitframe);

extern s32 rtw_xmit_classifier(struct adapter *padapter, struct xmit_frame *pxmitframe);
extern u32 rtw_calculate_wlan_pkt_size_by_attribue(struct pkt_attrib *pattrib);
#define rtw_wlan_pkt_size(f) rtw_calculate_wlan_pkt_size_by_attribue(&f->attrib)
extern s32 rtw_xmitframe_coalesce(struct adapter *padapter, _pkt *pkt, struct xmit_frame *pxmitframe);
extern s32 rtw_mgmt_xmitframe_coalesce(struct adapter *padapter, _pkt *pkt, struct xmit_frame *pxmitframe);
s32 _rtw_init_hw_txqueue(struct hw_txqueue* phw_txqueue, u8 ac_tag);
void _rtw_init_sta_xmit_priv(struct sta_xmit_priv *psta_xmitpriv);
487 
488 
/* xmit_priv lifecycle, HW xmit scheduler setup, and the main entry point
 * rtw_xmit(). */
s32 rtw_txframes_pending(struct adapter *padapter);
void rtw_init_hwxmits(struct hw_xmit *phwxmit, sint entry);


s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter);
void _rtw_free_xmit_priv (struct xmit_priv *pxmitpriv);


void rtw_alloc_hwxmits(struct adapter *padapter);
void rtw_free_hwxmits(struct adapter *padapter);


s32 rtw_xmit(struct adapter *padapter, _pkt **pkt);
bool xmitframe_hiq_filter(struct xmit_frame *xmitframe);

/* AP-mode power-save handling: queue frames for sleeping stations and
 * release them on wakeup / APSD trigger. */
sint xmitframe_enqueue_for_sleeping_sta(struct adapter *padapter, struct xmit_frame *pxmitframe);
void stop_sta_xmit(struct adapter *padapter, struct sta_info *psta);
void wakeup_sta_to_xmit(struct adapter *padapter, struct sta_info *psta);
void xmit_delivery_enabled_frames(struct adapter *padapter, struct sta_info *psta);

u8 query_ra_short_GI(struct sta_info *psta);

u8 qos_acm(u8 acm_mask, u8 priority);

/* Pending xmit_buf queue used by the SDIO xmit thread. */
void enqueue_pending_xmitbuf(struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf);
void enqueue_pending_xmitbuf_to_head(struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf);
struct xmit_buf*dequeue_pending_xmitbuf(struct xmit_priv *pxmitpriv);
struct xmit_buf*dequeue_pending_xmitbuf_under_survey(struct xmit_priv *pxmitpriv);
sint	check_pending_xmitbuf(struct xmit_priv *pxmitpriv);
int	rtw_xmit_thread(void *context);

u32 rtw_get_ff_hwaddr(struct xmit_frame	*pxmitframe);

/* TX-ack wait/complete pair (see xmit_priv.ack_tx_ops). */
int rtw_ack_tx_wait(struct xmit_priv *pxmitpriv, u32 timeout_ms);
void rtw_ack_tx_done(struct xmit_priv *pxmitpriv, int status);
524 
525 /* include after declaring struct xmit_buf, in order to avoid warning */
526 #include <xmit_osdep.h>
527 
#endif	/* _RTW_XMIT_H_ */
529