/*
 * Copyright (c) 2012 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <net/ieee80211_radiotap.h>
#include <linux/if_arp.h>
#include <linux/moduleparam.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>	/* for tcp_hdrlen() */
#include <linux/udp.h>	/* for struct udphdr */
#include <net/ipv6.h>
#include <linux/prefetch.h>

#include "wil6210.h"
#include "wmi.h"
#include "txrx.h"
#include "trace.h"

static bool rtap_include_phy_info;
module_param(rtap_include_phy_info, bool, S_IRUGO);
MODULE_PARM_DESC(rtap_include_phy_info,
		 " Include PHY info in the radiotap header, default - no");
static inline int wil_vring_is_empty(struct vring *vring)
{
	return vring->swhead == vring->swtail;
}

static inline u32 wil_vring_next_tail(struct vring *vring)
{
	return (vring->swtail + 1) % vring->size;
}

static inline void wil_vring_advance_head(struct vring *vring, int n)
{
	vring->swhead = (vring->swhead + n) % vring->size;
}

static inline int wil_vring_is_full(struct vring *vring)
{
	return wil_vring_next_tail(vring) == vring->swhead;
}

/*
 * Available space in the Tx VRING
 */
static inline int wil_vring_avail_tx(struct vring *vring)
{
	u32 swhead = vring->swhead;
	u32 swtail = vring->swtail;
	int used = (vring->size + swhead - swtail) % vring->size;

	return vring->size - used - 1;
}
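
/*
 * Worked example of the arithmetic above (illustrative values): with a
 * ring of size 128, swhead = 5 and swtail = 120,
 *   used  = (128 + 5 - 120) % 128 = 13
 *   avail = 128 - 13 - 1 = 114
 * One slot is deliberately kept unused, so a full ring
 * (wil_vring_next_tail() == swhead) can be distinguished from an empty
 * one (swhead == swtail).
 */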

static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
{
	struct device *dev = wil_to_dev(wil);
	size_t sz = vring->size * sizeof(vring->va[0]);
	uint i;

	BUILD_BUG_ON(sizeof(vring->va[0]) != 32);

	vring->swhead = 0;
	vring->swtail = 0;
	vring->ctx = kcalloc(vring->size, sizeof(vring->ctx[0]), GFP_KERNEL);
	if (!vring->ctx) {
		vring->va = NULL;
		return -ENOMEM;
	}
	/*
	 * vring->va should be aligned on its size rounded up to a power of 2
	 * This is guaranteed by dma_alloc_coherent()
	 */
	vring->va = dma_alloc_coherent(dev, sz, &vring->pa, GFP_KERNEL);
	if (!vring->va) {
		kfree(vring->ctx);
		vring->ctx = NULL;
		return -ENOMEM;
	}
	/* initially, all descriptors are SW owned
	 * For Tx and Rx, the ownership bit is at the same location, thus
	 * we can use either
	 */
	for (i = 0; i < vring->size; i++) {
		volatile struct vring_tx_desc *_d = &(vring->va[i].tx);
		_d->dma.status = TX_DMA_STATUS_DU;
	}

	wil_dbg_misc(wil, "vring[%d] 0x%p:0x%016llx 0x%p\n", vring->size,
		     vring->va, (unsigned long long)vring->pa, vring->ctx);

	return 0;
}
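
/*
 * Sizing note (follows from the BUILD_BUG_ON above): each descriptor is
 * exactly 32 bytes, so e.g. a 128-entry ring occupies 128 * 32 = 4096
 * bytes; dma_alloc_coherent() aligns the allocation to the smallest
 * power-of-2 order that fits it, which yields the alignment the HW needs.
 */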

static void wil_vring_free(struct wil6210_priv *wil, struct vring *vring,
			   int tx)
{
	struct device *dev = wil_to_dev(wil);
	size_t sz = vring->size * sizeof(vring->va[0]);

	while (!wil_vring_is_empty(vring)) {
		dma_addr_t pa;
		u16 dmalen;
		struct wil_ctx *ctx;

		if (tx) {
			struct vring_tx_desc dd, *d = &dd;
			volatile struct vring_tx_desc *_d =
					&vring->va[vring->swtail].tx;

			ctx = &vring->ctx[vring->swtail];
			*d = *_d;
			pa = wil_desc_addr(&d->dma.addr);
			dmalen = le16_to_cpu(d->dma.length);
			if (vring->ctx[vring->swtail].mapped_as_page) {
				dma_unmap_page(dev, pa, dmalen,
					       DMA_TO_DEVICE);
			} else {
				dma_unmap_single(dev, pa, dmalen,
						 DMA_TO_DEVICE);
			}
			if (ctx->skb)
				dev_kfree_skb_any(ctx->skb);
			vring->swtail = wil_vring_next_tail(vring);
		} else { /* rx */
			struct vring_rx_desc dd, *d = &dd;
			volatile struct vring_rx_desc *_d =
					&vring->va[vring->swhead].rx;

			ctx = &vring->ctx[vring->swhead];
			*d = *_d;
			pa = wil_desc_addr(&d->dma.addr);
			dmalen = le16_to_cpu(d->dma.length);
			dma_unmap_single(dev, pa, dmalen, DMA_FROM_DEVICE);
			kfree_skb(ctx->skb);
			wil_vring_advance_head(vring, 1);
		}
	}
	dma_free_coherent(dev, sz, (void *)vring->va, vring->pa);
	kfree(vring->ctx);
	vring->pa = 0;
	vring->va = NULL;
	vring->ctx = NULL;
}
157 
158 /**
159  * Allocate one skb for Rx VRING
160  *
161  * Safe to call from IRQ
162  */
163 static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct vring *vring,
164 			       u32 i, int headroom)
165 {
166 	struct device *dev = wil_to_dev(wil);
167 	unsigned int sz = RX_BUF_LEN;
168 	struct vring_rx_desc dd, *d = &dd;
169 	volatile struct vring_rx_desc *_d = &(vring->va[i].rx);
170 	dma_addr_t pa;
171 
172 	/* TODO align */
173 	struct sk_buff *skb = dev_alloc_skb(sz + headroom);
174 	if (unlikely(!skb))
175 		return -ENOMEM;
176 
177 	skb_reserve(skb, headroom);
178 	skb_put(skb, sz);
179 
180 	pa = dma_map_single(dev, skb->data, skb->len, DMA_FROM_DEVICE);
181 	if (unlikely(dma_mapping_error(dev, pa))) {
182 		kfree_skb(skb);
183 		return -ENOMEM;
184 	}
185 
186 	d->dma.d0 = BIT(9) | RX_DMA_D0_CMD_DMA_IT;
187 	wil_desc_addr_set(&d->dma.addr, pa);
188 	/* ip_length don't care */
189 	/* b11 don't care */
190 	/* error don't care */
191 	d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */
192 	d->dma.length = cpu_to_le16(sz);
193 	*_d = *d;
194 	vring->ctx[i].skb = skb;
195 
196 	return 0;
197 }
198 
199 /**
200  * Adds radiotap header
201  *
202  * Any error indicated as "Bad FCS"
203  *
204  * Vendor data for 04:ce:14-1 (Wilocity-1) consists of:
205  *  - Rx descriptor: 32 bytes
206  *  - Phy info
207  */
208 static void wil_rx_add_radiotap_header(struct wil6210_priv *wil,
209 				       struct sk_buff *skb)
210 {
211 	struct wireless_dev *wdev = wil->wdev;
212 	struct wil6210_rtap {
213 		struct ieee80211_radiotap_header rthdr;
214 		/* fields should be in the order of bits in rthdr.it_present */
215 		/* flags */
216 		u8 flags;
217 		/* channel */
218 		__le16 chnl_freq __aligned(2);
219 		__le16 chnl_flags;
220 		/* MCS */
221 		u8 mcs_present;
222 		u8 mcs_flags;
223 		u8 mcs_index;
224 	} __packed;
225 	struct wil6210_rtap_vendor {
226 		struct wil6210_rtap rtap;
227 		/* vendor */
228 		u8 vendor_oui[3] __aligned(2);
229 		u8 vendor_ns;
230 		__le16 vendor_skip;
231 		u8 vendor_data[0];
232 	} __packed;
233 	struct vring_rx_desc *d = wil_skb_rxdesc(skb);
234 	struct wil6210_rtap_vendor *rtap_vendor;
235 	int rtap_len = sizeof(struct wil6210_rtap);
236 	int phy_length = 0; /* phy info header size, bytes */
237 	static char phy_data[128];
238 	struct ieee80211_channel *ch = wdev->preset_chandef.chan;
239 
240 	if (rtap_include_phy_info) {
241 		rtap_len = sizeof(*rtap_vendor) + sizeof(*d);
242 		/* calculate additional length */
243 		if (d->dma.status & RX_DMA_STATUS_PHY_INFO) {
244 			/**
245 			 * PHY info starts from 8-byte boundary
246 			 * there are 8-byte lines, last line may be partially
247 			 * written (HW bug), thus FW configures for last line
248 			 * to be excessive. Driver skips this last line.
249 			 */
250 			int len = min_t(int, 8 + sizeof(phy_data),
251 					wil_rxdesc_phy_length(d));
252 			if (len > 8) {
253 				void *p = skb_tail_pointer(skb);
254 				void *pa = PTR_ALIGN(p, 8);
255 				if (skb_tailroom(skb) >= len + (pa - p)) {
256 					phy_length = len - 8;
257 					memcpy(phy_data, pa, phy_length);
258 				}
259 			}
260 		}
261 		rtap_len += phy_length;
262 	}
263 
264 	if (skb_headroom(skb) < rtap_len &&
265 	    pskb_expand_head(skb, rtap_len, 0, GFP_ATOMIC)) {
266 		wil_err(wil, "Unable to expand headrom to %d\n", rtap_len);
267 		return;
268 	}
269 
270 	rtap_vendor = (void *)skb_push(skb, rtap_len);
271 	memset(rtap_vendor, 0, rtap_len);
272 
273 	rtap_vendor->rtap.rthdr.it_version = PKTHDR_RADIOTAP_VERSION;
274 	rtap_vendor->rtap.rthdr.it_len = cpu_to_le16(rtap_len);
275 	rtap_vendor->rtap.rthdr.it_present = cpu_to_le32(
276 			(1 << IEEE80211_RADIOTAP_FLAGS) |
277 			(1 << IEEE80211_RADIOTAP_CHANNEL) |
278 			(1 << IEEE80211_RADIOTAP_MCS));
279 	if (d->dma.status & RX_DMA_STATUS_ERROR)
280 		rtap_vendor->rtap.flags |= IEEE80211_RADIOTAP_F_BADFCS;
281 
282 	rtap_vendor->rtap.chnl_freq = cpu_to_le16(ch ? ch->center_freq : 58320);
283 	rtap_vendor->rtap.chnl_flags = cpu_to_le16(0);
284 
285 	rtap_vendor->rtap.mcs_present = IEEE80211_RADIOTAP_MCS_HAVE_MCS;
286 	rtap_vendor->rtap.mcs_flags = 0;
287 	rtap_vendor->rtap.mcs_index = wil_rxdesc_mcs(d);
288 
289 	if (rtap_include_phy_info) {
290 		rtap_vendor->rtap.rthdr.it_present |= cpu_to_le32(1 <<
291 				IEEE80211_RADIOTAP_VENDOR_NAMESPACE);
292 		/* OUI for Wilocity 04:ce:14 */
293 		rtap_vendor->vendor_oui[0] = 0x04;
294 		rtap_vendor->vendor_oui[1] = 0xce;
295 		rtap_vendor->vendor_oui[2] = 0x14;
296 		rtap_vendor->vendor_ns = 1;
297 		/* Rx descriptor + PHY data  */
298 		rtap_vendor->vendor_skip = cpu_to_le16(sizeof(*d) +
299 						       phy_length);
300 		memcpy(rtap_vendor->vendor_data, (void *)d, sizeof(*d));
301 		memcpy(rtap_vendor->vendor_data + sizeof(*d), phy_data,
302 		       phy_length);
303 	}
304 }
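
/*
 * Sketch of the resulting radiotap layout (sizes follow from the structs
 * above; 58320 MHz, the fallback frequency, is 60 GHz channel 1):
 *   it_version/it_pad/it_len/it_present : 8 bytes
 *   flags                               : 1 byte (F_BADFCS on Rx error)
 *   chnl_freq/chnl_flags                : 2+2 bytes, 2-byte aligned
 *   mcs_present/mcs_flags/mcs_index     : 3 bytes
 *   vendor OUI/ns/skip + Rx descriptor + PHY data, present only when the
 *   rtap_include_phy_info module parameter is set
 */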
305 
306 /*
307  * Fast swap in place between 2 registers
308  */
309 static void wil_swap_u16(u16 *a, u16 *b)
310 {
311 	*a ^= *b;
312 	*b ^= *a;
313 	*a ^= *b;
314 }
315 
316 static void wil_swap_ethaddr(void *data)
317 {
318 	struct ethhdr *eth = data;
319 	u16 *s = (u16 *)eth->h_source;
320 	u16 *d = (u16 *)eth->h_dest;
321 
322 	wil_swap_u16(s++, d++);
323 	wil_swap_u16(s++, d++);
324 	wil_swap_u16(s, d);
325 }
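
/*
 * Illustration of the XOR swap (example values only): for a = 0x1122,
 * b = 0x3344 the three steps give
 *   a = 0x1122 ^ 0x3344 = 0x2266
 *   b = 0x3344 ^ 0x2266 = 0x1122
 *   a = 0x2266 ^ 0x1122 = 0x3344
 * wil_swap_ethaddr() applies this to the three u16 halves of h_source
 * and h_dest, exchanging the two MAC addresses without a scratch buffer.
 */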

/**
 * reap 1 frame from @swhead
 *
 * Rx descriptor is copied to skb->cb
 *
 * Safe to call from IRQ
 */
static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
					 struct vring *vring)
{
	struct device *dev = wil_to_dev(wil);
	struct net_device *ndev = wil_to_ndev(wil);
	volatile struct vring_rx_desc *_d;
	struct vring_rx_desc *d;
	struct sk_buff *skb;
	dma_addr_t pa;
	unsigned int sz = RX_BUF_LEN;
	u16 dmalen;
	u8 ftype;
	u8 ds_bits;

	BUILD_BUG_ON(sizeof(struct vring_rx_desc) > sizeof(skb->cb));

	if (wil_vring_is_empty(vring))
		return NULL;

	_d = &(vring->va[vring->swhead].rx);
	if (!(_d->dma.status & RX_DMA_STATUS_DU)) {
		/* not an error, we just reached the end of the Rx done area */
		return NULL;
	}

	skb = vring->ctx[vring->swhead].skb;
	d = wil_skb_rxdesc(skb);
	*d = *_d;
	pa = wil_desc_addr(&d->dma.addr);
	vring->ctx[vring->swhead].skb = NULL;
	wil_vring_advance_head(vring, 1);

	dma_unmap_single(dev, pa, sz, DMA_FROM_DEVICE);
	dmalen = le16_to_cpu(d->dma.length);

	trace_wil6210_rx(vring->swhead, d);
	wil_dbg_txrx(wil, "Rx[%3d] : %d bytes\n", vring->swhead, dmalen);
	wil_hex_dump_txrx("Rx ", DUMP_PREFIX_NONE, 32, 4,
			  (const void *)d, sizeof(*d), false);

	if (dmalen > sz) {
		wil_err(wil, "Rx size too large: %d bytes!\n", dmalen);
		kfree_skb(skb);
		return NULL;
	}
	skb_trim(skb, dmalen);

	prefetch(skb->data);

	wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1,
			  skb->data, skb_headlen(skb), false);

	wil->stats.last_mcs_rx = wil_rxdesc_mcs(d);

	/* use radiotap header only if required */
	if (ndev->type == ARPHRD_IEEE80211_RADIOTAP)
		wil_rx_add_radiotap_header(wil, skb);

	/* no extra checks if in sniffer mode */
	if (ndev->type != ARPHRD_ETHER)
		return skb;
	/*
	 * Non-data frames may be delivered through the Rx DMA channel
	 * (ex: BAR). The driver recognizes them by the frame type found
	 * in the Rx descriptor. If the type is not data, the frame is a
	 * raw 802.11 frame.
	 */
	/* the 2-bit descriptor type is shifted to match the
	 * IEEE80211_FTYPE_* masks (frame control bits 2-3)
	 */
	ftype = wil_rxdesc_ftype(d) << 2;
	if (ftype != IEEE80211_FTYPE_DATA) {
		wil_dbg_txrx(wil, "Non-data frame ftype 0x%08x\n", ftype);
		/* TODO: process it */
		kfree_skb(skb);
		return NULL;
	}

	if (skb->len < ETH_HLEN) {
		wil_err(wil, "Short frame, len = %d\n", skb->len);
		/* TODO: process it (i.e. BAR) */
		kfree_skb(skb);
		return NULL;
	}

	/* L4 IDENT is on when HW calculated the checksum. Check the status;
	 * on error, leave ip_summed as CHECKSUM_NONE so that higher stack
	 * layers re-verify it (and, if needed, handle retransmission)
	 */
	if (d->dma.status & RX_DMA_STATUS_L4_IDENT) {
		/* L4 protocol identified, csum calculated */
		if ((d->dma.error & RX_DMA_ERROR_L4_ERR) == 0)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		/* If HW reports a bad checksum, let the IP stack re-check it.
		 * For example, HW doesn't understand the Microsoft IP stack,
		 * which mis-calculates the TCP checksum: where it should be
		 * 0x0, it writes 0xffff in violation of RFC 1624
		 */
	}

	ds_bits = wil_rxdesc_ds_bits(d);
	if (ds_bits == 1) {
		/*
		 * HW bug - in ToDS mode (ToDS=1/FromDS=0), i.e. Rx on the
		 * AP side, the addresses get swapped
		 */
		wil_swap_ethaddr(skb->data);
	}

	return skb;
}
442 
443 /**
444  * allocate and fill up to @count buffers in rx ring
445  * buffers posted at @swtail
446  */
447 static int wil_rx_refill(struct wil6210_priv *wil, int count)
448 {
449 	struct net_device *ndev = wil_to_ndev(wil);
450 	struct vring *v = &wil->vring_rx;
451 	u32 next_tail;
452 	int rc = 0;
453 	int headroom = ndev->type == ARPHRD_IEEE80211_RADIOTAP ?
454 			WIL6210_RTAP_SIZE : 0;
455 
456 	for (; next_tail = wil_vring_next_tail(v),
457 			(next_tail != v->swhead) && (count-- > 0);
458 			v->swtail = next_tail) {
459 		rc = wil_vring_alloc_skb(wil, v, v->swtail, headroom);
460 		if (rc) {
461 			wil_err(wil, "Error %d in wil_rx_refill[%d]\n",
462 				rc, v->swtail);
463 			break;
464 		}
465 	}
466 	iowrite32(v->swtail, wil->csr + HOSTADDR(v->hwtail));
467 
468 	return rc;
469 }
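
/*
 * Note on the refill loop above: the comma expression in the for
 * condition recomputes next_tail on every pass, and the loop stops one
 * slot short of swhead, preserving the "one empty slot" convention that
 * distinguishes a full ring from an empty one. The final iowrite32()
 * publishes the new swtail so the HW knows how far it may fill.
 */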

/*
 * Pass the Rx packet to the netif. Update statistics.
 * Called in softirq context (NAPI poll).
 */
static void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
{
	int rc;
	unsigned int len = skb->len; /* save: skb may be freed below */

	skb_orphan(skb);

	rc = netif_receive_skb(skb);

	if (likely(rc == NET_RX_SUCCESS)) {
		ndev->stats.rx_packets++;
		ndev->stats.rx_bytes += len;
	} else {
		ndev->stats.rx_dropped++;
	}
}
492 
493 /**
494  * Proceed all completed skb's from Rx VRING
495  *
496  * Safe to call from NAPI poll, i.e. softirq with interrupts enabled
497  */
498 void wil_rx_handle(struct wil6210_priv *wil, int *quota)
499 {
500 	struct net_device *ndev = wil_to_ndev(wil);
501 	struct vring *v = &wil->vring_rx;
502 	struct sk_buff *skb;
503 
504 	if (!v->va) {
505 		wil_err(wil, "Rx IRQ while Rx not yet initialized\n");
506 		return;
507 	}
508 	wil_dbg_txrx(wil, "%s()\n", __func__);
509 	while ((*quota > 0) && (NULL != (skb = wil_vring_reap_rx(wil, v)))) {
510 		(*quota)--;
511 
512 		if (wil->wdev->iftype == NL80211_IFTYPE_MONITOR) {
513 			skb->dev = ndev;
514 			skb_reset_mac_header(skb);
515 			skb->ip_summed = CHECKSUM_UNNECESSARY;
516 			skb->pkt_type = PACKET_OTHERHOST;
517 			skb->protocol = htons(ETH_P_802_2);
518 
519 		} else {
520 			skb->protocol = eth_type_trans(skb, ndev);
521 		}
522 
523 		wil_netif_rx_any(skb, ndev);
524 	}
525 	wil_rx_refill(wil, v->size);
526 }

int wil_rx_init(struct wil6210_priv *wil)
{
	struct vring *vring = &wil->vring_rx;
	int rc;

	vring->size = WIL6210_RX_RING_SIZE;
	rc = wil_vring_alloc(wil, vring);
	if (rc)
		return rc;

	rc = wmi_rx_chain_add(wil, vring);
	if (rc)
		goto err_free;

	rc = wil_rx_refill(wil, vring->size);
	if (rc)
		goto err_free;

	return 0;
 err_free:
	wil_vring_free(wil, vring, 0);

	return rc;
}

void wil_rx_fini(struct wil6210_priv *wil)
{
	struct vring *vring = &wil->vring_rx;

	if (vring->va)
		wil_vring_free(wil, vring, 0);
}

int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
		      int cid, int tid)
{
	int rc;
	struct wmi_vring_cfg_cmd cmd = {
		.action = cpu_to_le32(WMI_VRING_CMD_ADD),
		.vring_cfg = {
			.tx_sw_ring = {
				.max_mpdu_size = cpu_to_le16(TX_BUF_LEN),
				.ring_size = cpu_to_le16(size),
			},
			.ringid = id,
			.cidxtid = (cid & 0xf) | ((tid & 0xf) << 4),
			.encap_trans_type = WMI_VRING_ENC_TYPE_802_3,
			.mac_ctrl = 0,
			.to_resolution = 0,
			.agg_max_wsize = 16,
			.schd_params = {
				.priority = cpu_to_le16(0),
				.timeslot_us = cpu_to_le16(0xfff),
			},
		},
	};
	struct {
		struct wil6210_mbox_hdr_wmi wmi;
		struct wmi_vring_cfg_done_event cmd;
	} __packed reply;
	struct vring *vring = &wil->vring_tx[id];

	if (vring->va) {
		wil_err(wil, "Tx ring [%d] already allocated\n", id);
		rc = -EINVAL;
		goto out;
	}

	vring->size = size;
	rc = wil_vring_alloc(wil, vring);
	if (rc)
		goto out;

	cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);

	rc = wmi_call(wil, WMI_VRING_CFG_CMDID, &cmd, sizeof(cmd),
		      WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply), 100);
	if (rc)
		goto out_free;

	if (reply.cmd.status != WMI_FW_STATUS_SUCCESS) {
		wil_err(wil, "Tx config failed, status 0x%02x\n",
			reply.cmd.status);
		rc = -EINVAL;
		goto out_free;
	}
	vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);

	return 0;
 out_free:
	wil_vring_free(wil, vring, 1);
 out:
	return rc;
}
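
/*
 * The cidxtid field above packs the connection id into the low nibble
 * and the TID into the high one. For example (illustrative values),
 * cid = 3 and tid = 5 give
 *   (3 & 0xf) | ((5 & 0xf) << 4) = 0x53
 */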

void wil_vring_fini_tx(struct wil6210_priv *wil, int id)
{
	struct vring *vring = &wil->vring_tx[id];

	if (!vring->va)
		return;

	wil_vring_free(wil, vring, 1);
}

static struct vring *wil_find_tx_vring(struct wil6210_priv *wil,
				       struct sk_buff *skb)
{
	struct vring *v = &wil->vring_tx[0];

	if (v->va)
		return v;

	return NULL;
}

static int wil_tx_desc_map(struct vring_tx_desc *d, dma_addr_t pa, u32 len,
			   int vring_index)
{
	wil_desc_addr_set(&d->dma.addr, pa);
	d->dma.ip_length = 0;
	/* 0..6: mac_length; 7: ip_version (0 - IPv6, 1 - IPv4) */
	d->dma.b11 = 0/*14 | BIT(7)*/;
	d->dma.error = 0;
	d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */
	d->dma.length = cpu_to_le16((u16)len);
	d->dma.d0 = (vring_index << DMA_CFG_DESC_TX_0_QID_POS);
	d->mac.d[0] = 0;
	d->mac.d[1] = 0;
	d->mac.d[2] = 0;
	d->mac.ucode_cmd = 0;
	/* use dst index 0 */
	d->mac.d[1] |= BIT(MAC_CFG_DESC_TX_1_DST_INDEX_EN_POS) |
		       (0 << MAC_CFG_DESC_TX_1_DST_INDEX_POS);
	/* translation type: 0 - bypass; 1 - 802.3; 2 - native wifi */
	d->mac.d[2] = BIT(MAC_CFG_DESC_TX_2_SNAP_HDR_INSERTION_EN_POS) |
		      (1 << MAC_CFG_DESC_TX_2_L2_TRANSLATION_TYPE_POS);

	return 0;
}

static int wil_tx_desc_offload_cksum_set(struct wil6210_priv *wil,
				struct vring_tx_desc *d,
				struct sk_buff *skb)
{
	int protocol;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	d->dma.b11 = ETH_HLEN; /* MAC header length */

	switch (skb->protocol) {
	case cpu_to_be16(ETH_P_IP):
		protocol = ip_hdr(skb)->protocol;
		d->dma.b11 |= BIT(DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_POS);
		break;
	case cpu_to_be16(ETH_P_IPV6):
		protocol = ipv6_hdr(skb)->nexthdr;
		break;
	default:
		return -EINVAL;
	}

	switch (protocol) {
	case IPPROTO_TCP:
		d->dma.d0 |= (2 << DMA_CFG_DESC_TX_0_L4_TYPE_POS);
		/* L4 header len: TCP header length */
		d->dma.d0 |=
		(tcp_hdrlen(skb) & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK);
		break;
	case IPPROTO_UDP:
		/* L4 header len: UDP header length */
		d->dma.d0 |=
		(sizeof(struct udphdr) & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK);
		break;
	default:
		return -EINVAL;
	}

	d->dma.ip_length = skb_network_header_len(skb);
	/* Enable TCP/UDP checksum */
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_TCP_UDP_CHECKSUM_EN_POS);
	/* Calculate pseudo-header */
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_PSEUDO_HEADER_CALC_EN_POS);

	return 0;
}
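
/*
 * Worked example for the offload setup above (illustrative): a TCP over
 * IPv4 packet with a 20-byte IP header and a 20-byte TCP header leaves
 * the descriptor with
 *   b11       = ETH_HLEN | BIT(L3T_IPV4 position)  - MAC hdr len + IPv4
 *   ip_length = 20                                 - IP header length
 *   d0       |= (2 << L4_TYPE position) | 20       - TCP, L4 hdr length
 *   d0       |= the checksum-enable and pseudo-header-calc bits
 * (bit-position names abbreviated from the DMA_CFG_DESC_TX_* constants)
 */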

static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
			struct sk_buff *skb)
{
	struct device *dev = wil_to_dev(wil);
	struct vring_tx_desc dd, *d = &dd;
	volatile struct vring_tx_desc *_d;
	u32 swhead = vring->swhead;
	int avail = wil_vring_avail_tx(vring);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	uint f = 0;
	int vring_index = vring - wil->vring_tx;
	uint i = swhead;
	dma_addr_t pa;

	wil_dbg_txrx(wil, "%s()\n", __func__);

	if (avail < vring->size/8)
		netif_tx_stop_all_queues(wil_to_ndev(wil));
	if (avail < 1 + nr_frags) {
		wil_err(wil, "Tx ring full. No space for %d fragments\n",
			1 + nr_frags);
		return -ENOMEM;
	}
	_d = &(vring->va[i].tx);

	/* FIXME FW can accept only unicast frames for the peer */
	memcpy(skb->data, wil->dst_addr[vring_index], ETH_ALEN);

	pa = dma_map_single(dev, skb->data,
			skb_headlen(skb), DMA_TO_DEVICE);

	wil_dbg_txrx(wil, "Tx skb %d bytes %p -> %#08llx\n", skb_headlen(skb),
		     skb->data, (unsigned long long)pa);
	wil_hex_dump_txrx("Tx ", DUMP_PREFIX_OFFSET, 16, 1,
			  skb->data, skb_headlen(skb), false);

	if (unlikely(dma_mapping_error(dev, pa)))
		return -EINVAL;
	/* first segment */
	wil_tx_desc_map(d, pa, skb_headlen(skb), vring_index);
	/* Process TCP/UDP checksum offloading */
	if (wil_tx_desc_offload_cksum_set(wil, d, skb)) {
		wil_err(wil, "VRING #%d Failed to set cksum, drop packet\n",
			vring_index);
		goto dma_error;
	}

	d->mac.d[2] |= ((nr_frags + 1) <<
		       MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_POS);
	if (nr_frags)
		*_d = *d;

	/* middle segments */
	for (; f < nr_frags; f++) {
		const struct skb_frag_struct *frag =
				&skb_shinfo(skb)->frags[f];
		int len = skb_frag_size(frag);
		i = (swhead + f + 1) % vring->size;
		_d = &(vring->va[i].tx);
		pa = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
				DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev, pa)))
			goto dma_error;
		wil_tx_desc_map(d, pa, len, vring_index);
		vring->ctx[i].mapped_as_page = 1;
		*_d = *d;
	}
	/* for the last segment only */
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_EOP_POS);
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_MARK_WB_POS);
	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS);
	*_d = *d;

	/* hold a reference to the skb
	 * to prevent skb release before accounting
	 * in case of immediate "tx done"
	 */
	vring->ctx[i].skb = skb_get(skb);

	wil_hex_dump_txrx("Tx ", DUMP_PREFIX_NONE, 32, 4,
			  (const void *)d, sizeof(*d), false);

	/* advance swhead */
	wil_vring_advance_head(vring, nr_frags + 1);
	wil_dbg_txrx(wil, "Tx swhead %d -> %d\n", swhead, vring->swhead);
	trace_wil6210_tx(vring_index, swhead, skb->len, nr_frags);
	iowrite32(vring->swhead, wil->csr + HOSTADDR(vring->hwtail));

	return 0;
 dma_error:
	/* unmap what we have mapped */
	nr_frags = f + 1; /* frags mapped so far + one for the skb head */
	for (f = 0; f < nr_frags; f++) {
		u16 dmalen;
		struct wil_ctx *ctx;

		i = (swhead + f) % vring->size;
		ctx = &vring->ctx[i];
		_d = &(vring->va[i].tx);
		*d = *_d;
		_d->dma.status = TX_DMA_STATUS_DU;
		pa = wil_desc_addr(&d->dma.addr);
		dmalen = le16_to_cpu(d->dma.length);
		if (ctx->mapped_as_page)
			dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE);
		else
			dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);

		if (ctx->skb)
			dev_kfree_skb_any(ctx->skb);

		memset(ctx, 0, sizeof(*ctx));
	}

	return -EINVAL;
}

netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct wil6210_priv *wil = ndev_to_wil(ndev);
	struct vring *vring;
	int rc;

	wil_dbg_txrx(wil, "%s()\n", __func__);
	if (!test_bit(wil_status_fwready, &wil->status)) {
		wil_err(wil, "FW not ready\n");
		goto drop;
	}
	if (!test_bit(wil_status_fwconnected, &wil->status)) {
		wil_err(wil, "FW not connected\n");
		goto drop;
	}
	if (wil->wdev->iftype == NL80211_IFTYPE_MONITOR) {
		wil_err(wil, "Xmit in monitor mode not supported\n");
		goto drop;
	}

	/* find vring */
	vring = wil_find_tx_vring(wil, skb);
	if (!vring) {
		wil_err(wil, "No Tx VRING available\n");
		goto drop;
	}
	/* set up vring entry */
	rc = wil_tx_vring(wil, vring, skb);

	switch (rc) {
	case 0:
		/* statistics will be updated on the tx_complete */
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	case -ENOMEM:
		return NETDEV_TX_BUSY;
	default:
		break; /* goto drop; */
	}
 drop:
	ndev->stats.tx_dropped++;
	dev_kfree_skb_any(skb);

	return NET_XMIT_DROP;
}
881 
882 /**
883  * Clean up transmitted skb's from the Tx VRING
884  *
885  * Return number of descriptors cleared
886  *
887  * Safe to call from IRQ
888  */
889 int wil_tx_complete(struct wil6210_priv *wil, int ringid)
890 {
891 	struct net_device *ndev = wil_to_ndev(wil);
892 	struct device *dev = wil_to_dev(wil);
893 	struct vring *vring = &wil->vring_tx[ringid];
894 	int done = 0;
895 
896 	if (!vring->va) {
897 		wil_err(wil, "Tx irq[%d]: vring not initialized\n", ringid);
898 		return 0;
899 	}
900 
901 	wil_dbg_txrx(wil, "%s(%d)\n", __func__, ringid);
902 
903 	while (!wil_vring_is_empty(vring)) {
904 		volatile struct vring_tx_desc *_d =
905 					      &vring->va[vring->swtail].tx;
906 		struct vring_tx_desc dd, *d = &dd;
907 		dma_addr_t pa;
908 		u16 dmalen;
909 		struct wil_ctx *ctx = &vring->ctx[vring->swtail];
910 		struct sk_buff *skb = ctx->skb;
911 
912 		*d = *_d;
913 
914 		if (!(d->dma.status & TX_DMA_STATUS_DU))
915 			break;
916 
917 		dmalen = le16_to_cpu(d->dma.length);
918 		trace_wil6210_tx_done(ringid, vring->swtail, dmalen,
919 				      d->dma.error);
920 		wil_dbg_txrx(wil,
921 			     "Tx[%3d] : %d bytes, status 0x%02x err 0x%02x\n",
922 			     vring->swtail, dmalen, d->dma.status,
923 			     d->dma.error);
924 		wil_hex_dump_txrx("TxC ", DUMP_PREFIX_NONE, 32, 4,
925 				  (const void *)d, sizeof(*d), false);
926 
927 		pa = wil_desc_addr(&d->dma.addr);
928 		if (ctx->mapped_as_page)
929 			dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE);
930 		else
931 			dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);
932 
933 		if (skb) {
934 			if (d->dma.error == 0) {
935 				ndev->stats.tx_packets++;
936 				ndev->stats.tx_bytes += skb->len;
937 			} else {
938 				ndev->stats.tx_errors++;
939 			}
940 
941 			dev_kfree_skb_any(skb);
942 		}
943 		memset(ctx, 0, sizeof(*ctx));
944 		/*
945 		 * There is no need to touch HW descriptor:
946 		 * - ststus bit TX_DMA_STATUS_DU is set by design,
947 		 *   so hardware will not try to process this desc.,
948 		 * - rest of descriptor will be initialized on Tx.
949 		 */
950 		vring->swtail = wil_vring_next_tail(vring);
951 		done++;
952 	}
953 	if (wil_vring_avail_tx(vring) > vring->size/4)
954 		netif_tx_wake_all_queues(wil_to_ndev(wil));
955 
956 	return done;
957 }