/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "core.h"
#include "txrx.h"
#include "htt.h"
#include "mac.h"
#include "debug.h"

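/* Report completion of an off-channel tx frame so that the thread
 * waiting on ar->offchan_tx_completed can resume. */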
static void ath10k_report_offchan_tx(struct ath10k *ar, struct sk_buff *skb)
{
	if (!ATH10K_SKB_CB(skb)->htt.is_offchan)
		return;

	/* If the original wait_for_completion() timed out before
	 * {data,mgmt}_tx_completed() was called then we could complete
	 * offchan_tx_completed for a different skb. Prevent this by using
	 * offchan_tx_skb. */
	spin_lock_bh(&ar->data_lock);
	if (ar->offchan_tx_skb != skb) {
		ath10k_warn("completed old offchannel frame\n");
		goto out;
	}

	complete(&ar->offchan_tx_completed);
	ar->offchan_tx_skb = NULL; /* just for sanity */

	ath10k_dbg(ATH10K_DBG_HTT, "completed offchannel skb %p\n", skb);
out:
	spin_unlock_bh(&ar->data_lock);
}

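/* Drop one reference on a tx descriptor. When the last reference is
 * gone the frame is DMA-unmapped, its tx status is reported to mac80211
 * and the msdu id is returned to the HTT tx pool. */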
void ath10k_txrx_tx_unref(struct ath10k_htt *htt, struct sk_buff *txdesc)
{
	struct device *dev = htt->ar->dev;
	struct ieee80211_tx_info *info;
	struct sk_buff *txfrag = ATH10K_SKB_CB(txdesc)->htt.txfrag;
	struct sk_buff *msdu = ATH10K_SKB_CB(txdesc)->htt.msdu;
	int ret;

	if (ATH10K_SKB_CB(txdesc)->htt.refcount == 0)
		return;

	ATH10K_SKB_CB(txdesc)->htt.refcount--;

	if (ATH10K_SKB_CB(txdesc)->htt.refcount > 0)
		return;

	if (txfrag) {
		ret = ath10k_skb_unmap(dev, txfrag);
		if (ret)
			ath10k_warn("txfrag unmap failed (%d)\n", ret);

		dev_kfree_skb_any(txfrag);
	}

	ret = ath10k_skb_unmap(dev, msdu);
	if (ret)
		ath10k_warn("data skb unmap failed (%d)\n", ret);

	ath10k_report_offchan_tx(htt->ar, msdu);

	info = IEEE80211_SKB_CB(msdu);
	memset(&info->status, 0, sizeof(info->status));

	if (ATH10K_SKB_CB(txdesc)->htt.discard) {
		ieee80211_free_txskb(htt->ar->hw, msdu);
		goto exit;
	}

	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
		info->flags |= IEEE80211_TX_STAT_ACK;

	if (ATH10K_SKB_CB(txdesc)->htt.no_ack)
		info->flags &= ~IEEE80211_TX_STAT_ACK;

	ieee80211_tx_status(htt->ar->hw, msdu);
	/* we do not own the msdu anymore */

exit:
	spin_lock_bh(&htt->tx_lock);
	htt->pending_tx[ATH10K_SKB_CB(txdesc)->htt.msdu_id] = NULL;
	ath10k_htt_tx_free_msdu_id(htt, ATH10K_SKB_CB(txdesc)->htt.msdu_id);
	__ath10k_htt_tx_dec_pending(htt);
	if (bitmap_empty(htt->used_msdu_ids, htt->max_num_pending_tx))
		wake_up(&htt->empty_tx_wq);
	spin_unlock_bh(&htt->tx_lock);

	dev_kfree_skb_any(txdesc);
}

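/* Handle an HTT tx completion indication from the firmware and release
 * the corresponding tx descriptor. */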
void ath10k_txrx_tx_completed(struct ath10k_htt *htt,
			      const struct htt_tx_done *tx_done)
{
	struct sk_buff *txdesc;

	ath10k_dbg(ATH10K_DBG_HTT, "htt tx completion msdu_id %u discard %d no_ack %d\n",
		   tx_done->msdu_id, !!tx_done->discard, !!tx_done->no_ack);

	if (tx_done->msdu_id >= htt->max_num_pending_tx) {
		ath10k_warn("msdu_id %u too big, ignoring tx completion\n",
			    tx_done->msdu_id);
		return;
	}

	txdesc = htt->pending_tx[tx_done->msdu_id];

	ATH10K_SKB_CB(txdesc)->htt.discard = tx_done->discard;
	ATH10K_SKB_CB(txdesc)->htt.no_ack = tx_done->no_ack;

	ath10k_txrx_tx_unref(htt, txdesc);
}

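/* Maps a legacy hardware rate code to an index into the rate table
 * registered with mac80211 (ath10k_rates[]); entries 0x00-0x07 cover
 * the CCK rates and 0x08-0x0F the OFDM rates. */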
static const u8 rx_legacy_rate_idx[] = {
	3,	/* 0x00  - 11Mbps  */
	2,	/* 0x01  - 5.5Mbps */
	1,	/* 0x02  - 2Mbps   */
	0,	/* 0x03  - 1Mbps   */
	3,	/* 0x04  - 11Mbps  */
	2,	/* 0x05  - 5.5Mbps */
	1,	/* 0x06  - 2Mbps   */
	0,	/* 0x07  - 1Mbps   */
	10,	/* 0x08  - 48Mbps  */
	8,	/* 0x09  - 24Mbps  */
	6,	/* 0x0A  - 12Mbps  */
	4,	/* 0x0B  - 6Mbps   */
	11,	/* 0x0C  - 54Mbps  */
	9,	/* 0x0D  - 36Mbps  */
	7,	/* 0x0E  - 18Mbps  */
	5,	/* 0x0F  - 9Mbps   */
};

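/* Decode the rate information carried in an HTT rx indication and fill
 * in the rate_idx, vht_nss and bandwidth/guard-interval flags of the
 * mac80211 rx status. */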
static void process_rx_rates(struct ath10k *ar, struct htt_rx_info *info,
			     enum ieee80211_band band,
			     struct ieee80211_rx_status *status)
{
	u8 cck, rate, rate_idx, bw, sgi, mcs, nss;
	u8 info0 = info->rate.info0;
	u32 info1 = info->rate.info1;
	u32 info2 = info->rate.info2;
	u8 preamble = 0;

	/* Check that the rate fields in the rx indication are valid */
	if (!(info0 & HTT_RX_INDICATION_INFO0_START_VALID))
		return;

	preamble = MS(info1, HTT_RX_INDICATION_INFO1_PREAMBLE_TYPE);

	switch (preamble) {
	case HTT_RX_LEGACY:
		cck = info0 & HTT_RX_INDICATION_INFO0_LEGACY_RATE_CCK;
		rate = MS(info0, HTT_RX_INDICATION_INFO0_LEGACY_RATE);
		rate_idx = 0;

		if (rate < 0x08 || rate > 0x0F)
			break;

		switch (band) {
		case IEEE80211_BAND_2GHZ:
			if (cck)
				rate &= ~BIT(3);
			rate_idx = rx_legacy_rate_idx[rate];
			break;
		case IEEE80211_BAND_5GHZ:
			rate_idx = rx_legacy_rate_idx[rate];
			/* We use the same rate table that was registered
			   with the HW - ath10k_rates[]. For 5GHz the CCK
			   rates are skipped, hence the -4 offset */
			rate_idx -= 4;
			break;
		default:
			break;
		}

		status->rate_idx = rate_idx;
		break;
	case HTT_RX_HT:
	case HTT_RX_HT_WITH_TXBF:
		/* HT-SIG - Table 20-11 in info1 and info2 */
		mcs = info1 & 0x1F;
		nss = mcs >> 3;
		bw = (info1 >> 7) & 1;
		sgi = (info2 >> 7) & 1;

		status->rate_idx = mcs;
		status->flag |= RX_FLAG_HT;
		if (sgi)
			status->flag |= RX_FLAG_SHORT_GI;
		if (bw)
			status->flag |= RX_FLAG_40MHZ;
		break;
	case HTT_RX_VHT:
	case HTT_RX_VHT_WITH_TXBF:
		/* VHT-SIG-A1 in info1, VHT-SIG-A2 in info2
		   TODO check this */
		mcs = (info2 >> 4) & 0x0F;
		nss = (info1 >> 10) & 0x07;
		bw = info1 & 3;
		sgi = info2 & 1;

		status->rate_idx = mcs;
		status->vht_nss = nss;

		if (sgi)
			status->flag |= RX_FLAG_SHORT_GI;

		switch (bw) {
		/* 20MHZ */
		case 0:
			break;
		/* 40MHZ */
		case 1:
			status->flag |= RX_FLAG_40MHZ;
			break;
		/* 80MHZ */
		case 2:
			status->flag |= RX_FLAG_80MHZ;
		}

		status->flag |= RX_FLAG_VHT;
		break;
	default:
		break;
	}
}

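/* Fill in the mac80211 rx status (crypto, FCS, signal, rate and channel
 * information) for a received frame and hand it over via ieee80211_rx(). */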
void ath10k_process_rx(struct ath10k *ar, struct htt_rx_info *info)
{
	struct ieee80211_rx_status *status;
	struct ieee80211_channel *ch;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)info->skb->data;

	status = IEEE80211_SKB_RXCB(info->skb);
	memset(status, 0, sizeof(*status));

	if (info->encrypt_type != HTT_RX_MPDU_ENCRYPT_NONE) {
		status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED |
				RX_FLAG_MMIC_STRIPPED;
		hdr->frame_control = __cpu_to_le16(
				__le16_to_cpu(hdr->frame_control) &
				~IEEE80211_FCTL_PROTECTED);
	}

	if (info->status == HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR)
		status->flag |= RX_FLAG_MMIC_ERROR;

	if (info->fcs_err)
		status->flag |= RX_FLAG_FAILED_FCS_CRC;

	status->signal = info->signal;

	spin_lock_bh(&ar->data_lock);
	ch = ar->scan_channel;
	if (!ch)
		ch = ar->rx_channel;
	spin_unlock_bh(&ar->data_lock);

	if (!ch) {
		ath10k_warn("no channel configured; ignoring frame!\n");
		dev_kfree_skb_any(info->skb);
		return;
	}

	process_rx_rates(ar, info, ch->band, status);
	status->band = ch->band;
	status->freq = ch->center_freq;

	ath10k_dbg(ATH10K_DBG_DATA,
		   "rx skb %p len %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u\n",
		   info->skb,
		   info->skb->len,
		   status->flag == 0 ? "legacy" : "",
		   status->flag & RX_FLAG_HT ? "ht" : "",
		   status->flag & RX_FLAG_VHT ? "vht" : "",
		   status->flag & RX_FLAG_40MHZ ? "40" : "",
		   status->flag & RX_FLAG_80MHZ ? "80" : "",
		   status->flag & RX_FLAG_SHORT_GI ? "sgi " : "",
		   status->rate_idx,
		   status->vht_nss,
		   status->freq,
		   status->band);

	ieee80211_rx(ar->hw, info->skb);
}

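/* Look up a peer entry by vdev id and MAC address. Must be called with
 * ar->data_lock held. */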
struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id,
				     const u8 *addr)
{
	struct ath10k_peer *peer;

	lockdep_assert_held(&ar->data_lock);

	list_for_each_entry(peer, &ar->peers, list) {
		if (peer->vdev_id != vdev_id)
			continue;
		if (memcmp(peer->addr, addr, ETH_ALEN))
			continue;

		return peer;
	}

	return NULL;
}

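/* Look up a peer entry by firmware-assigned peer id. Must be called
 * with ar->data_lock held. */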
static struct ath10k_peer *ath10k_peer_find_by_id(struct ath10k *ar,
						  int peer_id)
{
	struct ath10k_peer *peer;

	lockdep_assert_held(&ar->data_lock);

	list_for_each_entry(peer, &ar->peers, list)
		if (test_bit(peer_id, peer->peer_ids))
			return peer;

	return NULL;
}

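/* Wait up to 3 seconds for the firmware to map (expect_mapped == true)
 * or unmap (expect_mapped == false) the given peer, i.e. for the
 * corresponding peer map/unmap event to be processed. */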
static int ath10k_wait_for_peer_common(struct ath10k *ar, int vdev_id,
				       const u8 *addr, bool expect_mapped)
{
	int ret;

	ret = wait_event_timeout(ar->peer_mapping_wq, ({
			bool mapped;

			spin_lock_bh(&ar->data_lock);
			mapped = !!ath10k_peer_find(ar, vdev_id, addr);
			spin_unlock_bh(&ar->data_lock);

			mapped == expect_mapped;
		}), 3*HZ);

	if (ret <= 0)
		return -ETIMEDOUT;

	return 0;
}

int ath10k_wait_for_peer_created(struct ath10k *ar, int vdev_id, const u8 *addr)
{
	return ath10k_wait_for_peer_common(ar, vdev_id, addr, true);
}

int ath10k_wait_for_peer_deleted(struct ath10k *ar, int vdev_id, const u8 *addr)
{
	return ath10k_wait_for_peer_common(ar, vdev_id, addr, false);
}

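/* Handle an HTT peer map event: create the peer entry if it does not
 * exist yet and record the firmware-assigned peer id. */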
void ath10k_peer_map_event(struct ath10k_htt *htt,
			   struct htt_peer_map_event *ev)
{
	struct ath10k *ar = htt->ar;
	struct ath10k_peer *peer;

	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find(ar, ev->vdev_id, ev->addr);
	if (!peer) {
		peer = kzalloc(sizeof(*peer), GFP_ATOMIC);
		if (!peer)
			goto exit;

		peer->vdev_id = ev->vdev_id;
		memcpy(peer->addr, ev->addr, ETH_ALEN);
		list_add(&peer->list, &ar->peers);
		wake_up(&ar->peer_mapping_wq);
	}

	ath10k_dbg(ATH10K_DBG_HTT, "htt peer map vdev %d peer %pM id %d\n",
		   ev->vdev_id, ev->addr, ev->peer_id);

	set_bit(ev->peer_id, peer->peer_ids);
exit:
	spin_unlock_bh(&ar->data_lock);
}

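/* Handle an HTT peer unmap event: clear the peer id and free the peer
 * entry once all of its peer ids have been unmapped. */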
void ath10k_peer_unmap_event(struct ath10k_htt *htt,
			     struct htt_peer_unmap_event *ev)
{
	struct ath10k *ar = htt->ar;
	struct ath10k_peer *peer;

	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, ev->peer_id);
	if (!peer) {
		ath10k_warn("unknown peer id %d\n", ev->peer_id);
		goto exit;
	}

	ath10k_dbg(ATH10K_DBG_HTT, "htt peer unmap vdev %d peer %pM id %d\n",
		   peer->vdev_id, peer->addr, ev->peer_id);

	clear_bit(ev->peer_id, peer->peer_ids);

	if (bitmap_empty(peer->peer_ids, ATH10K_MAX_NUM_PEER_IDS)) {
		list_del(&peer->list);
		kfree(peer);
		wake_up(&ar->peer_mapping_wq);
	}

exit:
	spin_unlock_bh(&ar->data_lock);
}