/*
 * Copyright (C) 2018 Felix Fietkau <nbd@nbd.name>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include "mt76.h"

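/* Maximum time an out-of-order frame may sit in the reorder buffer */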
#define REORDER_TIMEOUT (HZ / 10)

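/*
 * Advance the reorder window head by one slot and move the frame buffered
 * in that slot (if any) to the list of frames to be released
 */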
static void
mt76_aggr_release(struct mt76_rx_tid *tid, struct sk_buff_head *frames, int idx)
{
	struct sk_buff *skb;

	tid->head = ieee80211_sn_inc(tid->head);

	skb = tid->reorder_buf[idx];
	if (!skb)
		return;

	tid->reorder_buf[idx] = NULL;
	tid->nframes--;
	__skb_queue_tail(frames, skb);
}

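/*
 * Move the window head forward to the given sequence number, releasing any
 * frames buffered in the slots that are passed over
 */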
static void
mt76_rx_aggr_release_frames(struct mt76_rx_tid *tid,
			    struct sk_buff_head *frames,
			    u16 head)
{
	int idx;

	while (ieee80211_sn_less(tid->head, head)) {
		idx = tid->head % tid->size;
		mt76_aggr_release(tid, frames, idx);
	}
}

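/*
 * Release the contiguous run of buffered frames starting at the current
 * window head
 */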
static void
mt76_rx_aggr_release_head(struct mt76_rx_tid *tid, struct sk_buff_head *frames)
{
	int idx = tid->head % tid->size;

	while (tid->reorder_buf[idx]) {
		mt76_aggr_release(tid, frames, idx);
		idx = tid->head % tid->size;
	}
}

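/*
 * Scan the reorder buffer and flush frames that have been held longer than
 * REORDER_TIMEOUT, together with all frames preceding them in sequence order
 */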
static void
mt76_rx_aggr_check_release(struct mt76_rx_tid *tid, struct sk_buff_head *frames)
{
	struct mt76_rx_status *status;
	struct sk_buff *skb;
	int start, idx, nframes;

	if (!tid->nframes)
		return;

	mt76_rx_aggr_release_head(tid, frames);

	start = tid->head % tid->size;
	nframes = tid->nframes;

	for (idx = (tid->head + 1) % tid->size;
	     idx != start && nframes;
	     idx = (idx + 1) % tid->size) {
		skb = tid->reorder_buf[idx];
		if (!skb)
			continue;

		nframes--;
		status = (struct mt76_rx_status *) skb->cb;
		if (!time_after(jiffies, status->reorder_time +
					 REORDER_TIMEOUT))
			continue;

		mt76_rx_aggr_release_frames(tid, frames, status->seqno);
	}

	mt76_rx_aggr_release_head(tid, frames);
}

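/*
 * Delayed work that flushes timed-out frames from the reorder buffer and
 * passes them to mt76_rx_complete(); reschedules itself while frames remain
 */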
static void
mt76_rx_aggr_reorder_work(struct work_struct *work)
{
	struct mt76_rx_tid *tid = container_of(work, struct mt76_rx_tid,
					       reorder_work.work);
	struct mt76_dev *dev = tid->dev;
	struct sk_buff_head frames;
	int nframes;

	__skb_queue_head_init(&frames);

	local_bh_disable();
	rcu_read_lock();

	spin_lock(&tid->lock);
	mt76_rx_aggr_check_release(tid, &frames);
	nframes = tid->nframes;
	spin_unlock(&tid->lock);

	if (nframes)
		ieee80211_queue_delayed_work(tid->dev->hw, &tid->reorder_work,
					     REORDER_TIMEOUT);
	mt76_rx_complete(dev, &frames, NULL);

	rcu_read_unlock();
	local_bh_enable();
}

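/*
 * Handle BlockAck request (BAR) frames: move the receive window to the BAR
 * start sequence number and release the frames freed up by the move
 */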
static void
mt76_rx_aggr_check_ctl(struct sk_buff *skb, struct sk_buff_head *frames)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *) skb->cb;
	struct ieee80211_bar *bar = (struct ieee80211_bar *) skb->data;
	struct mt76_wcid *wcid = status->wcid;
	struct mt76_rx_tid *tid;
	u16 seqno;

	if (!ieee80211_is_ctl(bar->frame_control))
		return;

	if (!ieee80211_is_back_req(bar->frame_control))
		return;

	status->tid = le16_to_cpu(bar->control) >> 12;
	seqno = le16_to_cpu(bar->start_seq_num) >> 4;
	tid = rcu_dereference(wcid->aggr[status->tid]);
	if (!tid)
		return;

	spin_lock_bh(&tid->lock);
	mt76_rx_aggr_release_frames(tid, frames, seqno);
	mt76_rx_aggr_release_head(tid, frames);
	spin_unlock_bh(&tid->lock);
}

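/*
 * Reorder entry point for received data frames. The frame is queued on
 * @frames by default; if it belongs to an active BA session it is instead
 * released in order, dropped when it falls behind the window, or parked in
 * the reorder buffer until the missing frames arrive or time out
 */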
void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *) skb->cb;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct mt76_wcid *wcid = status->wcid;
	struct ieee80211_sta *sta;
	struct mt76_rx_tid *tid;
	bool sn_less;
	u16 seqno, head, size;
	u8 ackp, idx;

	__skb_queue_tail(frames, skb);

	sta = wcid_to_sta(wcid);
	if (!sta)
		return;

	if (!status->aggr) {
		mt76_rx_aggr_check_ctl(skb, frames);
		return;
	}

	/* not part of a BA session */
	ackp = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_ACK_POLICY_MASK;
	if (ackp != IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK &&
	    ackp != IEEE80211_QOS_CTL_ACK_POLICY_NORMAL)
		return;

	tid = rcu_dereference(wcid->aggr[status->tid]);
	if (!tid)
		return;

	status->flag |= RX_FLAG_DUP_VALIDATED;
	spin_lock_bh(&tid->lock);

	if (tid->stopped)
		goto out;

	head = tid->head;
	seqno = status->seqno;
	size = tid->size;
	sn_less = ieee80211_sn_less(seqno, head);

	if (!tid->started) {
		if (sn_less)
			goto out;

		tid->started = true;
	}

	if (sn_less) {
		__skb_unlink(skb, frames);
		dev_kfree_skb(skb);
		goto out;
	}

	if (seqno == head) {
		tid->head = ieee80211_sn_inc(head);
		if (tid->nframes)
			mt76_rx_aggr_release_head(tid, frames);
		goto out;
	}

	__skb_unlink(skb, frames);

	/*
	 * Frame sequence number exceeds buffering window, free up some space
	 * by releasing previous frames
	 */
	if (!ieee80211_sn_less(seqno, head + size)) {
		head = ieee80211_sn_inc(ieee80211_sn_sub(seqno, size));
		mt76_rx_aggr_release_frames(tid, frames, head);
	}

	idx = seqno % size;

	/* Discard if the current slot is already in use */
	if (tid->reorder_buf[idx]) {
		dev_kfree_skb(skb);
		goto out;
	}

	status->reorder_time = jiffies;
	tid->reorder_buf[idx] = skb;
	tid->nframes++;
	mt76_rx_aggr_release_head(tid, frames);

	ieee80211_queue_delayed_work(tid->dev->hw, &tid->reorder_work,
				     REORDER_TIMEOUT);

out:
	spin_unlock_bh(&tid->lock);
}

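/*
 * Enable receive reordering for a TID: allocate a reorder window of @size
 * slots starting at sequence number @ssn and publish it via RCU
 */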
int mt76_rx_aggr_start(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tidno,
		       u16 ssn, u8 size)
{
	struct mt76_rx_tid *tid;

	mt76_rx_aggr_stop(dev, wcid, tidno);

	tid = kzalloc(struct_size(tid, reorder_buf, size), GFP_KERNEL);
	if (!tid)
		return -ENOMEM;

	tid->dev = dev;
	tid->head = ssn;
	tid->size = size;
	INIT_DELAYED_WORK(&tid->reorder_work, mt76_rx_aggr_reorder_work);
	spin_lock_init(&tid->lock);

	rcu_assign_pointer(wcid->aggr[tidno], tid);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_rx_aggr_start);

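/*
 * Cancel the reorder work, mark the session as stopped and free any frames
 * still held in the reorder buffer
 */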
static void mt76_rx_aggr_shutdown(struct mt76_dev *dev, struct mt76_rx_tid *tid)
{
	u8 size = tid->size;
	int i;

	cancel_delayed_work(&tid->reorder_work);

	spin_lock_bh(&tid->lock);

	tid->stopped = true;
	for (i = 0; tid->nframes && i < size; i++) {
		struct sk_buff *skb = tid->reorder_buf[i];

		if (!skb)
			continue;

		tid->nframes--;
		dev_kfree_skb(skb);
	}

	spin_unlock_bh(&tid->lock);
}

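/*
 * Disable receive reordering for a TID: unpublish the RCU pointer, drain the
 * reorder buffer and free the state after an RCU grace period
 */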
void mt76_rx_aggr_stop(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tidno)
{
	struct mt76_rx_tid *tid;

	rcu_read_lock();

	tid = rcu_dereference(wcid->aggr[tidno]);
	if (tid) {
		rcu_assign_pointer(wcid->aggr[tidno], NULL);
		mt76_rx_aggr_shutdown(dev, tid);
		kfree_rcu(tid, rcu_head);
	}

	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(mt76_rx_aggr_stop);