/*
 * Copyright (C) 2018 Felix Fietkau <nbd@nbd.name>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include "mt76.h"

#define REORDER_TIMEOUT (HZ / 10)

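/*
 * Advance the reorder window head by one sequence number and, if slot @idx
 * holds a buffered frame, move it onto the @frames list.
 */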
static void
mt76_aggr_release(struct mt76_rx_tid *tid, struct sk_buff_head *frames, int idx)
{
	struct sk_buff *skb;

	tid->head = ieee80211_sn_inc(tid->head);

	skb = tid->reorder_buf[idx];
	if (!skb)
		return;

	tid->reorder_buf[idx] = NULL;
	tid->nframes--;
	__skb_queue_tail(frames, skb);
}

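/*
 * Move the reorder window forward until its head reaches @head, releasing
 * any frames buffered for the sequence numbers that fall out of the window.
 */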
static void
mt76_rx_aggr_release_frames(struct mt76_rx_tid *tid, struct sk_buff_head *frames,
			    u16 head)
{
	int idx;

	while (ieee80211_sn_less(tid->head, head)) {
		idx = tid->head % tid->size;
		mt76_aggr_release(tid, frames, idx);
	}
}

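/* Release the contiguous run of buffered frames starting at the window head */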
static void
mt76_rx_aggr_release_head(struct mt76_rx_tid *tid, struct sk_buff_head *frames)
{
	int idx = tid->head % tid->size;

	while (tid->reorder_buf[idx]) {
		mt76_aggr_release(tid, frames, idx);
		idx = tid->head % tid->size;
	}
}

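/*
 * Release frames that have been buffered for longer than REORDER_TIMEOUT,
 * along with all frames preceding them in the window, then flush whatever
 * has become contiguous at the head.
 */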
static void
mt76_rx_aggr_check_release(struct mt76_rx_tid *tid, struct sk_buff_head *frames)
{
	struct mt76_rx_status *status;
	struct sk_buff *skb;
	int start, idx, nframes;

	if (!tid->nframes)
		return;

	mt76_rx_aggr_release_head(tid, frames);

	start = tid->head % tid->size;
	nframes = tid->nframes;

	for (idx = (tid->head + 1) % tid->size;
	     idx != start && nframes;
	     idx = (idx + 1) % tid->size) {

		skb = tid->reorder_buf[idx];
		if (!skb)
			continue;

		nframes--;
		status = (struct mt76_rx_status *) skb->cb;
		if (!time_after(jiffies, status->reorder_time +
					 REORDER_TIMEOUT))
			continue;

		mt76_rx_aggr_release_frames(tid, frames, status->seqno);
	}

	mt76_rx_aggr_release_head(tid, frames);
}

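/*
 * Delayed work handler: flush timed-out frames from the reorder buffer and
 * reschedule itself as long as frames remain buffered.
 */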
static void
mt76_rx_aggr_reorder_work(struct work_struct *work)
{
	struct mt76_rx_tid *tid = container_of(work, struct mt76_rx_tid,
					       reorder_work.work);
	struct mt76_dev *dev = tid->dev;
	struct sk_buff_head frames;
	int nframes;

	__skb_queue_head_init(&frames);

	local_bh_disable();

	spin_lock(&tid->lock);
	mt76_rx_aggr_check_release(tid, &frames);
	nframes = tid->nframes;
	spin_unlock(&tid->lock);

	if (nframes)
		ieee80211_queue_delayed_work(tid->dev->hw, &tid->reorder_work,
					     REORDER_TIMEOUT);
	mt76_rx_complete(dev, &frames, -1);

	local_bh_enable();
}

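/*
 * Handle BlockAck request (BAR) control frames: move the reorder window of
 * the addressed TID up to the BAR starting sequence number and release the
 * frames that become in-order.
 */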
static void
mt76_rx_aggr_check_ctl(struct sk_buff *skb, struct sk_buff_head *frames)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *) skb->cb;
	struct ieee80211_bar *bar = (struct ieee80211_bar *) skb->data;
	struct mt76_wcid *wcid = status->wcid;
	struct mt76_rx_tid *tid;
	u16 seqno;

	if (!ieee80211_is_ctl(bar->frame_control))
		return;

	if (!ieee80211_is_back_req(bar->frame_control))
		return;

	status->tid = le16_to_cpu(bar->control) >> 12;
	seqno = le16_to_cpu(bar->start_seq_num) >> 4;
	tid = rcu_dereference(wcid->aggr[status->tid]);
	if (!tid)
		return;

	spin_lock_bh(&tid->lock);
	mt76_rx_aggr_release_frames(tid, frames, seqno);
	mt76_rx_aggr_release_head(tid, frames);
	spin_unlock_bh(&tid->lock);
}

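/*
 * RX reorder entry point. The frame is appended to @frames first; if it
 * belongs to an active aggregation session it may then be unlinked and held
 * in the per-TID reorder buffer until the missing frames arrive, or freed
 * if it falls behind the reorder window or duplicates a buffered slot.
 * The caller is expected to hold the RCU read lock, since wcid->aggr is an
 * RCU-protected pointer.
 */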
void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *) skb->cb;
	struct mt76_wcid *wcid = status->wcid;
	struct ieee80211_sta *sta;
	struct mt76_rx_tid *tid;
	bool sn_less;
	u16 seqno, head, size;
	u8 idx;

	__skb_queue_tail(frames, skb);

	sta = wcid_to_sta(wcid);
	if (!sta)
		return;

	if (!status->aggr) {
		mt76_rx_aggr_check_ctl(skb, frames);
		return;
	}

	tid = rcu_dereference(wcid->aggr[status->tid]);
	if (!tid)
		return;

	spin_lock_bh(&tid->lock);

	if (tid->stopped)
		goto out;

	head = tid->head;
	seqno = status->seqno;
	size = tid->size;
	sn_less = ieee80211_sn_less(seqno, head);

	if (!tid->started) {
		if (sn_less)
			goto out;

		tid->started = true;
	}

	if (sn_less) {
		__skb_unlink(skb, frames);
		dev_kfree_skb(skb);
		goto out;
	}

	if (seqno == head) {
		tid->head = ieee80211_sn_inc(head);
		if (tid->nframes)
			mt76_rx_aggr_release_head(tid, frames);
		goto out;
	}

	__skb_unlink(skb, frames);

	/*
	 * Frame sequence number exceeds buffering window, free up some space
	 * by releasing previous frames
	 */
	if (!ieee80211_sn_less(seqno, head + size)) {
		head = ieee80211_sn_inc(ieee80211_sn_sub(seqno, size));
		mt76_rx_aggr_release_frames(tid, frames, head);
	}

	idx = seqno % size;

	/* Discard if the current slot is already in use */
	if (tid->reorder_buf[idx]) {
		dev_kfree_skb(skb);
		goto out;
	}

	status->reorder_time = jiffies;
	tid->reorder_buf[idx] = skb;
	tid->nframes++;
	mt76_rx_aggr_release_head(tid, frames);

	ieee80211_queue_delayed_work(tid->dev->hw, &tid->reorder_work,
				     REORDER_TIMEOUT);

out:
	spin_unlock_bh(&tid->lock);
}

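/*
 * Set up RX A-MPDU reorder state for @tidno: allocate a reorder buffer of
 * @size slots with the window head at sequence number @ssn. Any previous
 * session on the same TID is torn down first.
 */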
int mt76_rx_aggr_start(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tidno,
		       u16 ssn, u8 size)
{
	struct mt76_rx_tid *tid;

	mt76_rx_aggr_stop(dev, wcid, tidno);

	tid = kzalloc(sizeof(*tid) + size * sizeof(tid->reorder_buf[0]),
		      GFP_KERNEL);
	if (!tid)
		return -ENOMEM;

	tid->dev = dev;
	tid->head = ssn;
	tid->size = size;
	INIT_DELAYED_WORK(&tid->reorder_work, mt76_rx_aggr_reorder_work);
	spin_lock_init(&tid->lock);

	rcu_assign_pointer(wcid->aggr[tidno], tid);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_rx_aggr_start);

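/*
 * Mark the session as stopped, free any frames still sitting in the reorder
 * buffer and cancel the pending reorder work.
 */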
static void mt76_rx_aggr_shutdown(struct mt76_dev *dev, struct mt76_rx_tid *tid)
{
	u8 size = tid->size;
	int i;

	spin_lock_bh(&tid->lock);

	tid->stopped = true;
	for (i = 0; tid->nframes && i < size; i++) {
		struct sk_buff *skb = tid->reorder_buf[i];

		if (!skb)
			continue;

		tid->nframes--;
		dev_kfree_skb(skb);
	}

	spin_unlock_bh(&tid->lock);

	cancel_delayed_work(&tid->reorder_work);
}

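/*
 * Tear down the RX aggregation session for @tidno and free its state after
 * an RCU grace period.
 */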
void mt76_rx_aggr_stop(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tidno)
{
	struct mt76_rx_tid *tid;

	rcu_read_lock();

	tid = rcu_dereference(wcid->aggr[tidno]);
	if (tid) {
		rcu_assign_pointer(wcid->aggr[tidno], NULL);
		mt76_rx_aggr_shutdown(dev, tid);
		kfree_rcu(tid, rcu_head);
	}

	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(mt76_rx_aggr_stop);