// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2018 Felix Fietkau <nbd@nbd.name>
 */
#include "mt76.h"

#define REORDER_TIMEOUT (HZ / 10)

/* Advance the BA window head by one sequence number and move the frame
 * buffered at @idx, if any, to @frames.
 */
static void
mt76_aggr_release(struct mt76_rx_tid *tid, struct sk_buff_head *frames, int idx)
{
	struct sk_buff *skb;

	tid->head = ieee80211_sn_inc(tid->head);

	skb = tid->reorder_buf[idx];
	if (!skb)
		return;

	tid->reorder_buf[idx] = NULL;
	tid->nframes--;
	__skb_queue_tail(frames, skb);
}

/* Move the window forward until tid->head reaches @head, releasing any
 * frames buffered along the way.
 */
static void
mt76_rx_aggr_release_frames(struct mt76_rx_tid *tid,
			    struct sk_buff_head *frames,
			    u16 head)
{
	int idx;

	while (ieee80211_sn_less(tid->head, head)) {
		idx = tid->head % tid->size;
		mt76_aggr_release(tid, frames, idx);
	}
}

/* Release the run of consecutively buffered frames at the window head */
static void
mt76_rx_aggr_release_head(struct mt76_rx_tid *tid, struct sk_buff_head *frames)
{
	int idx = tid->head % tid->size;

	while (tid->reorder_buf[idx]) {
		mt76_aggr_release(tid, frames, idx);
		idx = tid->head % tid->size;
	}
}

/* Release frames that have been buffered for longer than REORDER_TIMEOUT,
 * along with everything queued before them.
 */
static void
mt76_rx_aggr_check_release(struct mt76_rx_tid *tid, struct sk_buff_head *frames)
{
	struct mt76_rx_status *status;
	struct sk_buff *skb;
	int start, idx, nframes;

	if (!tid->nframes)
		return;

	mt76_rx_aggr_release_head(tid, frames);

	start = tid->head % tid->size;
	nframes = tid->nframes;

	for (idx = (tid->head + 1) % tid->size;
	     idx != start && nframes;
	     idx = (idx + 1) % tid->size) {
		skb = tid->reorder_buf[idx];
		if (!skb)
			continue;

		nframes--;
		status = (struct mt76_rx_status *)skb->cb;
		if (!time_after(jiffies,
				status->reorder_time + REORDER_TIMEOUT))
			continue;

		mt76_rx_aggr_release_frames(tid, frames, status->seqno);
	}

	mt76_rx_aggr_release_head(tid, frames);
}

/* Delayed work: flush timed-out frames and rearm while frames remain */
static void
mt76_rx_aggr_reorder_work(struct work_struct *work)
{
	struct mt76_rx_tid *tid = container_of(work, struct mt76_rx_tid,
					       reorder_work.work);
	struct mt76_dev *dev = tid->dev;
	struct sk_buff_head frames;
	int nframes;

	__skb_queue_head_init(&frames);

	local_bh_disable();
	rcu_read_lock();

	spin_lock(&tid->lock);
	mt76_rx_aggr_check_release(tid, &frames);
	nframes = tid->nframes;
	spin_unlock(&tid->lock);

	if (nframes)
		ieee80211_queue_delayed_work(tid->dev->hw, &tid->reorder_work,
					     REORDER_TIMEOUT);
	mt76_rx_complete(dev, &frames, NULL);

	rcu_read_unlock();
	local_bh_enable();
}

/* On a BlockAck request (BAR), move the window forward to the BAR's
 * starting sequence number and release the frames this uncovers.
 */
static void
mt76_rx_aggr_check_ctl(struct sk_buff *skb, struct sk_buff_head *frames)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
	struct mt76_wcid *wcid = status->wcid;
	struct mt76_rx_tid *tid;
	u16 seqno;

	if (!ieee80211_is_ctl(bar->frame_control))
		return;

	if (!ieee80211_is_back_req(bar->frame_control))
		return;

	status->tid = le16_to_cpu(bar->control) >> 12;
	seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(bar->start_seq_num));
	tid = rcu_dereference(wcid->aggr[status->tid]);
	if (!tid)
		return;

	spin_lock_bh(&tid->lock);
	if (!tid->stopped) {
		mt76_rx_aggr_release_frames(tid, frames, seqno);
		mt76_rx_aggr_release_head(tid, frames);
	}
	spin_unlock_bh(&tid->lock);
}
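/*
 * mt76_rx_aggr_reorder - feed a received frame through the RX reorder buffer
 *
 * The frame is left on @frames for immediate release unless it belongs to an
 * active BA session and arrives ahead of the window head, in which case it is
 * parked in tid->reorder_buf until the head catches up, a BAR moves the
 * window past it, or REORDER_TIMEOUT expires.
 */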
void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct mt76_wcid *wcid = status->wcid;
	struct ieee80211_sta *sta;
	struct mt76_rx_tid *tid;
	bool sn_less;
	u16 seqno, head, size;
	u8 ackp, idx;

	__skb_queue_tail(frames, skb);

	sta = wcid_to_sta(wcid);
	if (!sta)
		return;

	if (!status->aggr) {
		mt76_rx_aggr_check_ctl(skb, frames);
		return;
	}

	/* not part of a BA session */
	ackp = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_ACK_POLICY_MASK;
	if (ackp != IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK &&
	    ackp != IEEE80211_QOS_CTL_ACK_POLICY_NORMAL)
		return;

	tid = rcu_dereference(wcid->aggr[status->tid]);
	if (!tid)
		return;

	status->flag |= RX_FLAG_DUP_VALIDATED;
	spin_lock_bh(&tid->lock);

	if (tid->stopped)
		goto out;

	head = tid->head;
	seqno = status->seqno;
	size = tid->size;
	sn_less = ieee80211_sn_less(seqno, head);

	if (!tid->started) {
		if (sn_less)
			goto out;

		tid->started = true;
	}

	if (sn_less) {
		__skb_unlink(skb, frames);
		dev_kfree_skb(skb);
		goto out;
	}

	if (seqno == head) {
		tid->head = ieee80211_sn_inc(head);
		if (tid->nframes)
			mt76_rx_aggr_release_head(tid, frames);
		goto out;
	}

	__skb_unlink(skb, frames);

	/*
	 * Frame sequence number exceeds buffering window, free up some space
	 * by releasing previous frames
	 */
	if (!ieee80211_sn_less(seqno, head + size)) {
		head = ieee80211_sn_inc(ieee80211_sn_sub(seqno, size));
		mt76_rx_aggr_release_frames(tid, frames, head);
	}

	idx = seqno % size;

	/* Discard if the current slot is already in use */
	if (tid->reorder_buf[idx]) {
		dev_kfree_skb(skb);
		goto out;
	}

	status->reorder_time = jiffies;
	tid->reorder_buf[idx] = skb;
	tid->nframes++;
	mt76_rx_aggr_release_head(tid, frames);

	ieee80211_queue_delayed_work(tid->dev->hw, &tid->reorder_work,
				     REORDER_TIMEOUT);

out:
	spin_unlock_bh(&tid->lock);
}

/* Set up RX reorder state for a new BA session on @tidno; any previous
 * session on the same TID is torn down first.
 */
int mt76_rx_aggr_start(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tidno,
		       u16 ssn, u8 size)
{
	struct mt76_rx_tid *tid;

	mt76_rx_aggr_stop(dev, wcid, tidno);

	tid = kzalloc(struct_size(tid, reorder_buf, size), GFP_KERNEL);
	if (!tid)
		return -ENOMEM;

	tid->dev = dev;
	tid->head = ssn;
	tid->size = size;
	INIT_DELAYED_WORK(&tid->reorder_work, mt76_rx_aggr_reorder_work);
	spin_lock_init(&tid->lock);

	rcu_assign_pointer(wcid->aggr[tidno], tid);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_rx_aggr_start);

/* Mark the session stopped and drop any frames still buffered */
static void mt76_rx_aggr_shutdown(struct mt76_dev *dev, struct mt76_rx_tid *tid)
{
	u8 size = tid->size;
	int i;

	spin_lock_bh(&tid->lock);

	tid->stopped = true;
	for (i = 0; tid->nframes && i < size; i++) {
		struct sk_buff *skb = tid->reorder_buf[i];

		if (!skb)
			continue;

		tid->nframes--;
		dev_kfree_skb(skb);
	}

	spin_unlock_bh(&tid->lock);

	cancel_delayed_work_sync(&tid->reorder_work);
}

/* Unpublish the TID state, flush it and free it after an RCU grace period */
void mt76_rx_aggr_stop(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tidno)
{
	struct mt76_rx_tid *tid = NULL;

	tid = rcu_replace_pointer(wcid->aggr[tidno], tid,
				  lockdep_is_held(&dev->mutex));
	if (tid) {
		mt76_rx_aggr_shutdown(dev, tid);
		kfree_rcu(tid, rcu_head);
	}
}
EXPORT_SYMBOL_GPL(mt76_rx_aggr_stop);
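
#if 0
/*
 * Illustrative sketch only, not part of this file: the exported entry points
 * above are normally driven from a driver's ieee80211_ops::ampdu_action
 * callback. mt76_example_rx_ampdu_action is a hypothetical helper; how a
 * particular driver resolves its struct mt76_dev and struct mt76_wcid from
 * the mac80211 objects varies between mt76 sub-drivers.
 */
static int
mt76_example_rx_ampdu_action(struct mt76_dev *dev, struct mt76_wcid *wcid,
			     struct ieee80211_ampdu_params *params)
{
	int ret = 0;

	/* mt76_rx_aggr_stop() checks dev->mutex via lockdep */
	mutex_lock(&dev->mutex);

	switch (params->action) {
	case IEEE80211_AMPDU_RX_START:
		/* Size the reorder buffer to the negotiated BA window and
		 * publish it under RCU, starting at the peer's SSN.
		 */
		ret = mt76_rx_aggr_start(dev, wcid, params->tid,
					 params->ssn, params->buf_size);
		break;
	case IEEE80211_AMPDU_RX_STOP:
		/* Unpublish the TID, drop buffered frames and free the
		 * state after an RCU grace period.
		 */
		mt76_rx_aggr_stop(dev, wcid, params->tid);
		break;
	default:
		/* TX aggregation actions omitted from this sketch */
		break;
	}

	mutex_unlock(&dev->mutex);

	return ret;
}
#endif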