/*
 * Copyright (c) 2014 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "wil6210.h"
#include "txrx.h"

/* 802.11 sequence numbers are 12 bits wide; all sequence arithmetic in
 * this file is performed modulo SEQ_MODULO
 */
#define SEQ_MODULO 0x1000
#define SEQ_MASK 0xfff

/* modular "less than": non-zero when @sq1 precedes @sq2 within half of
 * the 12-bit sequence space
 */
static inline int seq_less(u16 sq1, u16 sq2)
{
	return ((sq1 - sq2) & SEQ_MASK) > (SEQ_MODULO >> 1);
}

/* advance a sequence number by one, wrapping at SEQ_MODULO */
static inline u16 seq_inc(u16 sq)
{
	return (sq + 1) & SEQ_MASK;
}

/* modular distance from @sq2 up to @sq1 */
static inline u16 seq_sub(u16 sq1, u16 sq2)
{
	return (sq1 - sq2) & SEQ_MASK;
}

/* slot in the circular reorder buffer for frame with sequence @seq,
 * relative to the BACK session's starting sequence number (SSN)
 */
static inline int reorder_index(struct wil_tid_ampdu_rx *r, u16 seq)
{
	return seq_sub(seq, r->ssn) % r->buf_size;
}

/* Pass the frame stored at @index (if any) up the stack and advance
 * the reorder window head by one.
 * Caller must hold the owning sta->tid_rx_lock (all callers here are
 * reached from wil_rx_reorder()/wil_tid_ampdu_rx_free() under that lock).
 */
static void wil_release_reorder_frame(struct wil6210_priv *wil,
				      struct wil_tid_ampdu_rx *r,
				      int index)
{
	struct net_device *ndev = wil_to_ndev(wil);
	struct sk_buff *skb = r->reorder_buf[index];

	if (!skb)
		goto no_frame;

	/* release the frame from the reorder ring buffer */
	r->stored_mpdu_num--;
	r->reorder_buf[index] = NULL;
	wil_netif_rx_any(skb, ndev);

no_frame:
	/* head advances even for an empty slot - that hole is given up */
	r->head_seq_num = seq_inc(r->head_seq_num);
}

/* Release every stored frame with sequence number before @hseq,
 * then snap the window head to @hseq.
 */
static void wil_release_reorder_frames(struct wil6210_priv *wil,
				       struct wil_tid_ampdu_rx *r,
				       u16 hseq)
{
	int index;

	/* note: this function is never called with
	 * hseq preceding r->head_seq_num, i.e it is always true
	 * !seq_less(hseq, r->head_seq_num)
	 * and thus on loop exit it should be
	 * r->head_seq_num == hseq
	 */
	while (seq_less(r->head_seq_num, hseq) && r->stored_mpdu_num) {
		index = reorder_index(r, r->head_seq_num);
		wil_release_reorder_frame(wil, r, index);
	}
	/* in case the loop stopped early (no more stored MPDUs),
	 * force the head to the requested position
	 */
	r->head_seq_num = hseq;
}

/* Release the longest run of consecutively buffered frames starting
 * at the current window head.
 */
static void wil_reorder_release(struct wil6210_priv *wil,
				struct wil_tid_ampdu_rx *r)
{
	int index = reorder_index(r, r->head_seq_num);

	while (r->reorder_buf[index]) {
		wil_release_reorder_frame(wil, r, index);
		index = reorder_index(r, r->head_seq_num);
	}
}

/* Rx entry point for reordering: deliver @skb in order per its
 * (CID, TID) Block-Ack session, buffering out-of-order frames.
 * Frames for TIDs with no established BACK session, and multicast
 * frames, bypass reordering and go straight up the stack.
 */
void wil_rx_reorder(struct wil6210_priv *wil, struct sk_buff *skb)
{
	struct net_device *ndev = wil_to_ndev(wil);
	struct vring_rx_desc *d = wil_skb_rxdesc(skb);
	int tid = wil_rxdesc_tid(d);
	int cid = wil_rxdesc_cid(d);
	int mid = wil_rxdesc_mid(d);
	u16 seq = wil_rxdesc_seq(d);
	int mcast = wil_rxdesc_mcast(d);
	struct wil_sta_info *sta = &wil->sta[cid];
	struct wil_tid_ampdu_rx *r;
	u16 hseq;
	int index;
	unsigned long flags;

	wil_dbg_txrx(wil, "MID %d CID %d TID %d Seq 0x%03x mcast %01x\n",
		     mid, cid, tid, seq, mcast);

	/* multicast is never subject to a BACK agreement - deliver as is */
	if (unlikely(mcast)) {
		wil_netif_rx_any(skb, ndev);
		return;
	}

	spin_lock_irqsave(&sta->tid_rx_lock, flags);

	r = sta->tid_rx[tid];
	if (!r) {
		/* no reorder context for this TID - deliver unordered */
		spin_unlock_irqrestore(&sta->tid_rx_lock, flags);
		wil_netif_rx_any(skb, ndev);
		return;
	}

	hseq = r->head_seq_num;

	/** Due to the race between WMI events, where BACK establishment
	 * reported, and data Rx, few packets may be pass up before reorder
	 * buffer get allocated. Catch up by pretending SSN is what we
	 * see in the 1-st Rx packet
	 *
	 * Another scenario, Rx get delayed and we got packet from before
	 * BACK. Pass it to the stack and wait.
	 */
	if (r->first_time) {
		r->first_time = false;
		if (seq != r->head_seq_num) {
			if (seq_less(seq, r->head_seq_num)) {
				wil_err(wil,
					"Error: frame with early sequence 0x%03x, should be 0x%03x. Waiting...\n",
					seq, r->head_seq_num);
				/* keep waiting for the real first frame */
				r->first_time = true;
				wil_netif_rx_any(skb, ndev);
				goto out;
			}
			wil_err(wil,
				"Error: 1-st frame with wrong sequence 0x%03x, should be 0x%03x. Fixing...\n",
				seq, r->head_seq_num);
			/* re-anchor the window on the observed sequence */
			r->head_seq_num = seq;
			r->ssn = seq;
		}
	}

	/* frame with out of date sequence number */
	if (seq_less(seq, r->head_seq_num)) {
		r->ssn_last_drop = seq;
		dev_kfree_skb(skb);
		goto out;
	}

	/*
	 * If frame the sequence number exceeds our buffering window
	 * size release some previous frames to make room for this one.
	 */
	if (!seq_less(seq, r->head_seq_num + r->buf_size)) {
		/* new head so that @seq is the last slot of the window */
		hseq = seq_inc(seq_sub(seq, r->buf_size));
		/* release stored frames up to new head to stack */
		wil_release_reorder_frames(wil, r, hseq);
	}

	/* Now the new frame is always in the range of the reordering buffer */

	index = reorder_index(r, seq);

	/* check if we already stored this frame */
	if (r->reorder_buf[index]) {
		dev_kfree_skb(skb);
		goto out;
	}

	/*
	 * If the current MPDU is in the right order and nothing else
	 * is stored we can process it directly, no need to buffer it.
	 * If it is first but there's something stored, we may be able
	 * to release frames after this one.
	 */
	if (seq == r->head_seq_num && r->stored_mpdu_num == 0) {
		r->head_seq_num = seq_inc(r->head_seq_num);
		wil_netif_rx_any(skb, ndev);
		goto out;
	}

	/* put the frame in the reordering buffer */
	r->reorder_buf[index] = skb;
	r->reorder_time[index] = jiffies;
	r->stored_mpdu_num++;
	/* flush any run of in-order frames this one may have completed */
	wil_reorder_release(wil, r);

out:
	spin_unlock_irqrestore(&sta->tid_rx_lock, flags);
}

/* Allocate a reorder context for one BACK session.
 * @size: negotiated aggregation window (number of buffer slots)
 * @ssn:  starting sequence number for the session
 * Returns NULL on allocation failure; otherwise a context ready for use.
 * Freed with wil_tid_ampdu_rx_free().
 */
struct wil_tid_ampdu_rx *wil_tid_ampdu_rx_alloc(struct wil6210_priv *wil,
						int size, u16 ssn)
{
	struct wil_tid_ampdu_rx *r = kzalloc(sizeof(*r), GFP_KERNEL);

	if (!r)
		return NULL;

	r->reorder_buf =
		kcalloc(size, sizeof(struct sk_buff *), GFP_KERNEL);
	r->reorder_time =
		kcalloc(size, sizeof(unsigned long), GFP_KERNEL);
	if (!r->reorder_buf || !r->reorder_time) {
		/* kfree(NULL) is a no-op, so partial allocation is fine */
		kfree(r->reorder_buf);
		kfree(r->reorder_time);
		kfree(r);
		return NULL;
	}

	r->ssn = ssn;
	r->head_seq_num = ssn;
	r->buf_size = size;
	r->stored_mpdu_num = 0;
	r->first_time = true;
	return r;
}

/* Tear down a reorder context: flush all still-buffered frames up the
 * stack (by sliding the head past the whole window), then free storage.
 * Safe to call with @r == NULL.
 */
void wil_tid_ampdu_rx_free(struct wil6210_priv *wil,
			   struct wil_tid_ampdu_rx *r)
{
	if (!r)
		return;
	wil_release_reorder_frames(wil, r, r->head_seq_num + r->buf_size);
	kfree(r->reorder_buf);
	kfree(r->reorder_time);
	kfree(r);
}

/* ADDBA processing */

/* Clamp a requested aggregation window to what we can buffer:
 * bounded by WIL_MAX_AGG_WSIZE and by how many max-size MPDUs fit in
 * WIL_MAX_AMPDU_SIZE. A request of 0 means "no preference" and yields
 * our maximum.
 */
static u16 wil_agg_size(struct wil6210_priv *wil, u16 req_agg_wsize)
{
	u16 max_agg_size = min_t(u16, WIL_MAX_AGG_WSIZE, WIL_MAX_AMPDU_SIZE /
				 (mtu_max + WIL_MAX_MPDU_OVERHEAD));

	if (!req_agg_wsize)
		return max_agg_size;

	return min(max_agg_size, req_agg_wsize);
}

/* Block Ack - Rx side (recipient) */

/* Queue an incoming ADDBA request for deferred handling by
 * wil_back_rx_worker(). Byte-swaps the wire (__le16) fields once here
 * so the worker deals in CPU order. Returns 0 or -ENOMEM.
 */
int wil_addba_rx_request(struct wil6210_priv *wil, u8 cidxtid,
			 u8 dialog_token, __le16 ba_param_set,
			 __le16 ba_timeout, __le16 ba_seq_ctrl)
{
	struct wil_back_rx *req = kzalloc(sizeof(*req), GFP_KERNEL);

	if (!req)
		return -ENOMEM;

	req->cidxtid = cidxtid;
	req->dialog_token = dialog_token;
	req->ba_param_set = le16_to_cpu(ba_param_set);
	req->ba_timeout = le16_to_cpu(ba_timeout);
	req->ba_seq_ctrl = le16_to_cpu(ba_seq_ctrl);

	mutex_lock(&wil->back_rx_mutex);
	list_add_tail(&req->list, &wil->back_rx_pending);
	mutex_unlock(&wil->back_rx_mutex);

	queue_work(wil->wq_service, &wil->back_rx_worker);

	return 0;
}

/* Process one queued ADDBA request: validate, send the WMI response,
 * and on success (re)build the per-TID reorder context.
 * Runs in workqueue context (wil_back_rx_worker).
 */
static void wil_back_rx_handle(struct wil6210_priv *wil,
			       struct wil_back_rx *req)
{
	struct wil_sta_info *sta;
	u8 cid, tid;
	u16 agg_wsize = 0;
	/* bit 0: A-MSDU supported
	 * bit 1: policy (should be 0 for us)
	 * bits 2..5: TID
	 * bits 6..15: buffer size
	 */
	u16 req_agg_wsize = WIL_GET_BITS(req->ba_param_set, 6, 15);
	bool agg_amsdu = !!(req->ba_param_set & BIT(0));
	int ba_policy = req->ba_param_set & BIT(1);
	u16 agg_timeout = req->ba_timeout;
	u16 status = WLAN_STATUS_SUCCESS;
	/* sequence control: low 4 bits are fragment number, the rest is
	 * the starting sequence number (per 802.11)
	 */
	u16 ssn = req->ba_seq_ctrl >> 4;
	unsigned long flags;
	int rc;

	parse_cidxtid(req->cidxtid, &cid, &tid);

	/* sanity checks */
	if (cid >= WIL6210_MAX_CID) {
		wil_err(wil, "BACK: invalid CID %d\n", cid);
		return;
	}

	sta = &wil->sta[cid];
	if (sta->status != wil_sta_connected) {
		wil_err(wil, "BACK: CID %d not connected\n", cid);
		return;
	}

	wil_dbg_wmi(wil,
		    "ADDBA request for CID %d %pM TID %d size %d timeout %d AMSDU%s policy %d token %d SSN 0x%03x\n",
		    cid, sta->addr, tid, req_agg_wsize, req->ba_timeout,
		    agg_amsdu ? "+" : "-", !!ba_policy, req->dialog_token, ssn);

	/* apply policies */
	if (ba_policy) {
		wil_err(wil, "BACK requested unsupported ba_policy == 1\n");
		status = WLAN_STATUS_INVALID_QOS_PARAM;
	}
	if (status == WLAN_STATUS_SUCCESS)
		agg_wsize = wil_agg_size(wil, req_agg_wsize);

	rc = wmi_addba_rx_resp(wil, cid, tid, req->dialog_token, status,
			       agg_amsdu, agg_wsize, agg_timeout);
	if (rc || (status != WLAN_STATUS_SUCCESS))
		return;

	/* apply */
	spin_lock_irqsave(&sta->tid_rx_lock, flags);

	/* replace any previous session for this TID */
	wil_tid_ampdu_rx_free(wil, sta->tid_rx[tid]);
	sta->tid_rx[tid] = wil_tid_ampdu_rx_alloc(wil, agg_wsize, ssn);

	spin_unlock_irqrestore(&sta->tid_rx_lock, flags);
}

/* Discard all pending (not yet handled) ADDBA Rx requests */
void wil_back_rx_flush(struct wil6210_priv *wil)
{
	struct wil_back_rx *evt, *t;

	wil_dbg_misc(wil, "%s()\n", __func__);

	mutex_lock(&wil->back_rx_mutex);

	list_for_each_entry_safe(evt, t, &wil->back_rx_pending, list) {
		list_del(&evt->list);
		kfree(evt);
	}

	mutex_unlock(&wil->back_rx_mutex);
}

/* Retrieve next ADDBA request from the pending list */
static struct list_head *next_back_rx(struct wil6210_priv *wil)
{
	struct list_head *ret = NULL;

	mutex_lock(&wil->back_rx_mutex);

	if (!list_empty(&wil->back_rx_pending)) {
		ret = wil->back_rx_pending.next;
		list_del(ret);
	}

	mutex_unlock(&wil->back_rx_mutex);

	return ret;
}

/* Workqueue handler: drain the pending ADDBA Rx queue, handling and
 * freeing each request in turn.
 */
void wil_back_rx_worker(struct work_struct *work)
{
	struct wil6210_priv *wil = container_of(work, struct wil6210_priv,
						back_rx_worker);
	struct wil_back_rx *evt;
	struct list_head *lh;

	while ((lh = next_back_rx(wil)) != NULL) {
		evt = list_entry(lh, struct wil_back_rx, list);

		wil_back_rx_handle(wil, evt);
		kfree(evt);
	}
}

/* BACK - Tx (originator) side */

/* Process one queued originator-side ADDBA: skip if an ADDBA exchange
 * is already in flight or a session is already established for the
 * ring, otherwise issue the WMI ADDBA request.
 */
static void wil_back_tx_handle(struct wil6210_priv *wil,
			       struct wil_back_tx *req)
{
	struct vring_tx_data *txdata = &wil->vring_tx_data[req->ringid];
	int rc;

	if (txdata->addba_in_progress) {
		wil_dbg_misc(wil, "ADDBA for vring[%d] already in progress\n",
			     req->ringid);
		return;
	}
	if (txdata->agg_wsize) {
		wil_dbg_misc(wil,
			     "ADDBA for vring[%d] already established wsize %d\n",
			     req->ringid, txdata->agg_wsize);
		return;
	}
	txdata->addba_in_progress = true;
	rc = wmi_addba(wil, req->ringid, req->agg_wsize, req->agg_timeout);
	if (rc)
		/* request failed - allow a retry later */
		txdata->addba_in_progress = false;
}

/* Retrieve next originator-side ADDBA request from the pending list */
static struct list_head *next_back_tx(struct wil6210_priv *wil)
{
	struct list_head *ret = NULL;

	mutex_lock(&wil->back_tx_mutex);

	if (!list_empty(&wil->back_tx_pending)) {
		ret = wil->back_tx_pending.next;
		list_del(ret);
	}

	mutex_unlock(&wil->back_tx_mutex);

	return ret;
}

/* Workqueue handler: drain the pending ADDBA Tx queue, handling and
 * freeing each request in turn.
 */
void wil_back_tx_worker(struct work_struct *work)
{
	struct wil6210_priv *wil = container_of(work, struct wil6210_priv,
						back_tx_worker);
	struct wil_back_tx *evt;
	struct list_head *lh;

	while ((lh = next_back_tx(wil)) != NULL) {
		evt = list_entry(lh, struct wil_back_tx, list);

		wil_back_tx_handle(wil, evt);
		kfree(evt);
	}
}

/* Discard all pending (not yet handled) ADDBA Tx requests */
void wil_back_tx_flush(struct wil6210_priv *wil)
{
	struct wil_back_tx *evt, *t;

	wil_dbg_misc(wil, "%s()\n", __func__);

	mutex_lock(&wil->back_tx_mutex);

	list_for_each_entry_safe(evt, t, &wil->back_tx_pending, list) {
		list_del(&evt->list);
		kfree(evt);
	}

	mutex_unlock(&wil->back_tx_mutex);
}

/* Queue an originator-side ADDBA request for ring @ringid with desired
 * window @wsize (clamped via wil_agg_size()); handled asynchronously by
 * wil_back_tx_worker(). Returns 0 or -ENOMEM.
 */
int wil_addba_tx_request(struct wil6210_priv *wil, u8 ringid, u16 wsize)
{
	struct wil_back_tx *req = kzalloc(sizeof(*req), GFP_KERNEL);

	if (!req)
		return -ENOMEM;

	req->ringid = ringid;
	req->agg_wsize = wil_agg_size(wil, wsize);
	req->agg_timeout = 0;

	mutex_lock(&wil->back_tx_mutex);
	list_add_tail(&req->list, &wil->back_tx_pending);
	mutex_unlock(&wil->back_tx_mutex);

	queue_work(wil->wq_service, &wil->back_tx_worker);

	return 0;
}