/*
 * Copyright (c) 2007-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "core.h"
#include "hif.h"
#include "debug.h"
#include "hif-ops.h"
#include <asm/unaligned.h>

#define CALC_TXRX_PADDED_LEN(dev, len) (__ALIGN_MASK((len), (dev)->block_mask))

static void ath6kl_htc_mbox_cleanup(struct htc_target *target);
static void ath6kl_htc_mbox_stop(struct htc_target *target);
static int ath6kl_htc_mbox_add_rxbuf_multiple(struct htc_target *target,
					      struct list_head *pkt_queue);
static void ath6kl_htc_set_credit_dist(struct htc_target *target,
				       struct ath6kl_htc_credit_info *cred_info,
				       u16 svc_pri_order[], int len);

/* threshold to re-enable Tx bundling for an AC */
#define TX_RESUME_BUNDLE_THRESHOLD	1500

/* Functions for Tx credit handling */
static void ath6kl_credit_deposit(struct ath6kl_htc_credit_info *cred_info,
				  struct htc_endpoint_credit_dist *ep_dist,
				  int credits)
{
	ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit deposit ep %d credits %d\n",
		   ep_dist->endpoint, credits);

	ep_dist->credits += credits;
	ep_dist->cred_assngd += credits;
	cred_info->cur_free_credits -= credits;
}

static void ath6kl_credit_init(struct ath6kl_htc_credit_info *cred_info,
			       struct list_head *ep_list,
			       int tot_credits)
{
	struct htc_endpoint_credit_dist *cur_ep_dist;
	int count;

	ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit init total %d\n", tot_credits);

	cred_info->cur_free_credits = tot_credits;
	cred_info->total_avail_credits = tot_credits;

	list_for_each_entry(cur_ep_dist, ep_list, list) {
		if (cur_ep_dist->endpoint == ENDPOINT_0)
			continue;

		cur_ep_dist->cred_min = cur_ep_dist->cred_per_msg;

		if (tot_credits > 4) {
			if ((cur_ep_dist->svc_id == WMI_DATA_BK_SVC) ||
			    (cur_ep_dist->svc_id == WMI_DATA_BE_SVC)) {
				ath6kl_credit_deposit(cred_info,
						      cur_ep_dist,
						      cur_ep_dist->cred_min);
				cur_ep_dist->dist_flags |= HTC_EP_ACTIVE;
			}
		}

		if (cur_ep_dist->svc_id == WMI_CONTROL_SVC) {
			ath6kl_credit_deposit(cred_info, cur_ep_dist,
					      cur_ep_dist->cred_min);
			/*
			 * Control service is always marked active, it
			 * never goes inactive EVER.
			 */
			cur_ep_dist->dist_flags |= HTC_EP_ACTIVE;
		}

		/*
		 * Streams have to be created (explicit | implicit) for all
		 * kinds of traffic. BE endpoints are also inactive in the
		 * beginning. When BE traffic starts it creates implicit
		 * streams that redistribute credits.
		 *
		 * Note: all other endpoints have minimums set but are
		 * initially given NO credits.
		 * Credits will be distributed
		 * as traffic activity demands.
		 */
	}

	/*
	 * ath6kl_credit_seek() walks the whole ep list with
	 * list_for_each_entry_reverse(), so only assign
	 * lowestpri_ep_dist after the walk above has completed.
	 */
	cred_info->lowestpri_ep_dist = cur_ep_dist->list;

	WARN_ON(cred_info->cur_free_credits <= 0);

	list_for_each_entry(cur_ep_dist, ep_list, list) {
		if (cur_ep_dist->endpoint == ENDPOINT_0)
			continue;

		if (cur_ep_dist->svc_id == WMI_CONTROL_SVC)
			cur_ep_dist->cred_norm = cur_ep_dist->cred_per_msg;
		else {
			/*
			 * For the remaining data endpoints, we assume that
			 * each cred_per_msg is the same. We use a simple
			 * calculation here: we take the remaining credits
			 * and determine how many max messages this can
			 * cover and then set each endpoint's normal value
			 * equal to 3/4 this amount.
			 */
			count = (cred_info->cur_free_credits /
				 cur_ep_dist->cred_per_msg)
				* cur_ep_dist->cred_per_msg;
			count = (count * 3) >> 2;
			count = max(count, cur_ep_dist->cred_per_msg);
			cur_ep_dist->cred_norm = count;
		}

		ath6kl_dbg(ATH6KL_DBG_CREDIT,
			   "credit ep %d svc_id %d credits %d per_msg %d norm %d min %d\n",
			   cur_ep_dist->endpoint,
			   cur_ep_dist->svc_id,
			   cur_ep_dist->credits,
			   cur_ep_dist->cred_per_msg,
			   cur_ep_dist->cred_norm,
			   cur_ep_dist->cred_min);
	}
}

/* initialize and setup credit distribution */
static int ath6kl_htc_mbox_credit_setup(struct htc_target *htc_target,
					struct ath6kl_htc_credit_info *cred_info)
{
	u16 servicepriority[5];

	memset(cred_info, 0, sizeof(struct ath6kl_htc_credit_info));

	servicepriority[0] = WMI_CONTROL_SVC;  /* highest */
	servicepriority[1] = WMI_DATA_VO_SVC;
	servicepriority[2] = WMI_DATA_VI_SVC;
	servicepriority[3] = WMI_DATA_BE_SVC;
	servicepriority[4] = WMI_DATA_BK_SVC; /* lowest */

	/* set priority list */
	ath6kl_htc_set_credit_dist(htc_target, cred_info, servicepriority, 5);

	return 0;
}

/* reduce an ep's credits back to a set limit */
static void ath6kl_credit_reduce(struct ath6kl_htc_credit_info *cred_info,
				 struct htc_endpoint_credit_dist *ep_dist,
				 int limit)
{
	int credits;

	ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit reduce ep %d limit %d\n",
		   ep_dist->endpoint, limit);

	ep_dist->cred_assngd = limit;

	if (ep_dist->credits <= limit)
		return;

	credits = ep_dist->credits - limit;
	ep_dist->credits -= credits;
	cred_info->cur_free_credits += credits;
}

static void ath6kl_credit_update(struct ath6kl_htc_credit_info *cred_info,
				 struct list_head *epdist_list)
{
	struct htc_endpoint_credit_dist *cur_list;

	list_for_each_entry(cur_list, epdist_list, list) {
		if (cur_list->endpoint == ENDPOINT_0)
			continue;

		if (cur_list->cred_to_dist > 0) {
			cur_list->credits += cur_list->cred_to_dist;
			cur_list->cred_to_dist = 0;

			if (cur_list->credits > cur_list->cred_assngd)
				ath6kl_credit_reduce(cred_info,
						     cur_list,
						     cur_list->cred_assngd);

			if (cur_list->credits > cur_list->cred_norm)
				ath6kl_credit_reduce(cred_info, cur_list,
						     cur_list->cred_norm);

			if (!(cur_list->dist_flags & HTC_EP_ACTIVE)) {
				if (cur_list->txq_depth == 0)
					ath6kl_credit_reduce(cred_info,
							     cur_list, 0);
			}
		}
	}
}

/*
 * HTC has an endpoint that needs credits, ep_dist is the
 * endpoint in question.
 */
static void ath6kl_credit_seek(struct ath6kl_htc_credit_info *cred_info,
			       struct htc_endpoint_credit_dist *ep_dist)
{
	struct htc_endpoint_credit_dist *curdist_list;
	int credits = 0;
	int need;

	if (ep_dist->svc_id == WMI_CONTROL_SVC)
		goto out;

	if ((ep_dist->svc_id == WMI_DATA_VI_SVC) ||
	    (ep_dist->svc_id == WMI_DATA_VO_SVC))
		if ((ep_dist->cred_assngd >= ep_dist->cred_norm))
			goto out;

	/*
	 * For all other services, we follow a simple algorithm of:
	 *
	 * 1. checking the free pool for credits
	 * 2. checking lower priority endpoints for credits to take
	 */

	credits = min(cred_info->cur_free_credits, ep_dist->seek_cred);

	if (credits >= ep_dist->seek_cred)
		goto out;

	/*
	 * We don't have enough in the free pool, try taking away from
	 * lower priority services. The rules for taking away credits:
	 *
	 * 1. Only take from lower priority endpoints
	 * 2. Only take what is allocated above the minimum (never
	 *    starve an endpoint completely)
	 * 3. Only take what you need.
	 */

	list_for_each_entry_reverse(curdist_list,
				    &cred_info->lowestpri_ep_dist,
				    list) {
		if (curdist_list == ep_dist)
			break;

		need = ep_dist->seek_cred - cred_info->cur_free_credits;

		if ((curdist_list->cred_assngd - need) >=
		     curdist_list->cred_min) {
			/*
			 * The current one has been allocated more than
			 * its minimum and it has enough credits assigned
			 * above its minimum to fulfill our need; try to
			 * take away just enough to fulfill our need.
			 */
			ath6kl_credit_reduce(cred_info, curdist_list,
					     curdist_list->cred_assngd - need);

			if (cred_info->cur_free_credits >=
			    ep_dist->seek_cred)
				break;
		}

		if (curdist_list->endpoint == ENDPOINT_0)
			break;
	}

	credits = min(cred_info->cur_free_credits, ep_dist->seek_cred);

out:
	/* did we find some credits? */
	if (credits)
		ath6kl_credit_deposit(cred_info, ep_dist, credits);

	ep_dist->seek_cred = 0;
}

/* redistribute credits based on activity change */
static void ath6kl_credit_redistribute(struct ath6kl_htc_credit_info *info,
				       struct list_head *ep_dist_list)
{
	struct htc_endpoint_credit_dist *curdist_list;

	list_for_each_entry(curdist_list, ep_dist_list, list) {
		if (curdist_list->endpoint == ENDPOINT_0)
			continue;

		if ((curdist_list->svc_id == WMI_DATA_BK_SVC) ||
		    (curdist_list->svc_id == WMI_DATA_BE_SVC))
			curdist_list->dist_flags |= HTC_EP_ACTIVE;

		if ((curdist_list->svc_id != WMI_CONTROL_SVC) &&
		    !(curdist_list->dist_flags & HTC_EP_ACTIVE)) {
			if (curdist_list->txq_depth == 0)
				ath6kl_credit_reduce(info, curdist_list, 0);
			else
				ath6kl_credit_reduce(info,
						     curdist_list,
						     curdist_list->cred_min);
		}
	}
}

/*
 * This function is invoked whenever endpoints require credit
 * distributions. A lock is held while this function is invoked, this
 * function shall NOT block. The ep_dist_list is a list of distribution
 * structures in prioritized order as defined by the call to the
 * htc_set_credit_dist() api.
 */
static void ath6kl_credit_distribute(struct ath6kl_htc_credit_info *cred_info,
				     struct list_head *ep_dist_list,
				     enum htc_credit_dist_reason reason)
{
	switch (reason) {
	case HTC_CREDIT_DIST_SEND_COMPLETE:
		ath6kl_credit_update(cred_info, ep_dist_list);
		break;
	case HTC_CREDIT_DIST_ACTIVITY_CHANGE:
		ath6kl_credit_redistribute(cred_info, ep_dist_list);
		break;
	default:
		break;
	}

	WARN_ON(cred_info->cur_free_credits > cred_info->total_avail_credits);
	WARN_ON(cred_info->cur_free_credits < 0);
}

static void ath6kl_htc_tx_buf_align(u8 **buf, unsigned long len)
{
	u8 *align_addr;

	if (!IS_ALIGNED((unsigned long) *buf, 4)) {
		align_addr = PTR_ALIGN(*buf - 4, 4);
		memmove(align_addr, *buf, len);
		*buf = align_addr;
	}
}

static void ath6kl_htc_tx_prep_pkt(struct htc_packet *packet, u8 flags,
				   int ctrl0, int ctrl1)
{
	struct htc_frame_hdr *hdr;

	packet->buf -= HTC_HDR_LENGTH;
	hdr = (struct htc_frame_hdr *)packet->buf;

	/* Endianness? */
	put_unaligned((u16)packet->act_len, &hdr->payld_len);
	hdr->flags = flags;
	hdr->eid = packet->endpoint;
	hdr->ctrl[0] = ctrl0;
	hdr->ctrl[1] = ctrl1;
}

static void htc_reclaim_txctrl_buf(struct htc_target *target,
				   struct htc_packet *pkt)
{
	spin_lock_bh(&target->htc_lock);
	list_add_tail(&pkt->list, &target->free_ctrl_txbuf);
	spin_unlock_bh(&target->htc_lock);
}

static struct htc_packet *htc_get_control_buf(struct htc_target *target,
					      bool tx)
{
	struct htc_packet *packet = NULL;
	struct list_head *buf_list;

	buf_list = tx ? &target->free_ctrl_txbuf : &target->free_ctrl_rxbuf;

	spin_lock_bh(&target->htc_lock);

	if (list_empty(buf_list)) {
		spin_unlock_bh(&target->htc_lock);
		return NULL;
	}

	packet = list_first_entry(buf_list, struct htc_packet, list);
	list_del(&packet->list);
	spin_unlock_bh(&target->htc_lock);

	if (tx)
		packet->buf = packet->buf_start + HTC_HDR_LENGTH;

	return packet;
}

static void htc_tx_comp_update(struct htc_target *target,
			       struct htc_endpoint *endpoint,
			       struct htc_packet *packet)
{
	packet->completion = NULL;
	packet->buf += HTC_HDR_LENGTH;

	if (!packet->status)
		return;

	ath6kl_err("req failed (status:%d, ep:%d, len:%d creds:%d)\n",
		   packet->status, packet->endpoint, packet->act_len,
		   packet->info.tx.cred_used);

	/* on failure to submit, reclaim credits for this packet */
	spin_lock_bh(&target->tx_lock);
	endpoint->cred_dist.cred_to_dist +=
		packet->info.tx.cred_used;
	endpoint->cred_dist.txq_depth = get_queue_depth(&endpoint->txq);

	ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx ctxt 0x%p dist 0x%p\n",
		   target->credit_info, &target->cred_dist_list);

	ath6kl_credit_distribute(target->credit_info,
				 &target->cred_dist_list,
				 HTC_CREDIT_DIST_SEND_COMPLETE);

	spin_unlock_bh(&target->tx_lock);
}

static void htc_tx_complete(struct htc_endpoint *endpoint,
			    struct list_head *txq)
{
	if (list_empty(txq))
		return;

	ath6kl_dbg(ATH6KL_DBG_HTC,
		   "htc tx complete ep %d pkts %d\n",
		   endpoint->eid, get_queue_depth(txq));

	ath6kl_tx_complete(endpoint->target, txq);
}

static void htc_tx_comp_handler(struct htc_target *target,
				struct htc_packet *packet)
{
	struct htc_endpoint *endpoint = &target->endpoint[packet->endpoint];
	struct list_head container;

	ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx complete seqno %d\n",
		   packet->info.tx.seqno);

	htc_tx_comp_update(target, endpoint, packet);
	INIT_LIST_HEAD(&container);
	list_add_tail(&packet->list, &container);
	/* do completion */
	htc_tx_complete(endpoint, &container);
}

static void htc_async_tx_scat_complete(struct htc_target *target,
				       struct hif_scatter_req *scat_req)
{
	struct htc_endpoint *endpoint;
	struct htc_packet *packet;
	struct list_head tx_compq;
	int i;

	INIT_LIST_HEAD(&tx_compq);

	ath6kl_dbg(ATH6KL_DBG_HTC,
		   "htc tx scat complete len %d entries %d\n",
		   scat_req->len, scat_req->scat_entries);

	if (scat_req->status)
		ath6kl_err("send scatter req failed: %d\n", scat_req->status);

	packet = scat_req->scat_list[0].packet;
	endpoint = &target->endpoint[packet->endpoint];

	/* walk through the scatter list and process */
	for (i = 0; i < scat_req->scat_entries; i++) {
		packet = scat_req->scat_list[i].packet;
		if (!packet) {
			WARN_ON(1);
			return;
		}

		packet->status = scat_req->status;
		htc_tx_comp_update(target, endpoint, packet);
		list_add_tail(&packet->list, &tx_compq);
	}

	/* free scatter request */
	hif_scatter_req_add(target->dev->ar, scat_req);

	/* complete all packets */
	htc_tx_complete(endpoint, &tx_compq);
}

static int ath6kl_htc_tx_issue(struct htc_target *target,
			       struct htc_packet *packet)
{
	int status;
	bool sync = false;
	u32 padded_len, send_len;

	if (!packet->completion)
		sync = true;

	send_len = packet->act_len + HTC_HDR_LENGTH;

	padded_len = CALC_TXRX_PADDED_LEN(target, send_len);

	ath6kl_dbg(ATH6KL_DBG_HTC,
		   "htc tx issue len %d seqno %d padded_len %d mbox 0x%X %s\n",
		   send_len, packet->info.tx.seqno, padded_len,
		   target->dev->ar->mbox_info.htc_addr,
		   sync ? "sync" : "async");

	if (sync) {
		status = hif_read_write_sync(target->dev->ar,
					     target->dev->ar->mbox_info.htc_addr,
					     packet->buf, padded_len,
					     HIF_WR_SYNC_BLOCK_INC);

		packet->status = status;
		packet->buf += HTC_HDR_LENGTH;
	} else
		status = hif_write_async(target->dev->ar,
					 target->dev->ar->mbox_info.htc_addr,
					 packet->buf, padded_len,
					 HIF_WR_ASYNC_BLOCK_INC, packet);

	return status;
}

static int htc_check_credits(struct htc_target *target,
			     struct htc_endpoint *ep, u8 *flags,
			     enum htc_endpoint_id eid, unsigned int len,
			     int *req_cred)
{
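	/*
	 * Worked example (illustrative only, the actual credit size is
	 * reported by the target at init): with tgt_cred_sz = 1664, a
	 * 1400 byte message needs 1 credit, while a 3000 byte message
	 * needs DIV_ROUND_UP(3000, 1664) = 2 credits.
	 */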
	*req_cred = (len > target->tgt_cred_sz) ?
		     DIV_ROUND_UP(len, target->tgt_cred_sz) : 1;

	ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit check need %d got %d\n",
		   *req_cred, ep->cred_dist.credits);

	if (ep->cred_dist.credits < *req_cred) {
		if (eid == ENDPOINT_0)
			return -EINVAL;

		/* Seek more credits */
		ep->cred_dist.seek_cred = *req_cred - ep->cred_dist.credits;

		ath6kl_credit_seek(target->credit_info, &ep->cred_dist);

		ep->cred_dist.seek_cred = 0;

		if (ep->cred_dist.credits < *req_cred) {
			ath6kl_dbg(ATH6KL_DBG_CREDIT,
				   "credit not found for ep %d\n",
				   eid);
			return -EINVAL;
		}
	}

	ep->cred_dist.credits -= *req_cred;
	ep->ep_st.cred_cosumd += *req_cred;

	/* When we are getting low on credits, ask for more */
	if (ep->cred_dist.credits < ep->cred_dist.cred_per_msg) {
		ep->cred_dist.seek_cred =
			ep->cred_dist.cred_per_msg - ep->cred_dist.credits;

		ath6kl_credit_seek(target->credit_info, &ep->cred_dist);

		/* see if we were successful in getting more */
		if (ep->cred_dist.credits < ep->cred_dist.cred_per_msg) {
			/* tell the target we need credits ASAP! */
			*flags |= HTC_FLAGS_NEED_CREDIT_UPDATE;
			ep->ep_st.cred_low_indicate += 1;
			ath6kl_dbg(ATH6KL_DBG_CREDIT,
				   "credit we need credits asap\n");
		}
	}

	return 0;
}

static void ath6kl_htc_tx_pkts_get(struct htc_target *target,
				   struct htc_endpoint *endpoint,
				   struct list_head *queue)
{
	int req_cred;
	u8 flags;
	struct htc_packet *packet;
	unsigned int len;

	while (true) {

		flags = 0;

		if (list_empty(&endpoint->txq))
			break;
		packet = list_first_entry(&endpoint->txq, struct htc_packet,
					  list);

		ath6kl_dbg(ATH6KL_DBG_HTC,
			   "htc tx got packet 0x%p queue depth %d\n",
			   packet, get_queue_depth(&endpoint->txq));

		len = CALC_TXRX_PADDED_LEN(target,
					   packet->act_len + HTC_HDR_LENGTH);

		if (htc_check_credits(target, endpoint, &flags,
				      packet->endpoint, len, &req_cred))
			break;

		/* now we can fully move onto caller's queue */
		packet = list_first_entry(&endpoint->txq, struct htc_packet,
					  list);
		list_move_tail(&packet->list, queue);

		/* save the number of credits this packet consumed */
		packet->info.tx.cred_used = req_cred;

		/* all TX packets are handled asynchronously */
		packet->completion = htc_tx_comp_handler;
		packet->context = target;
		endpoint->ep_st.tx_issued += 1;

		/* save send flags */
		packet->info.tx.flags = flags;
		packet->info.tx.seqno = endpoint->seqno;
		endpoint->seqno++;
	}
}

/* See if the padded tx length falls on a credit boundary */
static int htc_get_credit_padding(unsigned int cred_sz, int *len,
				  struct htc_endpoint *ep)
{
	int rem_cred, cred_pad;

	rem_cred = *len % cred_sz;

	/* No padding needed */
	if (!rem_cred)
		return 0;

	if (!(ep->conn_flags & HTC_FLGS_TX_BNDL_PAD_EN))
		return -1;

	/*
	 * The transfer consumes a "partial" credit, this
	 * packet cannot be bundled unless we add
	 * additional "dummy" padding (max 255 bytes) to
	 * consume the entire credit.
	 */
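	/*
	 * Illustrative example (the credit size is target dependent):
	 * with cred_sz = 128 and *len = 96, the message occupies only
	 * part of a credit, so 32 bytes of dummy padding are added
	 * below to round *len up to 128 and keep the packet bundleable.
	 */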
	cred_pad = *len < cred_sz ? (cred_sz - *len) : rem_cred;

	if ((cred_pad > 0) && (cred_pad <= 255))
		*len += cred_pad;
	else
		/* The amount of padding is too large, send as non-bundled */
		return -1;

	return cred_pad;
}

static int ath6kl_htc_tx_setup_scat_list(struct htc_target *target,
					 struct htc_endpoint *endpoint,
					 struct hif_scatter_req *scat_req,
					 int n_scat,
					 struct list_head *queue)
{
	struct htc_packet *packet;
	int i, len, rem_scat, cred_pad;
	int status = 0;
	u8 flags;

	rem_scat = target->max_tx_bndl_sz;

	for (i = 0; i < n_scat; i++) {
		scat_req->scat_list[i].packet = NULL;

		if (list_empty(queue))
			break;

		packet = list_first_entry(queue, struct htc_packet, list);
		len = CALC_TXRX_PADDED_LEN(target,
					   packet->act_len + HTC_HDR_LENGTH);

		cred_pad = htc_get_credit_padding(target->tgt_cred_sz,
						  &len, endpoint);
		if (cred_pad < 0 || rem_scat < len) {
			status = -ENOSPC;
			break;
		}

		rem_scat -= len;
		/* now remove it from the queue */
		list_del(&packet->list);

		scat_req->scat_list[i].packet = packet;
		/* prepare packet and flag message as part of a send bundle */
		flags = packet->info.tx.flags | HTC_FLAGS_SEND_BUNDLE;
		ath6kl_htc_tx_prep_pkt(packet, flags,
				       cred_pad, packet->info.tx.seqno);
		/* Make sure the buffer is 4-byte aligned */
		ath6kl_htc_tx_buf_align(&packet->buf,
					packet->act_len + HTC_HDR_LENGTH);
		scat_req->scat_list[i].buf = packet->buf;
		scat_req->scat_list[i].len = len;

		scat_req->len += len;
		scat_req->scat_entries++;
		ath6kl_dbg(ATH6KL_DBG_HTC,
			   "htc tx adding (%d) pkt 0x%p seqno %d len %d remaining %d\n",
			   i, packet, packet->info.tx.seqno, len, rem_scat);
	}

	/* Roll back scatter setup in case of any failure */
	if (scat_req->scat_entries < HTC_MIN_HTC_MSGS_TO_BUNDLE) {
		for (i = scat_req->scat_entries - 1; i >= 0; i--) {
			packet = scat_req->scat_list[i].packet;
			if (packet) {
				packet->buf += HTC_HDR_LENGTH;
				list_add(&packet->list, queue);
			}
		}
		return -EAGAIN;
	}

	return status;
}

/*
 * Drain a queue and send as bundles; this function may return without fully
 * draining the queue when:
 *
 * 1. scatter resources are exhausted
 * 2. a message that will consume a partial credit will stop the
 *    bundling process early
 * 3. we drop below the minimum number of messages for a bundle
 */
static void ath6kl_htc_tx_bundle(struct htc_endpoint *endpoint,
				 struct list_head *queue,
				 int *sent_bundle, int *n_bundle_pkts)
{
	struct htc_target *target = endpoint->target;
	struct hif_scatter_req *scat_req = NULL;
	int n_scat, n_sent_bundle = 0, tot_pkts_bundle = 0;
	int status;
	u32 txb_mask;
	u8 ac = WMM_NUM_AC;

	if ((HTC_CTRL_RSVD_SVC != endpoint->svc_id) &&
	    (WMI_CONTROL_SVC != endpoint->svc_id))
		ac = target->dev->ar->ep2ac_map[endpoint->eid];

	while (true) {
		status = 0;
		n_scat = get_queue_depth(queue);
		n_scat = min(n_scat, target->msg_per_bndl_max);

		if (n_scat < HTC_MIN_HTC_MSGS_TO_BUNDLE)
			/* not enough to bundle */
			break;

		scat_req = hif_scatter_req_get(target->dev->ar);

		if (!scat_req) {
			/* no scatter resources */
			ath6kl_dbg(ATH6KL_DBG_HTC,
				   "htc tx no more scatter resources\n");
			break;
		}

		if ((ac < WMM_NUM_AC) && (ac != WMM_AC_BK)) {
			if (WMM_AC_BE == ac)
				/*
				 * BE, BK have priorities and bit
				 * positions reversed
				 */
				txb_mask = (1 << WMM_AC_BK);
			else
				/*
				 * any AC with priority lower than
				 * itself
				 */
				txb_mask = ((1 << ac) - 1);

			/*
			 * when the scatter request resources drop below a
			 * certain threshold, disable Tx bundling for all
			 * AC's with priority lower than the current requesting
			 * AC. Otherwise re-enable Tx bundling for them
			 */
			if (scat_req->scat_q_depth < ATH6KL_SCATTER_REQS)
				target->tx_bndl_mask &= ~txb_mask;
			else
				target->tx_bndl_mask |= txb_mask;
		}

		ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx pkts to scatter: %d\n",
			   n_scat);

		scat_req->len = 0;
		scat_req->scat_entries = 0;

		status = ath6kl_htc_tx_setup_scat_list(target, endpoint,
						       scat_req, n_scat,
						       queue);
		if (status == -EAGAIN) {
			hif_scatter_req_add(target->dev->ar, scat_req);
			break;
		}

		/* send path is always asynchronous */
		scat_req->complete = htc_async_tx_scat_complete;
		n_sent_bundle++;
		tot_pkts_bundle += scat_req->scat_entries;

		ath6kl_dbg(ATH6KL_DBG_HTC,
			   "htc tx scatter bytes %d entries %d\n",
			   scat_req->len, scat_req->scat_entries);
		ath6kl_hif_submit_scat_req(target->dev, scat_req, false);

		if (status)
			break;
	}

	*sent_bundle = n_sent_bundle;
	*n_bundle_pkts = tot_pkts_bundle;
	ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx bundle sent %d pkts\n",
		   n_sent_bundle);

	return;
}

static void ath6kl_htc_tx_from_queue(struct htc_target *target,
				     struct htc_endpoint *endpoint)
{
	struct list_head txq;
	struct htc_packet *packet;
	int bundle_sent;
	int n_pkts_bundle;
	u8 ac = WMM_NUM_AC;
	int status;

	spin_lock_bh(&target->tx_lock);

	endpoint->tx_proc_cnt++;
	if (endpoint->tx_proc_cnt > 1) {
		endpoint->tx_proc_cnt--;
		spin_unlock_bh(&target->tx_lock);
		ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx busy\n");
		return;
	}

	/*
	 * drain the endpoint TX queue for transmission as long
	 * as we have enough credits.
	 */
	INIT_LIST_HEAD(&txq);

	if ((HTC_CTRL_RSVD_SVC != endpoint->svc_id) &&
	    (WMI_CONTROL_SVC != endpoint->svc_id))
		ac = target->dev->ar->ep2ac_map[endpoint->eid];

	while (true) {

		if (list_empty(&endpoint->txq))
			break;

		ath6kl_htc_tx_pkts_get(target, endpoint, &txq);

		if (list_empty(&txq))
			break;

		spin_unlock_bh(&target->tx_lock);

		bundle_sent = 0;
		n_pkts_bundle = 0;

		while (true) {
			/* try to send a bundle on each pass */
			if ((target->tx_bndl_mask) &&
			    (get_queue_depth(&txq) >=
			     HTC_MIN_HTC_MSGS_TO_BUNDLE)) {
				int temp1 = 0, temp2 = 0;

				/* check if bundling is enabled for an AC */
				if (target->tx_bndl_mask & (1 << ac)) {
					ath6kl_htc_tx_bundle(endpoint, &txq,
							     &temp1, &temp2);
					bundle_sent += temp1;
					n_pkts_bundle += temp2;
				}
			}

			if (list_empty(&txq))
				break;

			packet = list_first_entry(&txq, struct htc_packet,
						  list);
			list_del(&packet->list);

			ath6kl_htc_tx_prep_pkt(packet, packet->info.tx.flags,
					       0, packet->info.tx.seqno);
			status = ath6kl_htc_tx_issue(target, packet);

			if (status) {
				packet->status = status;
				packet->completion(packet->context, packet);
			}
		}

		spin_lock_bh(&target->tx_lock);

		endpoint->ep_st.tx_bundles += bundle_sent;
		endpoint->ep_st.tx_pkt_bundled += n_pkts_bundle;

		/*
		 * if an AC has bundling disabled and no tx bundling
		 * has occurred continuously for a certain number of TX,
		 * enable tx bundling for this AC
		 */
		if (!bundle_sent) {
			if (!(target->tx_bndl_mask & (1 << ac)) &&
			    (ac < WMM_NUM_AC)) {
				if (++target->ac_tx_count[ac] >=
					TX_RESUME_BUNDLE_THRESHOLD) {
					target->ac_tx_count[ac] = 0;
					target->tx_bndl_mask |= (1 << ac);
				}
			}
		} else {
			/* tx bundling will reset the counter */
			if (ac < WMM_NUM_AC)
				target->ac_tx_count[ac] = 0;
		}
	}

	endpoint->tx_proc_cnt = 0;
	spin_unlock_bh(&target->tx_lock);
}

static bool ath6kl_htc_tx_try(struct htc_target *target,
			      struct htc_endpoint *endpoint,
			      struct htc_packet *tx_pkt)
{
	struct htc_ep_callbacks ep_cb;
	int txq_depth;
	bool overflow = false;

	ep_cb = endpoint->ep_cb;

	spin_lock_bh(&target->tx_lock);
	txq_depth = get_queue_depth(&endpoint->txq);
	spin_unlock_bh(&target->tx_lock);

	if (txq_depth >= endpoint->max_txq_depth)
		overflow = true;

	if (overflow)
		ath6kl_dbg(ATH6KL_DBG_HTC,
			   "htc tx overflow ep %d depth %d max %d\n",
			   endpoint->eid, txq_depth,
			   endpoint->max_txq_depth);

	if (overflow && ep_cb.tx_full) {
		if (ep_cb.tx_full(endpoint->target, tx_pkt) ==
		    HTC_SEND_FULL_DROP) {
			endpoint->ep_st.tx_dropped += 1;
			return false;
		}
	}

	spin_lock_bh(&target->tx_lock);
	list_add_tail(&tx_pkt->list, &endpoint->txq);
	spin_unlock_bh(&target->tx_lock);

	ath6kl_htc_tx_from_queue(target, endpoint);

	return true;
}

static void htc_chk_ep_txq(struct htc_target *target)
{
	struct htc_endpoint *endpoint;
	struct htc_endpoint_credit_dist *cred_dist;

	/*
	 * Run through the credit distribution list to see if there are
	 * packets queued. NOTE: no locks need to be taken since the
	 * distribution list is not dynamic (cannot be re-ordered) and we
	 * are not modifying any state.
	 */
	list_for_each_entry(cred_dist, &target->cred_dist_list, list) {
		endpoint = cred_dist->htc_ep;

		spin_lock_bh(&target->tx_lock);
		if (!list_empty(&endpoint->txq)) {
			ath6kl_dbg(ATH6KL_DBG_HTC,
				   "htc creds ep %d credits %d pkts %d\n",
				   cred_dist->endpoint,
				   endpoint->cred_dist.credits,
				   get_queue_depth(&endpoint->txq));
			spin_unlock_bh(&target->tx_lock);
			/*
			 * Try to start the stalled queue, this list is
			 * ordered by priority. If there are credits
			 * available the highest priority queue will get a
			 * chance to reclaim credits from lower priority
			 * ones.
			 */
			ath6kl_htc_tx_from_queue(target, endpoint);
			spin_lock_bh(&target->tx_lock);
		}
		spin_unlock_bh(&target->tx_lock);
	}
}

static int htc_setup_tx_complete(struct htc_target *target)
{
	struct htc_packet *send_pkt = NULL;
	int status;

	send_pkt = htc_get_control_buf(target, true);

	if (!send_pkt)
		return -ENOMEM;

	if (target->htc_tgt_ver >= HTC_VERSION_2P1) {
		struct htc_setup_comp_ext_msg *setup_comp_ext;
		u32 flags = 0;

		setup_comp_ext =
			(struct htc_setup_comp_ext_msg *)send_pkt->buf;
		memset(setup_comp_ext, 0, sizeof(*setup_comp_ext));
		setup_comp_ext->msg_id =
			cpu_to_le16(HTC_MSG_SETUP_COMPLETE_EX_ID);

		if (target->msg_per_bndl_max > 0) {
			/* Indicate HTC bundling to the target */
			flags |= HTC_SETUP_COMP_FLG_RX_BNDL_EN;
			setup_comp_ext->msg_per_rxbndl =
				target->msg_per_bndl_max;
		}

		memcpy(&setup_comp_ext->flags, &flags,
		       sizeof(setup_comp_ext->flags));
		set_htc_pkt_info(send_pkt, NULL, (u8 *) setup_comp_ext,
				 sizeof(struct htc_setup_comp_ext_msg),
				 ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);

	} else {
		struct htc_setup_comp_msg *setup_comp;
		setup_comp = (struct htc_setup_comp_msg *)send_pkt->buf;
		memset(setup_comp, 0, sizeof(struct htc_setup_comp_msg));
		setup_comp->msg_id = cpu_to_le16(HTC_MSG_SETUP_COMPLETE_ID);
		set_htc_pkt_info(send_pkt, NULL, (u8 *) setup_comp,
				 sizeof(struct htc_setup_comp_msg),
				 ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);
	}

	/* we want synchronous operation */
	send_pkt->completion = NULL;
	ath6kl_htc_tx_prep_pkt(send_pkt, 0, 0, 0);
	status = ath6kl_htc_tx_issue(target, send_pkt);

	if (send_pkt != NULL)
		htc_reclaim_txctrl_buf(target, send_pkt);

	return status;
}

static void ath6kl_htc_set_credit_dist(struct htc_target *target,
				       struct ath6kl_htc_credit_info *credit_info,
				       u16 srvc_pri_order[], int list_len)
{
	struct htc_endpoint *endpoint;
	int i, ep;

	target->credit_info = credit_info;

	list_add_tail(&target->endpoint[ENDPOINT_0].cred_dist.list,
		      &target->cred_dist_list);

	for (i = 0; i < list_len; i++) {
		for (ep = ENDPOINT_1; ep < ENDPOINT_MAX; ep++) {
			endpoint = &target->endpoint[ep];
			if (endpoint->svc_id == srvc_pri_order[i]) {
				list_add_tail(&endpoint->cred_dist.list,
					      &target->cred_dist_list);
				break;
			}
		}
		if (ep >= ENDPOINT_MAX) {
			WARN_ON(1);
			return;
		}
	}
}

static int ath6kl_htc_mbox_tx(struct htc_target *target,
			      struct htc_packet *packet)
{
	struct htc_endpoint *endpoint;
	struct list_head queue;

	ath6kl_dbg(ATH6KL_DBG_HTC,
		   "htc tx ep id %d buf 0x%p len %d\n",
		   packet->endpoint, packet->buf, packet->act_len);

	if (packet->endpoint >= ENDPOINT_MAX) {
		WARN_ON(1);
		return -EINVAL;
	}

	endpoint = &target->endpoint[packet->endpoint];

	if (!ath6kl_htc_tx_try(target, endpoint, packet)) {
		packet->status = (target->htc_flags & HTC_OP_STATE_STOPPING) ?
				 -ECANCELED : -ENOSPC;
		INIT_LIST_HEAD(&queue);
		list_add(&packet->list, &queue);
		htc_tx_complete(endpoint, &queue);
	}

	return 0;
}

/* flush endpoint TX queue */
static void ath6kl_htc_mbox_flush_txep(struct htc_target *target,
				       enum htc_endpoint_id eid, u16 tag)
{
	struct htc_packet *packet, *tmp_pkt;
	struct list_head discard_q, container;
	struct htc_endpoint *endpoint = &target->endpoint[eid];

	if (!endpoint->svc_id) {
		WARN_ON(1);
		return;
	}

	/* initialize the discard queue */
	INIT_LIST_HEAD(&discard_q);

	spin_lock_bh(&target->tx_lock);

	list_for_each_entry_safe(packet, tmp_pkt, &endpoint->txq, list) {
		if ((tag == HTC_TX_PACKET_TAG_ALL) ||
		    (tag == packet->info.tx.tag))
			list_move_tail(&packet->list, &discard_q);
	}

	spin_unlock_bh(&target->tx_lock);

	list_for_each_entry_safe(packet, tmp_pkt, &discard_q, list) {
		packet->status = -ECANCELED;
		list_del(&packet->list);
		ath6kl_dbg(ATH6KL_DBG_HTC,
			   "htc tx flushing pkt 0x%p len %d ep %d tag 0x%x\n",
			   packet, packet->act_len,
			   packet->endpoint, packet->info.tx.tag);

		INIT_LIST_HEAD(&container);
		list_add_tail(&packet->list, &container);
		htc_tx_complete(endpoint, &container);
	}

}

static void ath6kl_htc_flush_txep_all(struct htc_target *target)
{
	struct htc_endpoint *endpoint;
	int i;

	dump_cred_dist_stats(target);

	for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
		endpoint = &target->endpoint[i];
		if (endpoint->svc_id == 0)
			/* not in use.. */
			continue;
		ath6kl_htc_mbox_flush_txep(target, i, HTC_TX_PACKET_TAG_ALL);
	}
}

static void ath6kl_htc_mbox_activity_changed(struct htc_target *target,
					     enum htc_endpoint_id eid,
					     bool active)
{
	struct htc_endpoint *endpoint = &target->endpoint[eid];
	bool dist = false;

	if (endpoint->svc_id == 0) {
		WARN_ON(1);
		return;
	}

	spin_lock_bh(&target->tx_lock);

	if (active) {
		if (!(endpoint->cred_dist.dist_flags & HTC_EP_ACTIVE)) {
			endpoint->cred_dist.dist_flags |= HTC_EP_ACTIVE;
			dist = true;
		}
	} else {
		if (endpoint->cred_dist.dist_flags & HTC_EP_ACTIVE) {
			endpoint->cred_dist.dist_flags &= ~HTC_EP_ACTIVE;
			dist = true;
		}
	}

	if (dist) {
		endpoint->cred_dist.txq_depth =
			get_queue_depth(&endpoint->txq);

		ath6kl_dbg(ATH6KL_DBG_HTC,
			   "htc tx activity ctxt 0x%p dist 0x%p\n",
			   target->credit_info, &target->cred_dist_list);

		ath6kl_credit_distribute(target->credit_info,
					 &target->cred_dist_list,
					 HTC_CREDIT_DIST_ACTIVITY_CHANGE);
	}

	spin_unlock_bh(&target->tx_lock);

	if (dist && !active)
		htc_chk_ep_txq(target);
}

/* HTC Rx */

static inline void ath6kl_htc_rx_update_stats(struct htc_endpoint *endpoint,
					      int n_look_ahds)
{
	endpoint->ep_st.rx_pkts++;
	if (n_look_ahds == 1)
		endpoint->ep_st.rx_lkahds++;
	else if (n_look_ahds > 1)
		endpoint->ep_st.rx_bundle_lkahd++;
}

static inline bool htc_valid_rx_frame_len(struct htc_target *target,
					  enum htc_endpoint_id eid, int len)
{
	return (eid == target->dev->ar->ctrl_ep) ?
		len <= ATH6KL_BUFFER_SIZE : len <= ATH6KL_AMSDU_BUFFER_SIZE;
}

static int htc_add_rxbuf(struct htc_target *target, struct htc_packet *packet)
{
	struct list_head queue;

	INIT_LIST_HEAD(&queue);
	list_add_tail(&packet->list, &queue);
	return ath6kl_htc_mbox_add_rxbuf_multiple(target, &queue);
}

static void htc_reclaim_rxbuf(struct htc_target *target,
			      struct htc_packet *packet,
			      struct htc_endpoint *ep)
{
	if (packet->info.rx.rx_flags & HTC_RX_PKT_NO_RECYCLE) {
		htc_rxpkt_reset(packet);
		packet->status = -ECANCELED;
		ep->ep_cb.rx(ep->target, packet);
	} else {
		htc_rxpkt_reset(packet);
		htc_add_rxbuf((void *)(target), packet);
	}
}

static void reclaim_rx_ctrl_buf(struct htc_target *target,
				struct htc_packet *packet)
{
	spin_lock_bh(&target->htc_lock);
	list_add_tail(&packet->list, &target->free_ctrl_rxbuf);
	spin_unlock_bh(&target->htc_lock);
}

static int ath6kl_htc_rx_packet(struct htc_target *target,
				struct htc_packet *packet,
				u32 rx_len)
{
	struct ath6kl_device *dev = target->dev;
	u32 padded_len;
	int status;

	padded_len = CALC_TXRX_PADDED_LEN(target, rx_len);

	if (padded_len > packet->buf_len) {
		ath6kl_err("not enough receive space for packet - padlen %d recvlen %d bufferlen %d\n",
			   padded_len, rx_len, packet->buf_len);
		return -ENOMEM;
	}

	ath6kl_dbg(ATH6KL_DBG_HTC,
		   "htc rx 0x%p hdr 0x%x len %d mbox 0x%x\n",
		   packet, packet->info.rx.exp_hdr,
		   padded_len, dev->ar->mbox_info.htc_addr);

	status = hif_read_write_sync(dev->ar,
				     dev->ar->mbox_info.htc_addr,
				     packet->buf, padded_len,
				     HIF_RD_SYNC_BLOCK_FIX);

	packet->status = status;

	return status;
}

/*
 * optimization for recv packets, we can indicate a
 * "hint" that there are more single-packets to fetch
 * on this endpoint.
 */
static void ath6kl_htc_rx_set_indicate(u32 lk_ahd,
				       struct htc_endpoint *endpoint,
				       struct htc_packet *packet)
{
	struct htc_frame_hdr *htc_hdr = (struct htc_frame_hdr *)&lk_ahd;

	if (htc_hdr->eid == packet->endpoint) {
		if (!list_empty(&endpoint->rx_bufq))
			packet->info.rx.indicat_flags |=
				HTC_RX_FLAGS_INDICATE_MORE_PKTS;
	}
}

static void ath6kl_htc_rx_chk_water_mark(struct htc_endpoint *endpoint)
{
	struct htc_ep_callbacks ep_cb = endpoint->ep_cb;

	if (ep_cb.rx_refill_thresh > 0) {
		spin_lock_bh(&endpoint->target->rx_lock);
		if (get_queue_depth(&endpoint->rx_bufq)
		    < ep_cb.rx_refill_thresh) {
			spin_unlock_bh(&endpoint->target->rx_lock);
			ep_cb.rx_refill(endpoint->target, endpoint->eid);
			return;
		}
		spin_unlock_bh(&endpoint->target->rx_lock);
	}
}

/* This function is called with rx_lock held */
static int ath6kl_htc_rx_setup(struct htc_target *target,
			       struct htc_endpoint *ep,
			       u32 *lk_ahds, struct list_head *queue, int n_msg)
{
	struct htc_packet *packet;
	/* FIXME: type of lk_ahds can't be right */
	struct htc_frame_hdr *htc_hdr = (struct htc_frame_hdr *)lk_ahds;
	struct htc_ep_callbacks ep_cb;
	int status = 0, j, full_len;
	bool no_recycle;

	full_len = CALC_TXRX_PADDED_LEN(target,
					le16_to_cpu(htc_hdr->payld_len) +
					sizeof(*htc_hdr));

	if (!htc_valid_rx_frame_len(target, ep->eid, full_len)) {
		ath6kl_warn("Rx buffer requested with invalid length htc_hdr:eid %d, flags 0x%x, len %d\n",
			    htc_hdr->eid, htc_hdr->flags,
			    le16_to_cpu(htc_hdr->payld_len));
		return -EINVAL;
	}

	ep_cb = ep->ep_cb;
	for (j = 0; j < n_msg; j++) {

		/*
		 * Reset flag, any packets allocated using the
		 * rx_alloc() API cannot be recycled on
		 * cleanup, they must be explicitly returned.
		 */
		no_recycle = false;

		if (ep_cb.rx_allocthresh &&
		    (full_len > ep_cb.rx_alloc_thresh)) {
			ep->ep_st.rx_alloc_thresh_hit += 1;
			ep->ep_st.rxalloc_thresh_byte +=
				le16_to_cpu(htc_hdr->payld_len);

			spin_unlock_bh(&target->rx_lock);
			no_recycle = true;

			packet = ep_cb.rx_allocthresh(ep->target, ep->eid,
						      full_len);
			spin_lock_bh(&target->rx_lock);
		} else {
			/* refill handler is being used */
			if (list_empty(&ep->rx_bufq)) {
				if (ep_cb.rx_refill) {
					spin_unlock_bh(&target->rx_lock);
					ep_cb.rx_refill(ep->target, ep->eid);
					spin_lock_bh(&target->rx_lock);
				}
			}

			if (list_empty(&ep->rx_bufq))
				packet = NULL;
			else {
				packet = list_first_entry(&ep->rx_bufq,
						struct htc_packet, list);
				list_del(&packet->list);
			}
		}

		if (!packet) {
			target->rx_st_flags |= HTC_RECV_WAIT_BUFFERS;
			target->ep_waiting = ep->eid;
			return -ENOSPC;
		}

		/* clear flags */
		packet->info.rx.rx_flags = 0;
		packet->info.rx.indicat_flags = 0;
		packet->status = 0;

		if (no_recycle)
			/*
			 * flag that these packets cannot be
			 * recycled, they have to be returned to
			 * the user
			 */
			packet->info.rx.rx_flags |= HTC_RX_PKT_NO_RECYCLE;

		/* Caller needs to free this upon any failure */
		list_add_tail(&packet->list, queue);

		if (target->htc_flags & HTC_OP_STATE_STOPPING) {
			status = -ECANCELED;
			break;
		}

		if (j) {
			packet->info.rx.rx_flags |= HTC_RX_PKT_REFRESH_HDR;
			packet->info.rx.exp_hdr = 0xFFFFFFFF;
		} else
			/* set expected look ahead */
			packet->info.rx.exp_hdr = *lk_ahds;

		packet->act_len = le16_to_cpu(htc_hdr->payld_len) +
			HTC_HDR_LENGTH;
	}

	return status;
}

static int ath6kl_htc_rx_alloc(struct htc_target *target,
			       u32 lk_ahds[], int msg,
			       struct htc_endpoint *endpoint,
			       struct list_head *queue)
{
	int status = 0;
	struct htc_packet *packet, *tmp_pkt;
	struct htc_frame_hdr *htc_hdr;
	int i, n_msg;

	spin_lock_bh(&target->rx_lock);

	for (i = 0; i < msg; i++) {

		htc_hdr = (struct htc_frame_hdr *)&lk_ahds[i];

		if (htc_hdr->eid >= ENDPOINT_MAX) {
			ath6kl_err("invalid ep in look-ahead: %d\n",
				   htc_hdr->eid);
			status = -ENOMEM;
			break;
		}

		if (htc_hdr->eid != endpoint->eid) {
			ath6kl_err("invalid ep in look-ahead: %d should be : %d (index:%d)\n",
				   htc_hdr->eid, endpoint->eid, i);
			status = -ENOMEM;
			break;
		}

		if (le16_to_cpu(htc_hdr->payld_len) > HTC_MAX_PAYLOAD_LENGTH) {
			ath6kl_err("payload len %d exceeds max htc : %d !\n",
				   htc_hdr->payld_len,
				   (u32) HTC_MAX_PAYLOAD_LENGTH);
			status = -ENOMEM;
			break;
		}

		if (endpoint->svc_id == 0) {
			ath6kl_err("ep %d is not connected !\n", htc_hdr->eid);
			status = -ENOMEM;
			break;
		}

		if (htc_hdr->flags & HTC_FLG_RX_BNDL_CNT) {
			/*
			 * HTC header indicates that every packet to follow
			 * has the same padded length so that it can be
			 * optimally fetched as a full bundle.
			 */
			n_msg = (htc_hdr->flags & HTC_FLG_RX_BNDL_CNT) >>
				HTC_FLG_RX_BNDL_CNT_S;

			/* the count doesn't include the starter frame */
			n_msg++;
			if (n_msg > target->msg_per_bndl_max) {
				status = -ENOMEM;
				break;
			}

			endpoint->ep_st.rx_bundle_from_hdr += 1;
			ath6kl_dbg(ATH6KL_DBG_HTC,
				   "htc rx bundle pkts %d\n",
				   n_msg);
		} else
			/* HTC header only indicates 1 message to fetch */
			n_msg = 1;

		/* Setup packet buffers for each message */
		status = ath6kl_htc_rx_setup(target, endpoint, &lk_ahds[i],
					     queue, n_msg);

		/*
		 * This is due to unavailability of buffers to rx entire data.
		 * Return no error so that free buffers from queue can be used
		 * to receive partial data.
		 */
		if (status == -ENOSPC) {
			spin_unlock_bh(&target->rx_lock);
			return 0;
		}

		if (status)
			break;
	}

	spin_unlock_bh(&target->rx_lock);

	if (status) {
		list_for_each_entry_safe(packet, tmp_pkt, queue, list) {
			list_del(&packet->list);
			htc_reclaim_rxbuf(target, packet,
					  &target->endpoint[packet->endpoint]);
		}
	}

	return status;
}

static void htc_ctrl_rx(struct htc_target *context, struct htc_packet *packets)
{
	if (packets->endpoint != ENDPOINT_0) {
		WARN_ON(1);
		return;
	}

	if (packets->status == -ECANCELED) {
		reclaim_rx_ctrl_buf(context, packets);
		return;
	}

	if (packets->act_len > 0) {
		ath6kl_err("htc_ctrl_rx, got message with len:%zu\n",
			   packets->act_len + HTC_HDR_LENGTH);

		ath6kl_dbg_dump(ATH6KL_DBG_HTC,
				"htc rx unexpected endpoint 0 message", "",
				packets->buf - HTC_HDR_LENGTH,
				packets->act_len + HTC_HDR_LENGTH);
	}

	htc_reclaim_rxbuf(context, packets, &context->endpoint[0]);
}

static void htc_proc_cred_rpt(struct htc_target *target,
			      struct htc_credit_report *rpt,
			      int n_entries,
			      enum htc_endpoint_id from_ep)
{
	struct htc_endpoint *endpoint;
	int tot_credits = 0, i;
	bool dist = false;

	spin_lock_bh(&target->tx_lock);

	for (i = 0; i < n_entries; i++, rpt++) {
		if (rpt->eid >= ENDPOINT_MAX) {
			WARN_ON(1);
			spin_unlock_bh(&target->tx_lock);
			return;
		}

		endpoint = &target->endpoint[rpt->eid];

		ath6kl_dbg(ATH6KL_DBG_CREDIT,
			   "credit report ep %d credits %d\n",
			   rpt->eid, rpt->credits);

		endpoint->ep_st.tx_cred_rpt += 1;
		endpoint->ep_st.cred_retnd += rpt->credits;

		if (from_ep == rpt->eid) {
			/*
			 * This credit report arrived on the same endpoint
			 * indicating it arrived in an RX packet.
			 */
			endpoint->ep_st.cred_from_rx += rpt->credits;
			endpoint->ep_st.cred_rpt_from_rx += 1;
		} else if (from_ep == ENDPOINT_0) {
			/* credit arrived on endpoint 0 as a NULL message */
			endpoint->ep_st.cred_from_ep0 += rpt->credits;
			endpoint->ep_st.cred_rpt_ep0 += 1;
		} else {
			endpoint->ep_st.cred_from_other += rpt->credits;
			endpoint->ep_st.cred_rpt_from_other += 1;
		}

		if (rpt->eid == ENDPOINT_0)
			/* always give endpoint 0 credits back */
			endpoint->cred_dist.credits += rpt->credits;
		else {
			endpoint->cred_dist.cred_to_dist += rpt->credits;
			dist = true;
		}

		/*
		 * Refresh tx depth for distribution function that will
		 * recover these credits. NOTE: this is only valid when
		 * there are credits to recover!
		 */
		endpoint->cred_dist.txq_depth =
			get_queue_depth(&endpoint->txq);

		tot_credits += rpt->credits;
	}

	if (dist) {
		/*
		 * This was a credit return based on a completed send
		 * operation; note, this is done with the lock held.
		 */
		ath6kl_credit_distribute(target->credit_info,
					 &target->cred_dist_list,
					 HTC_CREDIT_DIST_SEND_COMPLETE);
	}

	spin_unlock_bh(&target->tx_lock);

	if (tot_credits)
		htc_chk_ep_txq(target);
}

static int htc_parse_trailer(struct htc_target *target,
			     struct htc_record_hdr *record,
			     u8 *record_buf, u32 *next_lk_ahds,
			     enum htc_endpoint_id endpoint,
			     int *n_lk_ahds)
{
	struct htc_bundle_lkahd_rpt *bundle_lkahd_rpt;
	struct htc_lookahead_report *lk_ahd;
	int len;

	switch (record->rec_id) {
	case HTC_RECORD_CREDITS:
		len = record->len / sizeof(struct htc_credit_report);
		if (!len) {
			WARN_ON(1);
			return -EINVAL;
		}

		htc_proc_cred_rpt(target,
				  (struct htc_credit_report *) record_buf,
				  len, endpoint);
		break;
	case HTC_RECORD_LOOKAHEAD:
		len = record->len / sizeof(*lk_ahd);
		if (!len) {
			WARN_ON(1);
			return -EINVAL;
		}

		lk_ahd = (struct htc_lookahead_report *) record_buf;
		if ((lk_ahd->pre_valid == ((~lk_ahd->post_valid) & 0xFF)) &&
		    next_lk_ahds) {

			ath6kl_dbg(ATH6KL_DBG_HTC,
				   "htc rx lk_ahd found pre_valid 0x%x post_valid 0x%x\n",
				   lk_ahd->pre_valid, lk_ahd->post_valid);

			/* look ahead bytes are valid, copy them over */
			memcpy((u8 *)&next_lk_ahds[0], lk_ahd->lk_ahd, 4);

			ath6kl_dbg_dump(ATH6KL_DBG_HTC,
					"htc rx next look ahead",
					"", next_lk_ahds, 4);

			*n_lk_ahds = 1;
		}
		break;
	case HTC_RECORD_LOOKAHEAD_BUNDLE:
		len = record->len / sizeof(*bundle_lkahd_rpt);
		if (!len || (len > HTC_HOST_MAX_MSG_PER_BUNDLE)) {
			WARN_ON(1);
			return -EINVAL;
		}

		if (next_lk_ahds) {
			int i;

			bundle_lkahd_rpt =
				(struct htc_bundle_lkahd_rpt *) record_buf;

			ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx bundle lk_ahd",
					"", record_buf, record->len);

			for (i = 0; i < len; i++) {
				memcpy((u8 *)&next_lk_ahds[i],
				       bundle_lkahd_rpt->lk_ahd, 4);
				bundle_lkahd_rpt++;
			}

			*n_lk_ahds = i;
		}
		break;
	default:
		ath6kl_err("unhandled record: id:%d len:%d\n",
			   record->rec_id, record->len);
		break;
	}

	return 0;

}

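/*
 * A trailer is a sequence of records packed back to back: each record
 * begins with a byte-aligned struct htc_record_hdr (rec_id, len)
 * followed by record->len bytes of record payload. The parser below
 * simply walks the buffer until all trailer bytes are consumed.
 */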
static int htc_proc_trailer(struct htc_target *target,
			    u8 *buf, int len, u32 *next_lk_ahds,
			    int *n_lk_ahds, enum htc_endpoint_id endpoint)
{
	struct htc_record_hdr *record;
	int orig_len;
	int status;
	u8 *record_buf;
	u8 *orig_buf;

	ath6kl_dbg(ATH6KL_DBG_HTC, "htc rx trailer len %d\n", len);
	ath6kl_dbg_dump(ATH6KL_DBG_HTC, NULL, "", buf, len);

	orig_buf = buf;
	orig_len = len;
	status = 0;

	while (len > 0) {

		if (len < sizeof(struct htc_record_hdr)) {
			status = -ENOMEM;
			break;
		}
		/* these are byte aligned structs */
		record = (struct htc_record_hdr *) buf;
		len -= sizeof(struct htc_record_hdr);
		buf += sizeof(struct htc_record_hdr);

		if (record->len > len) {
			ath6kl_err("invalid record len: %d (id:%d) buf has: %d bytes left\n",
				   record->len, record->rec_id, len);
			status = -ENOMEM;
			break;
		}
		record_buf = buf;

		status = htc_parse_trailer(target, record, record_buf,
					   next_lk_ahds, endpoint, n_lk_ahds);

		if (status)
			break;

		/* advance buffer past this record for next time around */
		buf += record->len;
		len -= record->len;
	}

	if (status)
		ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx bad trailer",
				"", orig_buf, orig_len);

	return status;
}

static int ath6kl_htc_rx_process_hdr(struct htc_target *target,
				     struct htc_packet *packet,
				     u32 *next_lkahds, int *n_lkahds)
{
	int status = 0;
	u16 payload_len;
	u32 lk_ahd;
	struct htc_frame_hdr *htc_hdr = (struct htc_frame_hdr *)packet->buf;

	if (n_lkahds != NULL)
		*n_lkahds = 0;

	/*
	 * NOTE: we cannot assume the alignment of buf, so we use the safe
	 * macros to retrieve 16 bit fields.
	 */
	payload_len = le16_to_cpu(get_unaligned(&htc_hdr->payld_len));

	memcpy((u8 *)&lk_ahd, packet->buf, sizeof(lk_ahd));

	if (packet->info.rx.rx_flags & HTC_RX_PKT_REFRESH_HDR) {
		/*
		 * Refresh the expected header and the actual length as it
		 * was unknown when this packet was grabbed as part of the
		 * bundle.
		 */
		packet->info.rx.exp_hdr = lk_ahd;
		packet->act_len = payload_len + HTC_HDR_LENGTH;

		/* validate the actual header that was refreshed */
		if (packet->act_len > packet->buf_len) {
			ath6kl_err("refreshed hdr payload len (%d) in bundled recv is invalid (hdr: 0x%X)\n",
				   payload_len, lk_ahd);
			/*
			 * Limit this to max buffer just to print out some
			 * of the buffer.
			 */
			packet->act_len = min(packet->act_len, packet->buf_len);
			status = -ENOMEM;
			goto fail_rx;
		}

		if (packet->endpoint != htc_hdr->eid) {
			ath6kl_err("refreshed hdr ep (%d) does not match expected ep (%d)\n",
				   htc_hdr->eid, packet->endpoint);
			status = -ENOMEM;
			goto fail_rx;
		}
	}

	if (lk_ahd != packet->info.rx.exp_hdr) {
		ath6kl_err("%s(): lk_ahd mismatch! (pPkt:0x%p flags:0x%X)\n",
			   __func__, packet, packet->info.rx.rx_flags);
		ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx expected lk_ahd",
				"", &packet->info.rx.exp_hdr, 4);
		ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx current header",
				"", (u8 *)&lk_ahd, sizeof(lk_ahd));
		status = -ENOMEM;
		goto fail_rx;
	}

	if (htc_hdr->flags & HTC_FLG_RX_TRAILER) {
		if (htc_hdr->ctrl[0] < sizeof(struct htc_record_hdr) ||
		    htc_hdr->ctrl[0] > payload_len) {
			ath6kl_err("%s(): invalid hdr (payload len should be :%d, CB[0] is:%d)\n",
				   __func__, payload_len, htc_hdr->ctrl[0]);
			status = -ENOMEM;
			goto fail_rx;
		}

		if (packet->info.rx.rx_flags & HTC_RX_PKT_IGNORE_LOOKAHEAD) {
			next_lkahds = NULL;
			n_lkahds = NULL;
		}

		status = htc_proc_trailer(target, packet->buf + HTC_HDR_LENGTH
					  + payload_len - htc_hdr->ctrl[0],
					  htc_hdr->ctrl[0], next_lkahds,
					  n_lkahds, packet->endpoint);

		if (status)
			goto fail_rx;

		packet->act_len -= htc_hdr->ctrl[0];
	}

	packet->buf += HTC_HDR_LENGTH;
	packet->act_len -= HTC_HDR_LENGTH;

fail_rx:
	if (status)
		ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx bad packet",
				"", packet->buf, packet->act_len);

	return status;
}

static void ath6kl_htc_rx_complete(struct htc_endpoint *endpoint,
				   struct htc_packet *packet)
{
	ath6kl_dbg(ATH6KL_DBG_HTC,
		   "htc rx complete ep %d packet 0x%p\n",
		   endpoint->eid, packet);
	endpoint->ep_cb.rx(endpoint->target, packet);
}

static int ath6kl_htc_rx_bundle(struct htc_target *target,
				struct list_head *rxq,
				struct list_head *sync_compq,
				int *n_pkt_fetched, bool part_bundle)
{
	struct hif_scatter_req *scat_req;
	struct htc_packet *packet;
	int rem_space = target->max_rx_bndl_sz;
	int n_scat_pkt, status = 0, i, len;

	n_scat_pkt = get_queue_depth(rxq);
	n_scat_pkt = min(n_scat_pkt, target->msg_per_bndl_max);

	if ((get_queue_depth(rxq) - n_scat_pkt) > 0) {
		/*
		 * We were forced to split this bundle receive operation;
		 * all packets in this partial bundle must have their
		 * lookaheads ignored.
		 */
		part_bundle = true;

		/*
		 * This would only happen if the target ignored our max
		 * bundle limit.
		 */
		ath6kl_warn("%s(): partial bundle detected num:%d , %d\n",
			    __func__, get_queue_depth(rxq), n_scat_pkt);
	}

	len = 0;

	ath6kl_dbg(ATH6KL_DBG_HTC,
		   "htc rx bundle depth %d pkts %d\n",
		   get_queue_depth(rxq), n_scat_pkt);

	scat_req = hif_scatter_req_get(target->dev->ar);

	if (scat_req == NULL)
		goto fail_rx_pkt;

	for (i = 0; i < n_scat_pkt; i++) {
		int pad_len;

		packet = list_first_entry(rxq, struct htc_packet, list);
		list_del(&packet->list);

		pad_len = CALC_TXRX_PADDED_LEN(target,
					       packet->act_len);

		if ((rem_space - pad_len) < 0) {
			list_add(&packet->list, rxq);
			break;
		}

		rem_space -= pad_len;

		if (part_bundle || (i < (n_scat_pkt - 1)))
			/*
			 * Packets 0..n-1 cannot be checked for look-aheads
			 * since we are fetching a bundle; the last packet,
			 * however, can have its lookahead used.
			 */
			packet->info.rx.rx_flags |=
				HTC_RX_PKT_IGNORE_LOOKAHEAD;

		/* NOTE: 1 HTC packet per scatter entry */
		scat_req->scat_list[i].buf = packet->buf;
		scat_req->scat_list[i].len = pad_len;

		packet->info.rx.rx_flags |= HTC_RX_PKT_PART_OF_BUNDLE;

		list_add_tail(&packet->list, sync_compq);

		WARN_ON(!scat_req->scat_list[i].len);
		len += scat_req->scat_list[i].len;
	}

	scat_req->len = len;
	scat_req->scat_entries = i;

	status = ath6kl_hif_submit_scat_req(target->dev, scat_req, true);

	if (!status)
		*n_pkt_fetched = i;

	/* free scatter request */
	hif_scatter_req_add(target->dev->ar, scat_req);

fail_rx_pkt:

	return status;
}

static int ath6kl_htc_rx_process_packets(struct htc_target *target,
					 struct list_head *comp_pktq,
					 u32 lk_ahds[],
					 int *n_lk_ahd)
{
	struct htc_packet *packet, *tmp_pkt;
	struct htc_endpoint *ep;
	int status = 0;

	list_for_each_entry_safe(packet, tmp_pkt, comp_pktq, list) {
		ep = &target->endpoint[packet->endpoint];

		/* process header for each of the recv packet */
		status = ath6kl_htc_rx_process_hdr(target, packet, lk_ahds,
						   n_lk_ahd);
		if (status)
			return status;

		list_del(&packet->list);

		if (list_empty(comp_pktq)) {
			/*
			 * Last packet's more packet flag is set
			 * based on the lookahead.
			 */
			if (*n_lk_ahd > 0)
				ath6kl_htc_rx_set_indicate(lk_ahds[0],
							   ep, packet);
		} else
			/*
			 * Packets in a bundle automatically have
			 * this flag set.
			 */
			packet->info.rx.indicat_flags |=
				HTC_RX_FLAGS_INDICATE_MORE_PKTS;

		ath6kl_htc_rx_update_stats(ep, *n_lk_ahd);

		if (packet->info.rx.rx_flags & HTC_RX_PKT_PART_OF_BUNDLE)
			ep->ep_st.rx_bundl += 1;

		ath6kl_htc_rx_complete(ep, packet);
	}

	return status;
}

static int ath6kl_htc_rx_fetch(struct htc_target *target,
			       struct list_head *rx_pktq,
			       struct list_head *comp_pktq)
{
	int fetched_pkts;
	bool part_bundle = false;
	int status = 0;
	struct list_head tmp_rxq;
	struct htc_packet *packet, *tmp_pkt;

	/* now go fetch the list of HTC packets */
	while (!list_empty(rx_pktq)) {
		fetched_pkts = 0;

		INIT_LIST_HEAD(&tmp_rxq);

		if (target->rx_bndl_enable && (get_queue_depth(rx_pktq) > 1)) {
			/*
			 * There are enough packets to attempt a
			 * bundle transfer and recv bundling is
			 * allowed.
2070 */ 2071 status = ath6kl_htc_rx_bundle(target, rx_pktq, 2072 &tmp_rxq, 2073 &fetched_pkts, 2074 part_bundle); 2075 if (status) 2076 goto fail_rx; 2077 2078 if (!list_empty(rx_pktq)) 2079 part_bundle = true; 2080 2081 list_splice_tail_init(&tmp_rxq, comp_pktq); 2082 } 2083 2084 if (!fetched_pkts) { 2085 2086 packet = list_first_entry(rx_pktq, struct htc_packet, 2087 list); 2088 2089 /* fully synchronous */ 2090 packet->completion = NULL; 2091 2092 if (!list_is_singular(rx_pktq)) 2093 /* 2094 * look_aheads in all packet 2095 * except the last one in the 2096 * bundle must be ignored 2097 */ 2098 packet->info.rx.rx_flags |= 2099 HTC_RX_PKT_IGNORE_LOOKAHEAD; 2100 2101 /* go fetch the packet */ 2102 status = ath6kl_htc_rx_packet(target, packet, 2103 packet->act_len); 2104 2105 list_move_tail(&packet->list, &tmp_rxq); 2106 2107 if (status) 2108 goto fail_rx; 2109 2110 list_splice_tail_init(&tmp_rxq, comp_pktq); 2111 } 2112 } 2113 2114 return 0; 2115 2116 fail_rx: 2117 2118 /* 2119 * Cleanup any packets we allocated but didn't use to 2120 * actually fetch any packets. 2121 */ 2122 2123 list_for_each_entry_safe(packet, tmp_pkt, rx_pktq, list) { 2124 list_del(&packet->list); 2125 htc_reclaim_rxbuf(target, packet, 2126 &target->endpoint[packet->endpoint]); 2127 } 2128 2129 list_for_each_entry_safe(packet, tmp_pkt, &tmp_rxq, list) { 2130 list_del(&packet->list); 2131 htc_reclaim_rxbuf(target, packet, 2132 &target->endpoint[packet->endpoint]); 2133 } 2134 2135 return status; 2136 } 2137 2138 int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target, 2139 u32 msg_look_ahead, int *num_pkts) 2140 { 2141 struct htc_packet *packets, *tmp_pkt; 2142 struct htc_endpoint *endpoint; 2143 struct list_head rx_pktq, comp_pktq; 2144 int status = 0; 2145 u32 look_aheads[HTC_HOST_MAX_MSG_PER_BUNDLE]; 2146 int num_look_ahead = 1; 2147 enum htc_endpoint_id id; 2148 int n_fetched = 0; 2149 2150 INIT_LIST_HEAD(&comp_pktq); 2151 *num_pkts = 0; 2152 2153 /* 2154 * On first entry copy the look_aheads into our temp array for 2155 * processing 2156 */ 2157 look_aheads[0] = msg_look_ahead; 2158 2159 while (true) { 2160 2161 /* 2162 * First lookahead sets the expected endpoint IDs for all 2163 * packets in a bundle. 2164 */ 2165 id = ((struct htc_frame_hdr *)&look_aheads[0])->eid; 2166 endpoint = &target->endpoint[id]; 2167 2168 if (id >= ENDPOINT_MAX) { 2169 ath6kl_err("MsgPend, invalid endpoint in look-ahead: %d\n", 2170 id); 2171 status = -ENOMEM; 2172 break; 2173 } 2174 2175 INIT_LIST_HEAD(&rx_pktq); 2176 INIT_LIST_HEAD(&comp_pktq); 2177 2178 /* 2179 * Try to allocate as many HTC RX packets indicated by the 2180 * look_aheads. 2181 */ 2182 status = ath6kl_htc_rx_alloc(target, look_aheads, 2183 num_look_ahead, endpoint, 2184 &rx_pktq); 2185 if (status) 2186 break; 2187 2188 if (get_queue_depth(&rx_pktq) >= 2) 2189 /* 2190 * A recv bundle was detected, force IRQ status 2191 * re-check again 2192 */ 2193 target->chk_irq_status_cnt = 1; 2194 2195 n_fetched += get_queue_depth(&rx_pktq); 2196 2197 num_look_ahead = 0; 2198 2199 status = ath6kl_htc_rx_fetch(target, &rx_pktq, &comp_pktq); 2200 2201 if (!status) 2202 ath6kl_htc_rx_chk_water_mark(endpoint); 2203 2204 /* Process fetched packets */ 2205 status = ath6kl_htc_rx_process_packets(target, &comp_pktq, 2206 look_aheads, 2207 &num_look_ahead); 2208 2209 if (!num_look_ahead || status) 2210 break; 2211 2212 /* 2213 * For SYNCH processing, if we get here, we are running 2214 * through the loop again due to a detected lookahead. 
Set 2215 * flag that we should re-check IRQ status registers again 2216 * before leaving IRQ processing, this can net better 2217 * performance in high throughput situations. 2218 */ 2219 target->chk_irq_status_cnt = 1; 2220 } 2221 2222 if (status) { 2223 ath6kl_err("failed to get pending recv messages: %d\n", 2224 status); 2225 2226 /* cleanup any packets in sync completion queue */ 2227 list_for_each_entry_safe(packets, tmp_pkt, &comp_pktq, list) { 2228 list_del(&packets->list); 2229 htc_reclaim_rxbuf(target, packets, 2230 &target->endpoint[packets->endpoint]); 2231 } 2232 2233 if (target->htc_flags & HTC_OP_STATE_STOPPING) { 2234 ath6kl_warn("host is going to stop blocking receiver for htc_stop\n"); 2235 ath6kl_hif_rx_control(target->dev, false); 2236 } 2237 } 2238 2239 /* 2240 * Before leaving, check to see if host ran out of buffers and 2241 * needs to stop the receiver. 2242 */ 2243 if (target->rx_st_flags & HTC_RECV_WAIT_BUFFERS) { 2244 ath6kl_warn("host has no rx buffers blocking receiver to prevent overrun\n"); 2245 ath6kl_hif_rx_control(target->dev, false); 2246 } 2247 *num_pkts = n_fetched; 2248 2249 return status; 2250 } 2251 2252 /* 2253 * Synchronously wait for a control message from the target, 2254 * This function is used at initialization time ONLY. At init messages 2255 * on ENDPOINT 0 are expected. 2256 */ 2257 static struct htc_packet *htc_wait_for_ctrl_msg(struct htc_target *target) 2258 { 2259 struct htc_packet *packet = NULL; 2260 struct htc_frame_hdr *htc_hdr; 2261 u32 look_ahead; 2262 2263 if (ath6kl_hif_poll_mboxmsg_rx(target->dev, &look_ahead, 2264 HTC_TARGET_RESPONSE_TIMEOUT)) 2265 return NULL; 2266 2267 ath6kl_dbg(ATH6KL_DBG_HTC, 2268 "htc rx wait ctrl look_ahead 0x%X\n", look_ahead); 2269 2270 htc_hdr = (struct htc_frame_hdr *)&look_ahead; 2271 2272 if (htc_hdr->eid != ENDPOINT_0) 2273 return NULL; 2274 2275 packet = htc_get_control_buf(target, false); 2276 2277 if (!packet) 2278 return NULL; 2279 2280 packet->info.rx.rx_flags = 0; 2281 packet->info.rx.exp_hdr = look_ahead; 2282 packet->act_len = le16_to_cpu(htc_hdr->payld_len) + HTC_HDR_LENGTH; 2283 2284 if (packet->act_len > packet->buf_len) 2285 goto fail_ctrl_rx; 2286 2287 /* we want synchronous operation */ 2288 packet->completion = NULL; 2289 2290 /* get the message from the device, this will block */ 2291 if (ath6kl_htc_rx_packet(target, packet, packet->act_len)) 2292 goto fail_ctrl_rx; 2293 2294 /* process receive header */ 2295 packet->status = ath6kl_htc_rx_process_hdr(target, packet, NULL, NULL); 2296 2297 if (packet->status) { 2298 ath6kl_err("htc_wait_for_ctrl_msg, ath6kl_htc_rx_process_hdr failed (status = %d)\n", 2299 packet->status); 2300 goto fail_ctrl_rx; 2301 } 2302 2303 return packet; 2304 2305 fail_ctrl_rx: 2306 if (packet != NULL) { 2307 htc_rxpkt_reset(packet); 2308 reclaim_rx_ctrl_buf(target, packet); 2309 } 2310 2311 return NULL; 2312 } 2313 2314 static int ath6kl_htc_mbox_add_rxbuf_multiple(struct htc_target *target, 2315 struct list_head *pkt_queue) 2316 { 2317 struct htc_endpoint *endpoint; 2318 struct htc_packet *first_pkt; 2319 bool rx_unblock = false; 2320 int status = 0, depth; 2321 2322 if (list_empty(pkt_queue)) 2323 return -ENOMEM; 2324 2325 first_pkt = list_first_entry(pkt_queue, struct htc_packet, list); 2326 2327 if (first_pkt->endpoint >= ENDPOINT_MAX) 2328 return status; 2329 2330 depth = get_queue_depth(pkt_queue); 2331 2332 ath6kl_dbg(ATH6KL_DBG_HTC, 2333 "htc rx add multiple ep id %d cnt %d len %d\n", 2334 first_pkt->endpoint, depth, first_pkt->buf_len); 2335 2336 
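/*
 * Note: every packet in pkt_queue is expected to be destined for the same
 * endpoint; only first_pkt->endpoint is consulted below when the queue is
 * spliced onto that endpoint's rx_bufq.
 */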
endpoint = &target->endpoint[first_pkt->endpoint]; 2337 2338 if (target->htc_flags & HTC_OP_STATE_STOPPING) { 2339 struct htc_packet *packet, *tmp_pkt; 2340 2341 /* walk through queue and mark each one canceled */ 2342 list_for_each_entry_safe(packet, tmp_pkt, pkt_queue, list) { 2343 packet->status = -ECANCELED; 2344 list_del(&packet->list); 2345 ath6kl_htc_rx_complete(endpoint, packet); 2346 } 2347 2348 return status; 2349 } 2350 2351 spin_lock_bh(&target->rx_lock); 2352 2353 list_splice_tail_init(pkt_queue, &endpoint->rx_bufq); 2354 2355 /* check if we are blocked waiting for a new buffer */ 2356 if (target->rx_st_flags & HTC_RECV_WAIT_BUFFERS) { 2357 if (target->ep_waiting == first_pkt->endpoint) { 2358 ath6kl_dbg(ATH6KL_DBG_HTC, 2359 "htc rx blocked on ep %d, unblocking\n", 2360 target->ep_waiting); 2361 target->rx_st_flags &= ~HTC_RECV_WAIT_BUFFERS; 2362 target->ep_waiting = ENDPOINT_MAX; 2363 rx_unblock = true; 2364 } 2365 } 2366 2367 spin_unlock_bh(&target->rx_lock); 2368 2369 if (rx_unblock && !(target->htc_flags & HTC_OP_STATE_STOPPING)) 2370 /* TODO : implement a buffer threshold count? */ 2371 ath6kl_hif_rx_control(target->dev, true); 2372 2373 return status; 2374 } 2375 2376 static void ath6kl_htc_mbox_flush_rx_buf(struct htc_target *target) 2377 { 2378 struct htc_endpoint *endpoint; 2379 struct htc_packet *packet, *tmp_pkt; 2380 int i; 2381 2382 for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) { 2383 endpoint = &target->endpoint[i]; 2384 if (!endpoint->svc_id) 2385 /* not in use.. */ 2386 continue; 2387 2388 spin_lock_bh(&target->rx_lock); 2389 list_for_each_entry_safe(packet, tmp_pkt, 2390 &endpoint->rx_bufq, list) { 2391 list_del(&packet->list); 2392 spin_unlock_bh(&target->rx_lock); 2393 ath6kl_dbg(ATH6KL_DBG_HTC, 2394 "htc rx flush pkt 0x%p len %d ep %d\n", 2395 packet, packet->buf_len, 2396 packet->endpoint); 2397 /* 2398 * packets in rx_bufq of endpoint 0 have originally 2399 * been queued from target->free_ctrl_rxbuf where 2400 * packet and packet->buf_start are allocated 2401 * separately using kmalloc(). For other endpoint 2402 * rx_bufq, it is allocated as skb where packet is 2403 * skb->head. Take care of this difference while freeing 2404 * the memory. 
2405 */ 2406 if (packet->endpoint == ENDPOINT_0) { 2407 kfree(packet->buf_start); 2408 kfree(packet); 2409 } else { 2410 dev_kfree_skb(packet->pkt_cntxt); 2411 } 2412 spin_lock_bh(&target->rx_lock); 2413 } 2414 spin_unlock_bh(&target->rx_lock); 2415 } 2416 } 2417 2418 static int ath6kl_htc_mbox_conn_service(struct htc_target *target, 2419 struct htc_service_connect_req *conn_req, 2420 struct htc_service_connect_resp *conn_resp) 2421 { 2422 struct htc_packet *rx_pkt = NULL; 2423 struct htc_packet *tx_pkt = NULL; 2424 struct htc_conn_service_resp *resp_msg; 2425 struct htc_conn_service_msg *conn_msg; 2426 struct htc_endpoint *endpoint; 2427 enum htc_endpoint_id assigned_ep = ENDPOINT_MAX; 2428 unsigned int max_msg_sz = 0; 2429 int status = 0; 2430 u16 msg_id; 2431 2432 ath6kl_dbg(ATH6KL_DBG_HTC, 2433 "htc connect service target 0x%p service id 0x%x\n", 2434 target, conn_req->svc_id); 2435 2436 if (conn_req->svc_id == HTC_CTRL_RSVD_SVC) { 2437 /* special case for pseudo control service */ 2438 assigned_ep = ENDPOINT_0; 2439 max_msg_sz = HTC_MAX_CTRL_MSG_LEN; 2440 } else { 2441 /* allocate a packet to send to the target */ 2442 tx_pkt = htc_get_control_buf(target, true); 2443 2444 if (!tx_pkt) 2445 return -ENOMEM; 2446 2447 conn_msg = (struct htc_conn_service_msg *)tx_pkt->buf; 2448 memset(conn_msg, 0, sizeof(*conn_msg)); 2449 conn_msg->msg_id = cpu_to_le16(HTC_MSG_CONN_SVC_ID); 2450 conn_msg->svc_id = cpu_to_le16(conn_req->svc_id); 2451 conn_msg->conn_flags = cpu_to_le16(conn_req->conn_flags); 2452 2453 set_htc_pkt_info(tx_pkt, NULL, (u8 *) conn_msg, 2454 sizeof(*conn_msg) + conn_msg->svc_meta_len, 2455 ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG); 2456 2457 /* we want synchronous operation */ 2458 tx_pkt->completion = NULL; 2459 ath6kl_htc_tx_prep_pkt(tx_pkt, 0, 0, 0); 2460 status = ath6kl_htc_tx_issue(target, tx_pkt); 2461 2462 if (status) 2463 goto fail_tx; 2464 2465 /* wait for response */ 2466 rx_pkt = htc_wait_for_ctrl_msg(target); 2467 2468 if (!rx_pkt) { 2469 status = -ENOMEM; 2470 goto fail_tx; 2471 } 2472 2473 resp_msg = (struct htc_conn_service_resp *)rx_pkt->buf; 2474 msg_id = le16_to_cpu(resp_msg->msg_id); 2475 2476 if ((msg_id != HTC_MSG_CONN_SVC_RESP_ID) || 2477 (rx_pkt->act_len < sizeof(*resp_msg))) { 2478 status = -ENOMEM; 2479 goto fail_tx; 2480 } 2481 2482 conn_resp->resp_code = resp_msg->status; 2483 /* check response status */ 2484 if (resp_msg->status != HTC_SERVICE_SUCCESS) { 2485 ath6kl_err("target failed service 0x%X connect request (status:%d)\n", 2486 resp_msg->svc_id, resp_msg->status); 2487 status = -ENOMEM; 2488 goto fail_tx; 2489 } 2490 2491 assigned_ep = (enum htc_endpoint_id)resp_msg->eid; 2492 max_msg_sz = le16_to_cpu(resp_msg->max_msg_sz); 2493 } 2494 2495 if (assigned_ep >= ENDPOINT_MAX || !max_msg_sz) { 2496 status = -ENOMEM; 2497 goto fail_tx; 2498 } 2499 2500 endpoint = &target->endpoint[assigned_ep]; 2501 endpoint->eid = assigned_ep; 2502 if (endpoint->svc_id) { 2503 status = -ENOMEM; 2504 goto fail_tx; 2505 } 2506 2507 /* return assigned endpoint to caller */ 2508 conn_resp->endpoint = assigned_ep; 2509 conn_resp->len_max = max_msg_sz; 2510 2511 /* setup the endpoint */ 2512 2513 /* this marks the endpoint in use */ 2514 endpoint->svc_id = conn_req->svc_id; 2515 2516 endpoint->max_txq_depth = conn_req->max_txq_depth; 2517 endpoint->len_max = max_msg_sz; 2518 endpoint->ep_cb = conn_req->ep_cb; 2519 endpoint->cred_dist.svc_id = conn_req->svc_id; 2520 endpoint->cred_dist.htc_ep = endpoint; 2521 endpoint->cred_dist.endpoint = assigned_ep; 2522 
endpoint->cred_dist.cred_sz = target->tgt_cred_sz; 2523 2524 switch (endpoint->svc_id) { 2525 case WMI_DATA_BK_SVC: 2526 endpoint->tx_drop_packet_threshold = MAX_DEF_COOKIE_NUM / 3; 2527 break; 2528 default: 2529 endpoint->tx_drop_packet_threshold = MAX_HI_COOKIE_NUM; 2530 break; 2531 } 2532 2533 if (conn_req->max_rxmsg_sz) { 2534 /* 2535 * Override the cred_per_msg calculation; this optimizes 2536 * the credit-low indications since the host will actually 2537 * issue smaller messages in the Send path. 2538 */ 2539 if (conn_req->max_rxmsg_sz > max_msg_sz) { 2540 status = -ENOMEM; 2541 goto fail_tx; 2542 } 2543 endpoint->cred_dist.cred_per_msg = 2544 conn_req->max_rxmsg_sz / target->tgt_cred_sz; 2545 } else 2546 endpoint->cred_dist.cred_per_msg = 2547 max_msg_sz / target->tgt_cred_sz; 2548 2549 if (!endpoint->cred_dist.cred_per_msg) 2550 endpoint->cred_dist.cred_per_msg = 1; 2551 2552 /* save local connection flags */ 2553 endpoint->conn_flags = conn_req->flags; 2554 2555 fail_tx: 2556 if (tx_pkt) 2557 htc_reclaim_txctrl_buf(target, tx_pkt); 2558 2559 if (rx_pkt) { 2560 htc_rxpkt_reset(rx_pkt); 2561 reclaim_rx_ctrl_buf(target, rx_pkt); 2562 } 2563 2564 return status; 2565 } 2566 2567 static void reset_ep_state(struct htc_target *target) 2568 { 2569 struct htc_endpoint *endpoint; 2570 int i; 2571 2572 for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) { 2573 endpoint = &target->endpoint[i]; 2574 memset(&endpoint->cred_dist, 0, sizeof(endpoint->cred_dist)); 2575 endpoint->svc_id = 0; 2576 endpoint->len_max = 0; 2577 endpoint->max_txq_depth = 0; 2578 memset(&endpoint->ep_st, 0, 2579 sizeof(endpoint->ep_st)); 2580 INIT_LIST_HEAD(&endpoint->rx_bufq); 2581 INIT_LIST_HEAD(&endpoint->txq); 2582 endpoint->target = target; 2583 } 2584 2585 /* reset distribution list */ 2586 /* FIXME: free existing entries */ 2587 INIT_LIST_HEAD(&target->cred_dist_list); 2588 } 2589 2590 static int ath6kl_htc_mbox_get_rxbuf_num(struct htc_target *target, 2591 enum htc_endpoint_id endpoint) 2592 { 2593 int num; 2594 2595 spin_lock_bh(&target->rx_lock); 2596 num = get_queue_depth(&(target->endpoint[endpoint].rx_bufq)); 2597 spin_unlock_bh(&target->rx_lock); 2598 return num; 2599 } 2600 2601 static void htc_setup_msg_bndl(struct htc_target *target) 2602 { 2603 /* limit what HTC can handle */ 2604 target->msg_per_bndl_max = min(HTC_HOST_MAX_MSG_PER_BUNDLE, 2605 target->msg_per_bndl_max); 2606 2607 if (ath6kl_hif_enable_scatter(target->dev->ar)) { 2608 target->msg_per_bndl_max = 0; 2609 return; 2610 } 2611 2612 /* limit bundling to what the device layer can handle */ 2613 target->msg_per_bndl_max = min(target->max_scat_entries, 2614 target->msg_per_bndl_max); 2615 2616 ath6kl_dbg(ATH6KL_DBG_BOOT, 2617 "htc bundling allowed msg_per_bndl_max %d\n", 2618 target->msg_per_bndl_max); 2619 2620 /* Max rx bundle size is limited by the max tx bundle size */ 2621 target->max_rx_bndl_sz = target->max_xfer_szper_scatreq; 2622 /* Max tx bundle size is limited by the extended mbox address range */ 2623 target->max_tx_bndl_sz = min(HIF_MBOX0_EXT_WIDTH, 2624 target->max_xfer_szper_scatreq); 2625 2626 ath6kl_dbg(ATH6KL_DBG_BOOT, "htc max_rx_bndl_sz %d max_tx_bndl_sz %d\n", 2627 target->max_rx_bndl_sz, target->max_tx_bndl_sz); 2628 2629 if (target->max_tx_bndl_sz) 2630 /* tx_bndl_mask is enabled per AC, each has 1 bit */ 2631 target->tx_bndl_mask = (1 << WMM_NUM_AC) - 1; 2632 2633 if (target->max_rx_bndl_sz) 2634 target->rx_bndl_enable = true; 2635 2636 if ((target->tgt_cred_sz % target->block_sz) != 0) { 2637 ath6kl_warn("credit size: %d is not block 
aligned! Disabling send bundling\n", 2638 target->tgt_cred_sz); 2639 2640 /* 2641 * Disallow send bundling since the credit size is 2642 * not aligned to a block size; the I/O block 2643 * padding will spill into the next credit buffer, 2644 * which is fatal. 2645 */ 2646 target->tx_bndl_mask = 0; 2647 } 2648 } 2649 2650 static int ath6kl_htc_mbox_wait_target(struct htc_target *target) 2651 { 2652 struct htc_packet *packet = NULL; 2653 struct htc_ready_ext_msg *rdy_msg; 2654 struct htc_service_connect_req connect; 2655 struct htc_service_connect_resp resp; 2656 int status; 2657 2658 /* FIXME: remove once USB support is implemented */ 2659 if (target->dev->ar->hif_type == ATH6KL_HIF_TYPE_USB) { 2660 ath6kl_err("HTC doesn't support USB yet. Patience!\n"); 2661 return -EOPNOTSUPP; 2662 } 2663 2664 /* we should be getting 1 control message that the target is ready */ 2665 packet = htc_wait_for_ctrl_msg(target); 2666 2667 if (!packet) 2668 return -ENOMEM; 2669 2670 /* we controlled the buffer creation so it's properly aligned */ 2671 rdy_msg = (struct htc_ready_ext_msg *)packet->buf; 2672 2673 if ((le16_to_cpu(rdy_msg->ver2_0_info.msg_id) != HTC_MSG_READY_ID) || 2674 (packet->act_len < sizeof(struct htc_ready_msg))) { 2675 status = -ENOMEM; 2676 goto fail_wait_target; 2677 } 2678 2679 if (!rdy_msg->ver2_0_info.cred_cnt || !rdy_msg->ver2_0_info.cred_sz) { 2680 status = -ENOMEM; 2681 goto fail_wait_target; 2682 } 2683 2684 target->tgt_creds = le16_to_cpu(rdy_msg->ver2_0_info.cred_cnt); 2685 target->tgt_cred_sz = le16_to_cpu(rdy_msg->ver2_0_info.cred_sz); 2686 2687 ath6kl_dbg(ATH6KL_DBG_BOOT, 2688 "htc target ready credits %d size %d\n", 2689 target->tgt_creds, target->tgt_cred_sz); 2690 2691 /* check if this is an extended ready message */ 2692 if (packet->act_len >= sizeof(struct htc_ready_ext_msg)) { 2693 /* this is an extended message */ 2694 target->htc_tgt_ver = rdy_msg->htc_ver; 2695 target->msg_per_bndl_max = rdy_msg->msg_per_htc_bndl; 2696 } else { 2697 /* legacy */ 2698 target->htc_tgt_ver = HTC_VERSION_2P0; 2699 target->msg_per_bndl_max = 0; 2700 } 2701 2702 ath6kl_dbg(ATH6KL_DBG_BOOT, "htc using protocol %s (%d)\n", 2703 (target->htc_tgt_ver == HTC_VERSION_2P0) ? "2.0" : ">= 2.1", 2704 target->htc_tgt_ver); 2705 2706 if (target->msg_per_bndl_max > 0) 2707 htc_setup_msg_bndl(target); 2708 2709 /* setup our pseudo HTC control endpoint connection */ 2710 memset(&connect, 0, sizeof(connect)); 2711 memset(&resp, 0, sizeof(resp)); 2712 connect.ep_cb.rx = htc_ctrl_rx; 2713 connect.ep_cb.rx_refill = NULL; 2714 connect.ep_cb.tx_full = NULL; 2715 connect.max_txq_depth = NUM_CONTROL_BUFFERS; 2716 connect.svc_id = HTC_CTRL_RSVD_SVC; 2717 2718 /* connect fake service */ 2719 status = ath6kl_htc_mbox_conn_service((void *)target, &connect, &resp); 2720 2721 if (status) 2722 /* 2723 * FIXME: this call doesn't make sense; the caller should 2724 * call ath6kl_htc_mbox_cleanup() when it wants to remove htc 2725 */ 2726 ath6kl_hif_cleanup_scatter(target->dev->ar); 2727 2728 fail_wait_target: 2729 if (packet) { 2730 htc_rxpkt_reset(packet); 2731 reclaim_rx_ctrl_buf(target, packet); 2732 } 2733 2734 return status; 2735 } 2736 2737 /* 2738 * Start HTC, enable interrupts and let the target know 2739 * the host has finished setup.
2740 */ 2741 static int ath6kl_htc_mbox_start(struct htc_target *target) 2742 { 2743 struct htc_packet *packet; 2744 int status; 2745 2746 memset(&target->dev->irq_proc_reg, 0, 2747 sizeof(target->dev->irq_proc_reg)); 2748 2749 /* Disable interrupts at the chip level */ 2750 ath6kl_hif_disable_intrs(target->dev); 2751 2752 target->htc_flags = 0; 2753 target->rx_st_flags = 0; 2754 2755 /* Push control receive buffers into htc control endpoint */ 2756 while ((packet = htc_get_control_buf(target, false)) != NULL) { 2757 status = htc_add_rxbuf(target, packet); 2758 if (status) 2759 return status; 2760 } 2761 2762 /* NOTE: the first entry in the distribution list is ENDPOINT_0 */ 2763 ath6kl_credit_init(target->credit_info, &target->cred_dist_list, 2764 target->tgt_creds); 2765 2766 dump_cred_dist_stats(target); 2767 2768 /* Indicate setup completion to the target */ 2769 status = htc_setup_tx_complete(target); 2770 2771 if (status) 2772 return status; 2773 2774 /* unmask interrupts */ 2775 status = ath6kl_hif_unmask_intrs(target->dev); 2776 2777 if (status) 2778 ath6kl_htc_mbox_stop(target); 2779 2780 return status; 2781 } 2782 2783 static int ath6kl_htc_reset(struct htc_target *target) 2784 { 2785 u32 block_size, ctrl_bufsz; 2786 struct htc_packet *packet; 2787 int i; 2788 2789 reset_ep_state(target); 2790 2791 block_size = target->dev->ar->mbox_info.block_size; 2792 2793 ctrl_bufsz = (block_size > HTC_MAX_CTRL_MSG_LEN) ? 2794 (block_size + HTC_HDR_LENGTH) : 2795 (HTC_MAX_CTRL_MSG_LEN + HTC_HDR_LENGTH); 2796 2797 for (i = 0; i < NUM_CONTROL_BUFFERS; i++) { 2798 packet = kzalloc(sizeof(*packet), GFP_KERNEL); 2799 if (!packet) 2800 return -ENOMEM; 2801 2802 packet->buf_start = kzalloc(ctrl_bufsz, GFP_KERNEL); 2803 if (!packet->buf_start) { 2804 kfree(packet); 2805 return -ENOMEM; 2806 } 2807 2808 packet->buf_len = ctrl_bufsz; 2809 if (i < NUM_CONTROL_RX_BUFFERS) { 2810 packet->act_len = 0; 2811 packet->buf = packet->buf_start; 2812 packet->endpoint = ENDPOINT_0; 2813 list_add_tail(&packet->list, &target->free_ctrl_rxbuf); 2814 } else 2815 list_add_tail(&packet->list, &target->free_ctrl_txbuf); 2816 } 2817 2818 return 0; 2819 } 2820 2821 /* htc_stop: stop interrupt reception, and flush all queued buffers */ 2822 static void ath6kl_htc_mbox_stop(struct htc_target *target) 2823 { 2824 spin_lock_bh(&target->htc_lock); 2825 target->htc_flags |= HTC_OP_STATE_STOPPING; 2826 spin_unlock_bh(&target->htc_lock); 2827 2828 /* 2829 * Masking interrupts is a synchronous operation; when this 2830 * function returns, all pending HIF I/O has completed and we 2831 * can safely flush the queues.
2832 */ 2833 ath6kl_hif_mask_intrs(target->dev); 2834 2835 ath6kl_htc_flush_txep_all(target); 2836 2837 ath6kl_htc_mbox_flush_rx_buf(target); 2838 2839 ath6kl_htc_reset(target); 2840 } 2841 2842 static void *ath6kl_htc_mbox_create(struct ath6kl *ar) 2843 { 2844 struct htc_target *target = NULL; 2845 int status = 0; 2846 2847 target = kzalloc(sizeof(*target), GFP_KERNEL); 2848 if (!target) { 2849 ath6kl_err("unable to allocate memory\n"); 2850 return NULL; 2851 } 2852 2853 target->dev = kzalloc(sizeof(*target->dev), GFP_KERNEL); 2854 if (!target->dev) { 2855 ath6kl_err("unable to allocate memory\n"); 2856 status = -ENOMEM; 2857 goto err_htc_cleanup; 2858 } 2859 2860 spin_lock_init(&target->htc_lock); 2861 spin_lock_init(&target->rx_lock); 2862 spin_lock_init(&target->tx_lock); 2863 2864 INIT_LIST_HEAD(&target->free_ctrl_txbuf); 2865 INIT_LIST_HEAD(&target->free_ctrl_rxbuf); 2866 INIT_LIST_HEAD(&target->cred_dist_list); 2867 2868 target->dev->ar = ar; 2869 target->dev->htc_cnxt = target; 2870 target->ep_waiting = ENDPOINT_MAX; 2871 2872 status = ath6kl_hif_setup(target->dev); 2873 if (status) 2874 goto err_htc_cleanup; 2875 2876 status = ath6kl_htc_reset(target); 2877 if (status) 2878 goto err_htc_cleanup; 2879 2880 return target; 2881 2882 err_htc_cleanup: 2883 ath6kl_htc_mbox_cleanup(target); 2884 2885 return NULL; 2886 } 2887 2888 /* cleanup the HTC instance */ 2889 static void ath6kl_htc_mbox_cleanup(struct htc_target *target) 2890 { 2891 struct htc_packet *packet, *tmp_packet; 2892 2893 /* FIXME: remove check once USB support is implemented */ 2894 if (target->dev->ar->hif_type != ATH6KL_HIF_TYPE_USB) 2895 ath6kl_hif_cleanup_scatter(target->dev->ar); 2896 2897 list_for_each_entry_safe(packet, tmp_packet, 2898 &target->free_ctrl_txbuf, list) { 2899 list_del(&packet->list); 2900 kfree(packet->buf_start); 2901 kfree(packet); 2902 } 2903 2904 list_for_each_entry_safe(packet, tmp_packet, 2905 &target->free_ctrl_rxbuf, list) { 2906 list_del(&packet->list); 2907 kfree(packet->buf_start); 2908 kfree(packet); 2909 } 2910 2911 kfree(target->dev); 2912 kfree(target); 2913 } 2914 2915 static const struct ath6kl_htc_ops ath6kl_htc_mbox_ops = { 2916 .create = ath6kl_htc_mbox_create, 2917 .wait_target = ath6kl_htc_mbox_wait_target, 2918 .start = ath6kl_htc_mbox_start, 2919 .conn_service = ath6kl_htc_mbox_conn_service, 2920 .tx = ath6kl_htc_mbox_tx, 2921 .stop = ath6kl_htc_mbox_stop, 2922 .cleanup = ath6kl_htc_mbox_cleanup, 2923 .flush_txep = ath6kl_htc_mbox_flush_txep, 2924 .flush_rx_buf = ath6kl_htc_mbox_flush_rx_buf, 2925 .activity_changed = ath6kl_htc_mbox_activity_changed, 2926 .get_rxbuf_num = ath6kl_htc_mbox_get_rxbuf_num, 2927 .add_rxbuf_multiple = ath6kl_htc_mbox_add_rxbuf_multiple, 2928 .credit_setup = ath6kl_htc_mbox_credit_setup, 2929 }; 2930 2931 void ath6kl_htc_mbox_attach(struct ath6kl *ar) 2932 { 2933 ar->htc_ops = &ath6kl_htc_mbox_ops; 2934 } 2935
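/*
 * Illustrative usage sketch (not part of this file, and the exact call
 * sites live in the ath6kl core and HIF glue code and may differ): an
 * SDIO-based setup is expected to register the ops table above before
 * the core brings HTC up, roughly along these lines:
 *
 *	ath6kl_htc_mbox_attach(ar);
 *	ar->htc_target = ar->htc_ops->create(ar);
 *	if (!ar->htc_target)
 *		goto err;
 *	...
 *	ar->htc_ops->wait_target(ar->htc_target);
 *	ar->htc_ops->start(ar->htc_target);
 */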