/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "core.h"
#include "hif.h"
#include "debug.h"

/********/
/* Send */
/********/

static void ath10k_htc_control_tx_complete(struct ath10k *ar,
					   struct sk_buff *skb)
{
	kfree_skb(skb);
}

static struct sk_buff *ath10k_htc_build_tx_ctrl_skb(void *ar)
{
	struct sk_buff *skb;
	struct ath10k_skb_cb *skb_cb;

	skb = dev_alloc_skb(ATH10K_HTC_CONTROL_BUFFER_SIZE);
	if (!skb)
		return NULL;

	skb_reserve(skb, 20); /* FIXME: why 20 bytes? */
	WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");

	skb_cb = ATH10K_SKB_CB(skb);
	memset(skb_cb, 0, sizeof(*skb_cb));

	ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: skb %pK\n", __func__, skb);
	return skb;
}

static inline void ath10k_htc_restore_tx_skb(struct ath10k_htc *htc,
					     struct sk_buff *skb)
{
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);

	if (htc->ar->dev_type != ATH10K_DEV_TYPE_HL)
		dma_unmap_single(htc->ar->dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
	skb_pull(skb, sizeof(struct ath10k_htc_hdr));
}

void ath10k_htc_notify_tx_completion(struct ath10k_htc_ep *ep,
				     struct sk_buff *skb)
{
	struct ath10k *ar = ep->htc->ar;

	ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: ep %d skb %pK\n", __func__,
		   ep->eid, skb);

	ath10k_htc_restore_tx_skb(ep->htc, skb);

	if (!ep->ep_ops.ep_tx_complete) {
		ath10k_warn(ar, "no tx handler for eid %d\n", ep->eid);
		dev_kfree_skb_any(skb);
		return;
	}

	ep->ep_ops.ep_tx_complete(ep->htc->ar, skb);
}
EXPORT_SYMBOL(ath10k_htc_notify_tx_completion);

static void ath10k_htc_prepare_tx_skb(struct ath10k_htc_ep *ep,
				      struct sk_buff *skb)
{
	struct ath10k_htc_hdr *hdr;

	hdr = (struct ath10k_htc_hdr *)skb->data;

	hdr->eid = ep->eid;
	hdr->len = __cpu_to_le16(skb->len - sizeof(*hdr));
	hdr->flags = 0;
	hdr->flags |= ATH10K_HTC_FLAG_NEED_CREDIT_UPDATE;

	spin_lock_bh(&ep->htc->tx_lock);
	hdr->seq_no = ep->seq_no++;
	spin_unlock_bh(&ep->htc->tx_lock);
}

int ath10k_htc_send(struct ath10k_htc *htc,
		    enum ath10k_htc_ep_id eid,
		    struct sk_buff *skb)
{
	struct ath10k *ar = htc->ar;
	struct ath10k_htc_ep *ep = &htc->endpoint[eid];
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
	struct ath10k_hif_sg_item sg_item;
	struct device *dev = htc->ar->dev;
	int credits = 0;
	int ret;

	if (htc->ar->state == ATH10K_STATE_WEDGED)
		return -ECOMM;

	if (eid >= ATH10K_HTC_EP_COUNT) {
		ath10k_warn(ar, "Invalid endpoint id: %d\n", eid);
		return -ENOENT;
	}

	skb_push(skb, sizeof(struct ath10k_htc_hdr));

	if (ep->tx_credit_flow_enabled) {
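		/* Credit-based flow control: the target grants credits in
		 * units of target_credit_size bytes, so a frame consumes one
		 * credit per started block. If not enough credits are
		 * available the frame is rejected with -EAGAIN so the caller
		 * can retry once the target reports credits back.
		 */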
		credits = DIV_ROUND_UP(skb->len, htc->target_credit_size);
		spin_lock_bh(&htc->tx_lock);
		if (ep->tx_credits < credits) {
			ath10k_dbg(ar, ATH10K_DBG_HTC,
				   "htc insufficient credits ep %d required %d available %d\n",
				   eid, credits, ep->tx_credits);
			spin_unlock_bh(&htc->tx_lock);
			ret = -EAGAIN;
			goto err_pull;
		}
		ep->tx_credits -= credits;
		ath10k_dbg(ar, ATH10K_DBG_HTC,
			   "htc ep %d consumed %d credits (total %d)\n",
			   eid, credits, ep->tx_credits);
		spin_unlock_bh(&htc->tx_lock);
	}

	ath10k_htc_prepare_tx_skb(ep, skb);

	skb_cb->eid = eid;
	if (ar->dev_type != ATH10K_DEV_TYPE_HL) {
		skb_cb->paddr = dma_map_single(dev, skb->data, skb->len,
					       DMA_TO_DEVICE);
		ret = dma_mapping_error(dev, skb_cb->paddr);
		if (ret) {
			ret = -EIO;
			goto err_credits;
		}
	}

	sg_item.transfer_id = ep->eid;
	sg_item.transfer_context = skb;
	sg_item.vaddr = skb->data;
	sg_item.paddr = skb_cb->paddr;
	sg_item.len = skb->len;

	ret = ath10k_hif_tx_sg(htc->ar, ep->ul_pipe_id, &sg_item, 1);
	if (ret)
		goto err_unmap;

	return 0;

err_unmap:
	if (ar->dev_type != ATH10K_DEV_TYPE_HL)
		dma_unmap_single(dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
err_credits:
	if (ep->tx_credit_flow_enabled) {
		spin_lock_bh(&htc->tx_lock);
		ep->tx_credits += credits;
		ath10k_dbg(ar, ATH10K_DBG_HTC,
			   "htc ep %d reverted %d credits back (total %d)\n",
			   eid, credits, ep->tx_credits);
		spin_unlock_bh(&htc->tx_lock);

		if (ep->ep_ops.ep_tx_credits)
			ep->ep_ops.ep_tx_credits(htc->ar);
	}
err_pull:
	skb_pull(skb, sizeof(struct ath10k_htc_hdr));
	return ret;
}

void ath10k_htc_tx_completion_handler(struct ath10k *ar, struct sk_buff *skb)
{
	struct ath10k_htc *htc = &ar->htc;
	struct ath10k_skb_cb *skb_cb;
	struct ath10k_htc_ep *ep;

	if (WARN_ON_ONCE(!skb))
		return;

	skb_cb = ATH10K_SKB_CB(skb);
	ep = &htc->endpoint[skb_cb->eid];

	ath10k_htc_notify_tx_completion(ep, skb);
	/* the skb now belongs to the completion handler */
}
EXPORT_SYMBOL(ath10k_htc_tx_completion_handler);

/***********/
/* Receive */
/***********/

static void
ath10k_htc_process_credit_report(struct ath10k_htc *htc,
				 const struct ath10k_htc_credit_report *report,
				 int len,
				 enum ath10k_htc_ep_id eid)
{
	struct ath10k *ar = htc->ar;
	struct ath10k_htc_ep *ep;
	int i, n_reports;

	if (len % sizeof(*report))
		ath10k_warn(ar, "Uneven credit report len %d", len);

	n_reports = len / sizeof(*report);

	spin_lock_bh(&htc->tx_lock);
	for (i = 0; i < n_reports; i++, report++) {
		if (report->eid >= ATH10K_HTC_EP_COUNT)
			break;

		ep = &htc->endpoint[report->eid];
		ep->tx_credits += report->credits;

		ath10k_dbg(ar, ATH10K_DBG_HTC, "htc ep %d got %d credits (total %d)\n",
			   report->eid, report->credits, ep->tx_credits);

		if (ep->ep_ops.ep_tx_credits) {
			spin_unlock_bh(&htc->tx_lock);
			ep->ep_ops.ep_tx_credits(htc->ar);
			spin_lock_bh(&htc->tx_lock);
		}
	}
	spin_unlock_bh(&htc->tx_lock);
}
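
/* A lookahead report provides 4 bytes of lookahead data for the next pending
 * message on the endpoint. pre_valid and post_valid must be bitwise
 * complements of each other for the report to be valid; otherwise it is
 * silently ignored (see the comment inside).
 */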
static int
ath10k_htc_process_lookahead(struct ath10k_htc *htc,
			     const struct ath10k_htc_lookahead_report *report,
			     int len,
			     enum ath10k_htc_ep_id eid,
			     void *next_lookaheads,
			     int *next_lookaheads_len)
{
	struct ath10k *ar = htc->ar;

	/* Invalid lookahead flags are actually transmitted by
	 * the target in the HTC control message.
	 * Since this will happen at every boot we silently ignore
	 * the lookahead in this case
	 */
	if (report->pre_valid != ((~report->post_valid) & 0xFF))
		return 0;

	if (next_lookaheads && next_lookaheads_len) {
		ath10k_dbg(ar, ATH10K_DBG_HTC,
			   "htc rx lookahead found pre_valid 0x%x post_valid 0x%x\n",
			   report->pre_valid, report->post_valid);

		/* look ahead bytes are valid, copy them over */
		memcpy((u8 *)next_lookaheads, report->lookahead, 4);

		*next_lookaheads_len = 1;
	}

	return 0;
}

static int
ath10k_htc_process_lookahead_bundle(struct ath10k_htc *htc,
				    const struct ath10k_htc_lookahead_bundle *report,
				    int len,
				    enum ath10k_htc_ep_id eid,
				    void *next_lookaheads,
				    int *next_lookaheads_len)
{
	struct ath10k *ar = htc->ar;
	int bundle_cnt = len / sizeof(*report);

	if (!bundle_cnt || (bundle_cnt > HTC_HOST_MAX_MSG_PER_RX_BUNDLE)) {
		ath10k_warn(ar, "Invalid lookahead bundle count: %d\n",
			    bundle_cnt);
		return -EINVAL;
	}

	if (next_lookaheads && next_lookaheads_len) {
		int i;

		for (i = 0; i < bundle_cnt; i++) {
			memcpy(((u8 *)next_lookaheads) + 4 * i,
			       report->lookahead, 4);
			report++;
		}

		*next_lookaheads_len = bundle_cnt;
	}

	return 0;
}

int ath10k_htc_process_trailer(struct ath10k_htc *htc,
			       u8 *buffer,
			       int length,
			       enum ath10k_htc_ep_id src_eid,
			       void *next_lookaheads,
			       int *next_lookaheads_len)
{
	struct ath10k_htc_lookahead_bundle *bundle;
	struct ath10k *ar = htc->ar;
	int status = 0;
	struct ath10k_htc_record *record;
	u8 *orig_buffer;
	int orig_length;
	size_t len;

	orig_buffer = buffer;
	orig_length = length;

	while (length > 0) {
		record = (struct ath10k_htc_record *)buffer;

		if (length < sizeof(record->hdr)) {
			status = -EINVAL;
			break;
		}

		if (record->hdr.len > length) {
			/* no room left in buffer for record */
			ath10k_warn(ar, "Invalid record length: %d\n",
				    record->hdr.len);
			status = -EINVAL;
			break;
		}

		switch (record->hdr.id) {
		case ATH10K_HTC_RECORD_CREDITS:
			len = sizeof(struct ath10k_htc_credit_report);
			if (record->hdr.len < len) {
				ath10k_warn(ar, "Credit report too long\n");
				status = -EINVAL;
				break;
			}
			ath10k_htc_process_credit_report(htc,
							 record->credit_report,
							 record->hdr.len,
							 src_eid);
			break;
		case ATH10K_HTC_RECORD_LOOKAHEAD:
			len = sizeof(struct ath10k_htc_lookahead_report);
			if (record->hdr.len < len) {
				ath10k_warn(ar, "Lookahead report too long\n");
				status = -EINVAL;
				break;
			}
			status = ath10k_htc_process_lookahead(htc,
							      record->lookahead_report,
							      record->hdr.len,
							      src_eid,
							      next_lookaheads,
							      next_lookaheads_len);
			break;
		case ATH10K_HTC_RECORD_LOOKAHEAD_BUNDLE:
			bundle = record->lookahead_bundle;
			status = ath10k_htc_process_lookahead_bundle(htc,
								     bundle,
								     record->hdr.len,
								     src_eid,
								     next_lookaheads,
								     next_lookaheads_len);
			break;
		default:
			ath10k_warn(ar, "Unhandled record: id:%d length:%d\n",
				    record->hdr.id, record->hdr.len);
			break;
		}

		if (status)
			break;

		/* multiple records may be present in a trailer */
		buffer += sizeof(record->hdr) + record->hdr.len;
		length -= sizeof(record->hdr) + record->hdr.len;
	}

	if (status)
		ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc rx bad trailer", "",
				orig_buffer, orig_length);

	return status;
}
EXPORT_SYMBOL(ath10k_htc_process_trailer);
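
/* Entry point for completed RX buffers from the HIF layer: validates the HTC
 * header (endpoint id and lengths), processes an optional trailer of records
 * at the end of the payload, trims the trailer off and hands the remaining
 * payload to the endpoint's ep_rx_complete callback. The skb is consumed
 * either by that callback or freed here.
 */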
void ath10k_htc_rx_completion_handler(struct ath10k *ar, struct sk_buff *skb)
{
	int status = 0;
	struct ath10k_htc *htc = &ar->htc;
	struct ath10k_htc_hdr *hdr;
	struct ath10k_htc_ep *ep;
	u16 payload_len;
	u32 trailer_len = 0;
	size_t min_len;
	u8 eid;
	bool trailer_present;

	hdr = (struct ath10k_htc_hdr *)skb->data;
	skb_pull(skb, sizeof(*hdr));

	eid = hdr->eid;

	if (eid >= ATH10K_HTC_EP_COUNT) {
		ath10k_warn(ar, "HTC Rx: invalid eid %d\n", eid);
		ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad header", "",
				hdr, sizeof(*hdr));
		goto out;
	}

	ep = &htc->endpoint[eid];

	payload_len = __le16_to_cpu(hdr->len);

	if (payload_len + sizeof(*hdr) > ATH10K_HTC_MAX_LEN) {
		ath10k_warn(ar, "HTC rx frame too long, len: %zu\n",
			    payload_len + sizeof(*hdr));
		ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad rx pkt len", "",
				hdr, sizeof(*hdr));
		goto out;
	}

	if (skb->len < payload_len) {
		ath10k_dbg(ar, ATH10K_DBG_HTC,
			   "HTC Rx: insufficient length, got %d, expected %d\n",
			   skb->len, payload_len);
		ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad rx pkt len",
				"", hdr, sizeof(*hdr));
		goto out;
	}

	/* get flags to check for trailer */
	trailer_present = hdr->flags & ATH10K_HTC_FLAG_TRAILER_PRESENT;
	if (trailer_present) {
		u8 *trailer;

		trailer_len = hdr->trailer_len;
		min_len = sizeof(struct ath10k_ath10k_htc_record_hdr);

		if ((trailer_len < min_len) ||
		    (trailer_len > payload_len)) {
			ath10k_warn(ar, "Invalid trailer length: %d\n",
				    trailer_len);
			goto out;
		}

		trailer = (u8 *)hdr;
		trailer += sizeof(*hdr);
		trailer += payload_len;
		trailer -= trailer_len;
		status = ath10k_htc_process_trailer(htc, trailer,
						    trailer_len, hdr->eid,
						    NULL, NULL);
		if (status)
			goto out;

		skb_trim(skb, skb->len - trailer_len);
	}

	if (((int)payload_len - (int)trailer_len) <= 0)
		/* zero length packet with trailer data, just drop these */
		goto out;

	ath10k_dbg(ar, ATH10K_DBG_HTC, "htc rx completion ep %d skb %pK\n",
		   eid, skb);
	ep->ep_ops.ep_rx_complete(ar, skb);

	/* skb is now owned by the rx completion handler */
	skb = NULL;
out:
	kfree_skb(skb);
}
EXPORT_SYMBOL(ath10k_htc_rx_completion_handler);
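
/* RX handler for the pseudo control endpoint (ep 0). READY and
 * CONNECT_SERVICE response messages are copied into control_resp_buffer and
 * signalled through ctl_resp so ath10k_htc_wait_target() and
 * ath10k_htc_connect_service() can pick them up; suspend-complete events are
 * forwarded to the registered htc_ops callback.
 */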
static void ath10k_htc_control_rx_complete(struct ath10k *ar,
					   struct sk_buff *skb)
{
	struct ath10k_htc *htc = &ar->htc;
	struct ath10k_htc_msg *msg = (struct ath10k_htc_msg *)skb->data;

	switch (__le16_to_cpu(msg->hdr.message_id)) {
	case ATH10K_HTC_MSG_READY_ID:
	case ATH10K_HTC_MSG_CONNECT_SERVICE_RESP_ID:
		/* handle HTC control message */
		if (completion_done(&htc->ctl_resp)) {
			/* this is a fatal error, target should not be
			 * sending unsolicited messages on the ep 0
			 */
			ath10k_warn(ar, "HTC rx ctrl still processing\n");
			complete(&htc->ctl_resp);
			goto out;
		}

		htc->control_resp_len =
			min_t(int, skb->len,
			      ATH10K_HTC_MAX_CTRL_MSG_LEN);

		memcpy(htc->control_resp_buffer, skb->data,
		       htc->control_resp_len);

		complete(&htc->ctl_resp);
		break;
	case ATH10K_HTC_MSG_SEND_SUSPEND_COMPLETE:
		htc->htc_ops.target_send_suspend_complete(ar);
		break;
	default:
		ath10k_warn(ar, "ignoring unsolicited htc ep0 event\n");
		break;
	}

out:
	kfree_skb(skb);
}

/***************/
/* Init/Deinit */
/***************/

static const char *htc_service_name(enum ath10k_htc_svc_id id)
{
	switch (id) {
	case ATH10K_HTC_SVC_ID_RESERVED:
		return "Reserved";
	case ATH10K_HTC_SVC_ID_RSVD_CTRL:
		return "Control";
	case ATH10K_HTC_SVC_ID_WMI_CONTROL:
		return "WMI";
	case ATH10K_HTC_SVC_ID_WMI_DATA_BE:
		return "DATA BE";
	case ATH10K_HTC_SVC_ID_WMI_DATA_BK:
		return "DATA BK";
	case ATH10K_HTC_SVC_ID_WMI_DATA_VI:
		return "DATA VI";
	case ATH10K_HTC_SVC_ID_WMI_DATA_VO:
		return "DATA VO";
	case ATH10K_HTC_SVC_ID_NMI_CONTROL:
		return "NMI Control";
	case ATH10K_HTC_SVC_ID_NMI_DATA:
		return "NMI Data";
	case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
		return "HTT Data";
	case ATH10K_HTC_SVC_ID_HTT_DATA2_MSG:
		return "HTT Data";
	case ATH10K_HTC_SVC_ID_HTT_DATA3_MSG:
		return "HTT Data";
	case ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS:
		return "RAW";
	case ATH10K_HTC_SVC_ID_HTT_LOG_MSG:
		return "PKTLOG";
	}

	return "Unknown";
}

static void ath10k_htc_reset_endpoint_states(struct ath10k_htc *htc)
{
	struct ath10k_htc_ep *ep;
	int i;

	for (i = ATH10K_HTC_EP_0; i < ATH10K_HTC_EP_COUNT; i++) {
		ep = &htc->endpoint[i];
		ep->service_id = ATH10K_HTC_SVC_ID_UNUSED;
		ep->max_ep_message_len = 0;
		ep->max_tx_queue_depth = 0;
		ep->eid = i;
		ep->htc = htc;
		ep->tx_credit_flow_enabled = true;
	}
}

static u8 ath10k_htc_get_credit_allocation(struct ath10k_htc *htc,
					   u16 service_id)
{
	u8 allocation = 0;

	/* The WMI control service is the only service with flow control.
	 * Let it have all transmit credits.
	 */
	if (service_id == ATH10K_HTC_SVC_ID_WMI_CONTROL)
		allocation = htc->total_transmit_credits;

	return allocation;
}
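
/* Wait for the target's HTC READY message on endpoint 0. It carries the
 * total number of TX credits and the credit size; an extended READY message
 * (detected purely by its length) additionally advertises the maximum number
 * of messages per RX bundle.
 */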
int ath10k_htc_wait_target(struct ath10k_htc *htc)
{
	struct ath10k *ar = htc->ar;
	int i, status = 0;
	unsigned long time_left;
	struct ath10k_htc_msg *msg;
	u16 message_id;

	time_left = wait_for_completion_timeout(&htc->ctl_resp,
						ATH10K_HTC_WAIT_TIMEOUT_HZ);
	if (!time_left) {
		/* Workaround: In some cases the PCI HIF doesn't
		 * receive interrupt for the control response message
		 * even if the buffer was completed. It is suspected
		 * iomap writes unmasking PCI CE irqs aren't propagated
		 * properly in KVM PCI-passthrough sometimes.
		 */
		ath10k_warn(ar, "failed to receive control response completion, polling..\n");

		for (i = 0; i < CE_COUNT; i++)
			ath10k_hif_send_complete_check(htc->ar, i, 1);

		time_left =
			wait_for_completion_timeout(&htc->ctl_resp,
						    ATH10K_HTC_WAIT_TIMEOUT_HZ);

		if (!time_left)
			status = -ETIMEDOUT;
	}

	if (status < 0) {
		ath10k_err(ar, "ctl_resp never came in (%d)\n", status);
		return status;
	}

	if (htc->control_resp_len < sizeof(msg->hdr) + sizeof(msg->ready)) {
		ath10k_err(ar, "Invalid HTC ready msg len:%d\n",
			   htc->control_resp_len);
		return -ECOMM;
	}

	msg = (struct ath10k_htc_msg *)htc->control_resp_buffer;
	message_id = __le16_to_cpu(msg->hdr.message_id);

	if (message_id != ATH10K_HTC_MSG_READY_ID) {
		ath10k_err(ar, "Invalid HTC ready msg: 0x%x\n", message_id);
		return -ECOMM;
	}

	htc->total_transmit_credits = __le16_to_cpu(msg->ready.credit_count);
	htc->target_credit_size = __le16_to_cpu(msg->ready.credit_size);

	ath10k_dbg(ar, ATH10K_DBG_HTC,
		   "Target ready! transmit resources: %d size:%d\n",
		   htc->total_transmit_credits,
		   htc->target_credit_size);

	if ((htc->total_transmit_credits == 0) ||
	    (htc->target_credit_size == 0)) {
		ath10k_err(ar, "Invalid credit size received\n");
		return -ECOMM;
	}

	/* The only way to determine if the ready message is an extended
	 * message is from the size.
	 */
	if (htc->control_resp_len >=
	    sizeof(msg->hdr) + sizeof(msg->ready_ext)) {
		htc->max_msgs_per_htc_bundle =
			min_t(u8, msg->ready_ext.max_msgs_per_htc_bundle,
			      HTC_HOST_MAX_MSG_PER_RX_BUNDLE);
		ath10k_dbg(ar, ATH10K_DBG_HTC,
			   "Extended ready message. RX bundle size: %d\n",
			   htc->max_msgs_per_htc_bundle);
	}

	return 0;
}
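
/* Connect an HTC service to an endpoint. For the reserved control service
 * the endpoint is assigned locally (ep 0); for all other services a
 * CONNECT_SERVICE message is sent on ep 0 and the target's response supplies
 * the assigned endpoint and maximum message size. Credit flow control is
 * left enabled only for the WMI control service.
 */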
int ath10k_htc_connect_service(struct ath10k_htc *htc,
			       struct ath10k_htc_svc_conn_req *conn_req,
			       struct ath10k_htc_svc_conn_resp *conn_resp)
{
	struct ath10k *ar = htc->ar;
	struct ath10k_htc_msg *msg;
	struct ath10k_htc_conn_svc *req_msg;
	struct ath10k_htc_conn_svc_response resp_msg_dummy;
	struct ath10k_htc_conn_svc_response *resp_msg = &resp_msg_dummy;
	enum ath10k_htc_ep_id assigned_eid = ATH10K_HTC_EP_COUNT;
	struct ath10k_htc_ep *ep;
	struct sk_buff *skb;
	unsigned int max_msg_size = 0;
	int length, status;
	unsigned long time_left;
	bool disable_credit_flow_ctrl = false;
	u16 message_id, service_id, flags = 0;
	u8 tx_alloc = 0;

	/* special case for HTC pseudo control service */
	if (conn_req->service_id == ATH10K_HTC_SVC_ID_RSVD_CTRL) {
		disable_credit_flow_ctrl = true;
		assigned_eid = ATH10K_HTC_EP_0;
		max_msg_size = ATH10K_HTC_MAX_CTRL_MSG_LEN;
		memset(&resp_msg_dummy, 0, sizeof(resp_msg_dummy));
		goto setup;
	}

	tx_alloc = ath10k_htc_get_credit_allocation(htc,
						    conn_req->service_id);
	if (!tx_alloc)
		ath10k_dbg(ar, ATH10K_DBG_BOOT,
			   "boot htc service %s does not allocate target credits\n",
			   htc_service_name(conn_req->service_id));

	skb = ath10k_htc_build_tx_ctrl_skb(htc->ar);
	if (!skb) {
		ath10k_err(ar, "Failed to allocate HTC packet\n");
		return -ENOMEM;
	}

	length = sizeof(msg->hdr) + sizeof(msg->connect_service);
	skb_put(skb, length);
	memset(skb->data, 0, length);

	msg = (struct ath10k_htc_msg *)skb->data;
	msg->hdr.message_id =
		__cpu_to_le16(ATH10K_HTC_MSG_CONNECT_SERVICE_ID);

	flags |= SM(tx_alloc, ATH10K_HTC_CONN_FLAGS_RECV_ALLOC);

	/* Only enable credit flow control for WMI ctrl service */
	if (conn_req->service_id != ATH10K_HTC_SVC_ID_WMI_CONTROL) {
		flags |= ATH10K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
		disable_credit_flow_ctrl = true;
	}

	req_msg = &msg->connect_service;
	req_msg->flags = __cpu_to_le16(flags);
	req_msg->service_id = __cpu_to_le16(conn_req->service_id);

	reinit_completion(&htc->ctl_resp);

	status = ath10k_htc_send(htc, ATH10K_HTC_EP_0, skb);
	if (status) {
		kfree_skb(skb);
		return status;
	}

	/* wait for response */
	time_left = wait_for_completion_timeout(&htc->ctl_resp,
						ATH10K_HTC_CONN_SVC_TIMEOUT_HZ);
	if (!time_left) {
		ath10k_err(ar, "Service connect timeout\n");
		return -ETIMEDOUT;
	}

	/* we controlled the buffer creation, it's aligned */
	msg = (struct ath10k_htc_msg *)htc->control_resp_buffer;
	resp_msg = &msg->connect_service_response;
	message_id = __le16_to_cpu(msg->hdr.message_id);
	service_id = __le16_to_cpu(resp_msg->service_id);

	if ((message_id != ATH10K_HTC_MSG_CONNECT_SERVICE_RESP_ID) ||
	    (htc->control_resp_len < sizeof(msg->hdr) +
	     sizeof(msg->connect_service_response))) {
		ath10k_err(ar, "Invalid resp message ID 0x%x", message_id);
		return -EPROTO;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTC,
		   "HTC Service %s connect response: status: 0x%x, assigned ep: 0x%x\n",
		   htc_service_name(service_id),
		   resp_msg->status, resp_msg->eid);

	conn_resp->connect_resp_code = resp_msg->status;

	/* check response status */
	if (resp_msg->status != ATH10K_HTC_CONN_SVC_STATUS_SUCCESS) {
		ath10k_err(ar, "HTC Service %s connect request failed: 0x%x)\n",
			   htc_service_name(service_id),
			   resp_msg->status);
		return -EPROTO;
	}

	assigned_eid = (enum ath10k_htc_ep_id)resp_msg->eid;
	max_msg_size = __le16_to_cpu(resp_msg->max_msg_size);

setup:

	if (assigned_eid >= ATH10K_HTC_EP_COUNT)
		return -EPROTO;

	if (max_msg_size == 0)
		return -EPROTO;

	ep = &htc->endpoint[assigned_eid];
	ep->eid = assigned_eid;

	if (ep->service_id != ATH10K_HTC_SVC_ID_UNUSED)
		return -EPROTO;

	/* return assigned endpoint to caller */
	conn_resp->eid = assigned_eid;
	conn_resp->max_msg_len = __le16_to_cpu(resp_msg->max_msg_size);

	/* setup the endpoint */
	ep->service_id = conn_req->service_id;
	ep->max_tx_queue_depth = conn_req->max_send_queue_depth;
	ep->max_ep_message_len = __le16_to_cpu(resp_msg->max_msg_size);
	ep->tx_credits = tx_alloc;

	/* copy all the callbacks */
	ep->ep_ops = conn_req->ep_ops;

	status = ath10k_hif_map_service_to_pipe(htc->ar,
						ep->service_id,
						&ep->ul_pipe_id,
						&ep->dl_pipe_id);
	if (status) {
		ath10k_warn(ar, "unsupported HTC service id: %d\n",
			    ep->service_id);
		return status;
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT,
		   "boot htc service '%s' ul pipe %d dl pipe %d eid %d ready\n",
		   htc_service_name(ep->service_id), ep->ul_pipe_id,
		   ep->dl_pipe_id, ep->eid);

	if (disable_credit_flow_ctrl && ep->tx_credit_flow_enabled) {
		ep->tx_credit_flow_enabled = false;
		ath10k_dbg(ar, ATH10K_DBG_BOOT,
			   "boot htc service '%s' eid %d TX flow control disabled\n",
			   htc_service_name(ep->service_id), assigned_eid);
	}

	return status;
}
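
/* Allocate an skb for HTC TX with headroom reserved for the HTC header,
 * which ath10k_htc_send() later prepends with skb_push().
 */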
struct sk_buff *ath10k_htc_alloc_skb(struct ath10k *ar, int size)
{
	struct sk_buff *skb;

	skb = dev_alloc_skb(size + sizeof(struct ath10k_htc_hdr));
	if (!skb)
		return NULL;

	skb_reserve(skb, sizeof(struct ath10k_htc_hdr));

	/* FW/HTC requires 4-byte aligned streams */
	if (!IS_ALIGNED((unsigned long)skb->data, 4))
		ath10k_warn(ar, "Unaligned HTC tx skb\n");

	return skb;
}

static void ath10k_htc_pktlog_process_rx(struct ath10k *ar, struct sk_buff *skb)
{
	trace_ath10k_htt_pktlog(ar, skb->data, skb->len);
	dev_kfree_skb_any(skb);
}

static int ath10k_htc_pktlog_connect(struct ath10k *ar)
{
	struct ath10k_htc_svc_conn_resp conn_resp;
	struct ath10k_htc_svc_conn_req conn_req;
	int status;

	memset(&conn_req, 0, sizeof(conn_req));
	memset(&conn_resp, 0, sizeof(conn_resp));

	conn_req.ep_ops.ep_tx_complete = NULL;
	conn_req.ep_ops.ep_rx_complete = ath10k_htc_pktlog_process_rx;
	conn_req.ep_ops.ep_tx_credits = NULL;

	/* connect to control service */
	conn_req.service_id = ATH10K_HTC_SVC_ID_HTT_LOG_MSG;
	status = ath10k_htc_connect_service(&ar->htc, &conn_req, &conn_resp);
	if (status) {
		ath10k_warn(ar, "failed to connect to PKTLOG service: %d\n",
			    status);
		return status;
	}

	return 0;
}

static bool ath10k_htc_pktlog_svc_supported(struct ath10k *ar)
{
	u8 ul_pipe_id;
	u8 dl_pipe_id;
	int status;

	status = ath10k_hif_map_service_to_pipe(ar, ATH10K_HTC_SVC_ID_HTT_LOG_MSG,
						&ul_pipe_id,
						&dl_pipe_id);
	if (status) {
		ath10k_warn(ar, "unsupported HTC service id: %d\n",
			    ATH10K_HTC_SVC_ID_HTT_LOG_MSG);

		return false;
	}

	return true;
}
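
/* Final step of HTC setup: tell the target that setup is complete. On SDIO
 * the message also enables RX bundling using the bundle size negotiated in
 * ath10k_htc_wait_target(). The pktlog service is connected here as well,
 * provided the HIF can map it to a pipe.
 */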
int ath10k_htc_start(struct ath10k_htc *htc)
{
	struct ath10k *ar = htc->ar;
	struct sk_buff *skb;
	int status = 0;
	struct ath10k_htc_msg *msg;

	skb = ath10k_htc_build_tx_ctrl_skb(htc->ar);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, sizeof(msg->hdr) + sizeof(msg->setup_complete_ext));
	memset(skb->data, 0, skb->len);

	msg = (struct ath10k_htc_msg *)skb->data;
	msg->hdr.message_id =
		__cpu_to_le16(ATH10K_HTC_MSG_SETUP_COMPLETE_EX_ID);

	if (ar->hif.bus == ATH10K_BUS_SDIO) {
		/* Extra setup params used by SDIO */
		msg->setup_complete_ext.flags =
			__cpu_to_le32(ATH10K_HTC_SETUP_COMPLETE_FLAGS_RX_BNDL_EN);
		msg->setup_complete_ext.max_msgs_per_bundled_recv =
			htc->max_msgs_per_htc_bundle;
	}
	ath10k_dbg(ar, ATH10K_DBG_HTC, "HTC is using TX credit flow control\n");

	status = ath10k_htc_send(htc, ATH10K_HTC_EP_0, skb);
	if (status) {
		kfree_skb(skb);
		return status;
	}

	if (ath10k_htc_pktlog_svc_supported(ar)) {
		status = ath10k_htc_pktlog_connect(ar);
		if (status) {
			ath10k_err(ar, "failed to connect to pktlog: %d\n", status);
			return status;
		}
	}

	return 0;
}

/* registered target arrival callback from the HIF layer */
int ath10k_htc_init(struct ath10k *ar)
{
	int status;
	struct ath10k_htc *htc = &ar->htc;
	struct ath10k_htc_svc_conn_req conn_req;
	struct ath10k_htc_svc_conn_resp conn_resp;

	spin_lock_init(&htc->tx_lock);

	ath10k_htc_reset_endpoint_states(htc);

	htc->ar = ar;

	/* setup our pseudo HTC control endpoint connection */
	memset(&conn_req, 0, sizeof(conn_req));
	memset(&conn_resp, 0, sizeof(conn_resp));
	conn_req.ep_ops.ep_tx_complete = ath10k_htc_control_tx_complete;
	conn_req.ep_ops.ep_rx_complete = ath10k_htc_control_rx_complete;
	conn_req.max_send_queue_depth = ATH10K_NUM_CONTROL_TX_BUFFERS;
	conn_req.service_id = ATH10K_HTC_SVC_ID_RSVD_CTRL;

	/* connect fake service */
	status = ath10k_htc_connect_service(htc, &conn_req, &conn_resp);
	if (status) {
		ath10k_err(ar, "could not connect to htc service (%d)\n",
			   status);
		return status;
	}

	init_completion(&htc->ctl_resp);

	return 0;
}
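
/* For reference, a rough sketch of how the core code is expected to drive
 * this API during device bring-up (the authoritative sequence lives in
 * core.c, so treat this only as an illustration):
 *
 *	ath10k_htc_init(ar);              // connect pseudo ep0 control service
 *	ath10k_hif_start(ar);             // enable HIF so ep0 RX can complete
 *	ath10k_htc_wait_target(&ar->htc); // wait for the HTC READY message
 *	ath10k_htc_connect_service(&ar->htc, &conn_req, &conn_resp);
 *	                                  // once per WMI/HTT service
 *	ath10k_htc_start(&ar->htc);       // send SETUP_COMPLETE to the target
 */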