// SPDX-License-Identifier: ISC
/*
 * Copyright (c) 2014 Broadcom Corporation
 */

/*******************************************************************************
 * Communicates with the dongle by using dcmd codes.
 * For certain dcmd codes, the dongle interprets string data from the host.
 ******************************************************************************/

#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>

#include <brcmu_utils.h>
#include <brcmu_wifi.h>

#include "core.h"
#include "debug.h"
#include "proto.h"
#include "msgbuf.h"
#include "commonring.h"
#include "flowring.h"
#include "bus.h"
#include "tracepoint.h"


#define MSGBUF_IOCTL_RESP_TIMEOUT		msecs_to_jiffies(2000)

#define MSGBUF_TYPE_GEN_STATUS			0x1
#define MSGBUF_TYPE_RING_STATUS			0x2
#define MSGBUF_TYPE_FLOW_RING_CREATE		0x3
#define MSGBUF_TYPE_FLOW_RING_CREATE_CMPLT	0x4
#define MSGBUF_TYPE_FLOW_RING_DELETE		0x5
#define MSGBUF_TYPE_FLOW_RING_DELETE_CMPLT	0x6
#define MSGBUF_TYPE_FLOW_RING_FLUSH		0x7
#define MSGBUF_TYPE_FLOW_RING_FLUSH_CMPLT	0x8
#define MSGBUF_TYPE_IOCTLPTR_REQ		0x9
#define MSGBUF_TYPE_IOCTLPTR_REQ_ACK		0xA
#define MSGBUF_TYPE_IOCTLRESP_BUF_POST		0xB
#define MSGBUF_TYPE_IOCTL_CMPLT			0xC
#define MSGBUF_TYPE_EVENT_BUF_POST		0xD
#define MSGBUF_TYPE_WL_EVENT			0xE
#define MSGBUF_TYPE_TX_POST			0xF
#define MSGBUF_TYPE_TX_STATUS			0x10
#define MSGBUF_TYPE_RXBUF_POST			0x11
#define MSGBUF_TYPE_RX_CMPLT			0x12
#define MSGBUF_TYPE_LPBK_DMAXFER		0x13
#define MSGBUF_TYPE_LPBK_DMAXFER_CMPLT		0x14

#define NR_TX_PKTIDS				2048
#define NR_RX_PKTIDS				1024

#define BRCMF_IOCTL_REQ_PKTID			0xFFFE

#define BRCMF_MSGBUF_MAX_PKT_SIZE		2048
#define BRCMF_MSGBUF_MAX_CTL_PKT_SIZE		8192
#define BRCMF_MSGBUF_RXBUFPOST_THRESHOLD	32
#define BRCMF_MSGBUF_MAX_IOCTLRESPBUF_POST	8
#define BRCMF_MSGBUF_MAX_EVENTBUF_POST		8

#define BRCMF_MSGBUF_PKT_FLAGS_FRAME_802_3	0x01
#define BRCMF_MSGBUF_PKT_FLAGS_FRAME_802_11	0x02
#define BRCMF_MSGBUF_PKT_FLAGS_FRAME_MASK	0x07
#define BRCMF_MSGBUF_PKT_FLAGS_PRIO_SHIFT	5

#define BRCMF_MSGBUF_TX_FLUSH_CNT1		32
#define BRCMF_MSGBUF_TX_FLUSH_CNT2		96

#define BRCMF_MSGBUF_DELAY_TXWORKER_THRS	96
#define BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS	32
#define BRCMF_MSGBUF_UPDATE_RX_PTR_THRS		48

#define BRCMF_MAX_TXSTATUS_WAIT_RETRIES		10

struct msgbuf_common_hdr {
	u8				msgtype;
	u8				ifidx;
	u8				flags;
	u8				rsvd0;
	__le32				request_id;
};

struct msgbuf_ioctl_req_hdr {
	struct msgbuf_common_hdr	msg;
	__le32				cmd;
	__le16				trans_id;
	__le16				input_buf_len;
	__le16				output_buf_len;
	__le16				rsvd0[3];
	struct msgbuf_buf_addr		req_buf_addr;
	__le32				rsvd1[2];
};

struct msgbuf_tx_msghdr {
	struct msgbuf_common_hdr	msg;
	u8				txhdr[ETH_HLEN];
	u8				flags;
	u8				seg_cnt;
	struct msgbuf_buf_addr		metadata_buf_addr;
	struct msgbuf_buf_addr		data_buf_addr;
	__le16				metadata_buf_len;
	__le16				data_len;
	__le32				rsvd0;
};

struct msgbuf_rx_bufpost {
	struct msgbuf_common_hdr	msg;
	__le16				metadata_buf_len;
	__le16				data_buf_len;
	__le32				rsvd0;
	struct msgbuf_buf_addr		metadata_buf_addr;
	struct msgbuf_buf_addr		data_buf_addr;
};

struct msgbuf_rx_ioctl_resp_or_event {
	struct msgbuf_common_hdr	msg;
	__le16				host_buf_len;
	__le16				rsvd0[3];
	struct msgbuf_buf_addr		host_buf_addr;
	__le32				rsvd1[4];
};

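/* Completion header carried by every device-to-host completion message
 * below: status holds the firmware result code, flow_ring_id the
 * originating ring.
 */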
struct msgbuf_completion_hdr {
	__le16				status;
	__le16				flow_ring_id;
};

/* Data struct for the MSGBUF_TYPE_GEN_STATUS */
struct msgbuf_gen_status {
	struct msgbuf_common_hdr	msg;
	struct msgbuf_completion_hdr	compl_hdr;
	__le16				write_idx;
	__le32				rsvd0[3];
};

/* Data struct for the MSGBUF_TYPE_RING_STATUS */
struct msgbuf_ring_status {
	struct msgbuf_common_hdr	msg;
	struct msgbuf_completion_hdr	compl_hdr;
	__le16				write_idx;
	__le16				rsvd0[5];
};

struct msgbuf_rx_event {
	struct msgbuf_common_hdr	msg;
	struct msgbuf_completion_hdr	compl_hdr;
	__le16				event_data_len;
	__le16				seqnum;
	__le16				rsvd0[4];
};

struct msgbuf_ioctl_resp_hdr {
	struct msgbuf_common_hdr	msg;
	struct msgbuf_completion_hdr	compl_hdr;
	__le16				resp_len;
	__le16				trans_id;
	__le32				cmd;
	__le32				rsvd0;
};

struct msgbuf_tx_status {
	struct msgbuf_common_hdr	msg;
	struct msgbuf_completion_hdr	compl_hdr;
	__le16				metadata_len;
	__le16				tx_status;
};

struct msgbuf_rx_complete {
	struct msgbuf_common_hdr	msg;
	struct msgbuf_completion_hdr	compl_hdr;
	__le16				metadata_len;
	__le16				data_len;
	__le16				data_offset;
	__le16				flags;
	__le32				rx_status_0;
	__le32				rx_status_1;
	__le32				rsvd0;
};

struct msgbuf_tx_flowring_create_req {
	struct msgbuf_common_hdr	msg;
	u8				da[ETH_ALEN];
	u8				sa[ETH_ALEN];
	u8				tid;
	u8				if_flags;
	__le16				flow_ring_id;
	u8				tc;
	u8				priority;
	__le16				int_vector;
	__le16				max_items;
	__le16				len_item;
	struct msgbuf_buf_addr		flow_ring_addr;
};

struct msgbuf_tx_flowring_delete_req {
	struct msgbuf_common_hdr	msg;
	__le16				flow_ring_id;
	__le16				reason;
	__le32				rsvd0[7];
};

struct msgbuf_flowring_create_resp {
	struct msgbuf_common_hdr	msg;
	struct msgbuf_completion_hdr	compl_hdr;
	__le32				rsvd0[3];
};

struct msgbuf_flowring_delete_resp {
	struct msgbuf_common_hdr	msg;
	struct msgbuf_completion_hdr	compl_hdr;
	__le32				rsvd0[3];
};

struct msgbuf_flowring_flush_resp {
	struct msgbuf_common_hdr	msg;
	struct msgbuf_completion_hdr	compl_hdr;
	__le32				rsvd0[3];
};

struct brcmf_msgbuf_work_item {
	struct list_head queue;
	u32 flowid;
	int ifidx;
	u8 sa[ETH_ALEN];
	u8 da[ETH_ALEN];
};

struct brcmf_msgbuf {
	struct brcmf_pub *drvr;

	struct brcmf_commonring **commonrings;
	struct brcmf_commonring **flowrings;
	dma_addr_t *flowring_dma_handle;

	u16 max_flowrings;
	u16 max_submissionrings;
	u16 max_completionrings;

	u16 rx_dataoffset;
	u32 max_rxbufpost;
	u16 rx_metadata_offset;
	u32 rxbufpost;

	u32 max_ioctlrespbuf;
	u32 cur_ioctlrespbuf;
	u32 max_eventbuf;
	u32 cur_eventbuf;

	void *ioctbuf;
	dma_addr_t ioctbuf_handle;
	u32 ioctbuf_phys_hi;
	u32 ioctbuf_phys_lo;
	int ioctl_resp_status;
	u32 ioctl_resp_ret_len;
	u32 ioctl_resp_pktid;

	u16 data_seq_no;
	u16 ioctl_seq_no;
	u32 reqid;
	wait_queue_head_t ioctl_resp_wait;
	bool ctl_completed;

	struct brcmf_msgbuf_pktids *tx_pktids;
	struct brcmf_msgbuf_pktids *rx_pktids;
	struct brcmf_flowring *flow;

	struct workqueue_struct *txflow_wq;
	struct work_struct txflow_work;
	unsigned long *flow_map;
	unsigned long *txstatus_done_map;

	struct work_struct flowring_work;
	spinlock_t flowring_work_lock;
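	/* pending flowring create work items, protected by
	 * flowring_work_lock
	 */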
	struct list_head work_queue;
};

struct brcmf_msgbuf_pktid {
	atomic_t allocated;
	u16 data_offset;
	struct sk_buff *skb;
	dma_addr_t physaddr;
};

struct brcmf_msgbuf_pktids {
	u32 array_size;
	u32 last_allocated_idx;
	enum dma_data_direction direction;
	struct brcmf_msgbuf_pktid *array;
};

static void brcmf_msgbuf_rxbuf_ioctlresp_post(struct brcmf_msgbuf *msgbuf);


static struct brcmf_msgbuf_pktids *
brcmf_msgbuf_init_pktids(u32 nr_array_entries,
			 enum dma_data_direction direction)
{
	struct brcmf_msgbuf_pktid *array;
	struct brcmf_msgbuf_pktids *pktids;

	array = kcalloc(nr_array_entries, sizeof(*array), GFP_KERNEL);
	if (!array)
		return NULL;

	pktids = kzalloc(sizeof(*pktids), GFP_KERNEL);
	if (!pktids) {
		kfree(array);
		return NULL;
	}
	pktids->array = array;
	pktids->array_size = nr_array_entries;
	/* store the DMA direction so the map and unmap calls below stay
	 * symmetric
	 */
	pktids->direction = direction;

	return pktids;
}


static int
brcmf_msgbuf_alloc_pktid(struct device *dev,
			 struct brcmf_msgbuf_pktids *pktids,
			 struct sk_buff *skb, u16 data_offset,
			 dma_addr_t *physaddr, u32 *idx)
{
	struct brcmf_msgbuf_pktid *array;
	u32 count;

	array = pktids->array;

	*physaddr = dma_map_single(dev, skb->data + data_offset,
				   skb->len - data_offset, pktids->direction);

	if (dma_mapping_error(dev, *physaddr)) {
		brcmf_err("dma_map_single failed !!\n");
		return -ENOMEM;
	}

	*idx = pktids->last_allocated_idx;

	count = 0;
	do {
		(*idx)++;
		if (*idx == pktids->array_size)
			*idx = 0;
		if (array[*idx].allocated.counter == 0)
			if (atomic_cmpxchg(&array[*idx].allocated, 0, 1) == 0)
				break;
		count++;
	} while (count < pktids->array_size);

	if (count == pktids->array_size)
		return -ENOMEM;

	array[*idx].data_offset = data_offset;
	array[*idx].physaddr = *physaddr;
	array[*idx].skb = skb;

	pktids->last_allocated_idx = *idx;

	return 0;
}


static struct sk_buff *
brcmf_msgbuf_get_pktid(struct device *dev, struct brcmf_msgbuf_pktids *pktids,
		       u32 idx)
{
	struct brcmf_msgbuf_pktid *pktid;
	struct sk_buff *skb;

	if (idx >= pktids->array_size) {
		brcmf_err("Invalid packet id %d (max %d)\n", idx,
			  pktids->array_size);
		return NULL;
	}
	if (pktids->array[idx].allocated.counter) {
		pktid = &pktids->array[idx];
		dma_unmap_single(dev, pktid->physaddr,
				 pktid->skb->len - pktid->data_offset,
				 pktids->direction);
		skb = pktid->skb;
		pktid->allocated.counter = 0;
		return skb;
	} else {
		brcmf_err("Invalid packet id %d (not in use)\n", idx);
	}

	return NULL;
}


static void
brcmf_msgbuf_release_array(struct device *dev,
			   struct brcmf_msgbuf_pktids *pktids)
{
	struct brcmf_msgbuf_pktid *array;
	struct brcmf_msgbuf_pktid *pktid;
	u32 count;

	array = pktids->array;
	count = 0;
	do {
		if (array[count].allocated.counter) {
			pktid = &array[count];
			dma_unmap_single(dev, pktid->physaddr,
					 pktid->skb->len - pktid->data_offset,
					 pktids->direction);
			brcmu_pkt_buf_free_skb(pktid->skb);
		}
		count++;
	} while (count < pktids->array_size);

	kfree(array);
	kfree(pktids);
}


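/* Release both pktid arrays, unmapping and freeing any packets still
 * outstanding when the protocol layer goes away.
 */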
static void brcmf_msgbuf_release_pktids(struct brcmf_msgbuf *msgbuf)
{
	if (msgbuf->rx_pktids)
		brcmf_msgbuf_release_array(msgbuf->drvr->bus_if->dev,
					   msgbuf->rx_pktids);
	if (msgbuf->tx_pktids)
		brcmf_msgbuf_release_array(msgbuf->drvr->bus_if->dev,
					   msgbuf->tx_pktids);
}


static int brcmf_msgbuf_tx_ioctl(struct brcmf_pub *drvr, int ifidx,
				 uint cmd, void *buf, uint len)
{
	struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
	struct brcmf_commonring *commonring;
	struct msgbuf_ioctl_req_hdr *request;
	u16 buf_len;
	void *ret_ptr;
	int err;

	commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_CONTROL_SUBMIT];
	brcmf_commonring_lock(commonring);
	ret_ptr = brcmf_commonring_reserve_for_write(commonring);
	if (!ret_ptr) {
		bphy_err(drvr, "Failed to reserve space in commonring\n");
		brcmf_commonring_unlock(commonring);
		return -ENOMEM;
	}

	msgbuf->reqid++;

	request = (struct msgbuf_ioctl_req_hdr *)ret_ptr;
	request->msg.msgtype = MSGBUF_TYPE_IOCTLPTR_REQ;
	request->msg.ifidx = (u8)ifidx;
	request->msg.flags = 0;
	request->msg.request_id = cpu_to_le32(BRCMF_IOCTL_REQ_PKTID);
	request->cmd = cpu_to_le32(cmd);
	request->output_buf_len = cpu_to_le16(len);
	request->trans_id = cpu_to_le16(msgbuf->reqid);

	buf_len = min_t(u16, len, BRCMF_TX_IOCTL_MAX_MSG_SIZE);
	request->input_buf_len = cpu_to_le16(buf_len);
	request->req_buf_addr.high_addr = cpu_to_le32(msgbuf->ioctbuf_phys_hi);
	request->req_buf_addr.low_addr = cpu_to_le32(msgbuf->ioctbuf_phys_lo);
	if (buf)
		memcpy(msgbuf->ioctbuf, buf, buf_len);
	else
		memset(msgbuf->ioctbuf, 0, buf_len);

	err = brcmf_commonring_write_complete(commonring);
	brcmf_commonring_unlock(commonring);

	return err;
}


static int brcmf_msgbuf_ioctl_resp_wait(struct brcmf_msgbuf *msgbuf)
{
	return wait_event_timeout(msgbuf->ioctl_resp_wait,
				  msgbuf->ctl_completed,
				  MSGBUF_IOCTL_RESP_TIMEOUT);
}


static void brcmf_msgbuf_ioctl_resp_wake(struct brcmf_msgbuf *msgbuf)
{
	msgbuf->ctl_completed = true;
	wake_up(&msgbuf->ioctl_resp_wait);
}


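/* Submit an ioctl request and block until the matching
 * MSGBUF_TYPE_IOCTL_CMPLT arrives or MSGBUF_IOCTL_RESP_TIMEOUT expires;
 * the response payload is copied back into the caller's buffer.
 */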
static int brcmf_msgbuf_query_dcmd(struct brcmf_pub *drvr, int ifidx,
				   uint cmd, void *buf, uint len, int *fwerr)
{
	struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
	struct sk_buff *skb = NULL;
	int timeout;
	int err;

	brcmf_dbg(MSGBUF, "ifidx=%d, cmd=%d, len=%d\n", ifidx, cmd, len);
	*fwerr = 0;
	msgbuf->ctl_completed = false;
	err = brcmf_msgbuf_tx_ioctl(drvr, ifidx, cmd, buf, len);
	if (err)
		return err;

	timeout = brcmf_msgbuf_ioctl_resp_wait(msgbuf);
	if (!timeout) {
		bphy_err(drvr, "Timeout on response for query command\n");
		return -EIO;
	}

	skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
				     msgbuf->rx_pktids,
				     msgbuf->ioctl_resp_pktid);
	if (msgbuf->ioctl_resp_ret_len != 0) {
		if (!skb)
			return -EBADF;

		memcpy(buf, skb->data, (len < msgbuf->ioctl_resp_ret_len) ?
				       len : msgbuf->ioctl_resp_ret_len);
	}
	brcmu_pkt_buf_free_skb(skb);

	*fwerr = msgbuf->ioctl_resp_status;
	return 0;
}


static int brcmf_msgbuf_set_dcmd(struct brcmf_pub *drvr, int ifidx,
				 uint cmd, void *buf, uint len, int *fwerr)
{
	return brcmf_msgbuf_query_dcmd(drvr, ifidx, cmd, buf, len, fwerr);
}


static int brcmf_msgbuf_hdrpull(struct brcmf_pub *drvr, bool do_fws,
				struct sk_buff *skb, struct brcmf_if **ifp)
{
	return -ENODEV;
}

static void brcmf_msgbuf_rxreorder(struct brcmf_if *ifp, struct sk_buff *skb)
{
}

static void
brcmf_msgbuf_remove_flowring(struct brcmf_msgbuf *msgbuf, u16 flowid)
{
	u32 dma_sz;
	void *dma_buf;

	brcmf_dbg(MSGBUF, "Removing flowring %d\n", flowid);

	dma_sz = BRCMF_H2D_TXFLOWRING_MAX_ITEM * BRCMF_H2D_TXFLOWRING_ITEMSIZE;
	dma_buf = msgbuf->flowrings[flowid]->buf_addr;
	dma_free_coherent(msgbuf->drvr->bus_if->dev, dma_sz, dma_buf,
			  msgbuf->flowring_dma_handle[flowid]);

	brcmf_flowring_delete(msgbuf->flow, flowid);
}


static struct brcmf_msgbuf_work_item *
brcmf_msgbuf_dequeue_work(struct brcmf_msgbuf *msgbuf)
{
	struct brcmf_msgbuf_work_item *work = NULL;
	ulong flags;

	spin_lock_irqsave(&msgbuf->flowring_work_lock, flags);
	if (!list_empty(&msgbuf->work_queue)) {
		work = list_first_entry(&msgbuf->work_queue,
					struct brcmf_msgbuf_work_item, queue);
		list_del(&work->queue);
	}
	spin_unlock_irqrestore(&msgbuf->flowring_work_lock, flags);

	return work;
}


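/* Allocate the DMA ring for a new flowring and send the
 * MSGBUF_TYPE_FLOW_RING_CREATE request over the control submit ring.
 * Runs from the flowring work queue, so GFP_KERNEL allocation is safe.
 */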
static u32
brcmf_msgbuf_flowring_create_worker(struct brcmf_msgbuf *msgbuf,
				    struct brcmf_msgbuf_work_item *work)
{
	struct brcmf_pub *drvr = msgbuf->drvr;
	struct msgbuf_tx_flowring_create_req *create;
	struct brcmf_commonring *commonring;
	void *ret_ptr;
	u32 flowid;
	void *dma_buf;
	u32 dma_sz;
	u64 address;
	int err;

	flowid = work->flowid;
	dma_sz = BRCMF_H2D_TXFLOWRING_MAX_ITEM * BRCMF_H2D_TXFLOWRING_ITEMSIZE;
	dma_buf = dma_alloc_coherent(msgbuf->drvr->bus_if->dev, dma_sz,
				     &msgbuf->flowring_dma_handle[flowid],
				     GFP_KERNEL);
	if (!dma_buf) {
		bphy_err(drvr, "dma_alloc_coherent failed\n");
		brcmf_flowring_delete(msgbuf->flow, flowid);
		return BRCMF_FLOWRING_INVALID_ID;
	}

	brcmf_commonring_config(msgbuf->flowrings[flowid],
				BRCMF_H2D_TXFLOWRING_MAX_ITEM,
				BRCMF_H2D_TXFLOWRING_ITEMSIZE, dma_buf);

	commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_CONTROL_SUBMIT];
	brcmf_commonring_lock(commonring);
	ret_ptr = brcmf_commonring_reserve_for_write(commonring);
	if (!ret_ptr) {
		bphy_err(drvr, "Failed to reserve space in commonring\n");
		brcmf_commonring_unlock(commonring);
		brcmf_msgbuf_remove_flowring(msgbuf, flowid);
		return BRCMF_FLOWRING_INVALID_ID;
	}

	create = (struct msgbuf_tx_flowring_create_req *)ret_ptr;
	create->msg.msgtype = MSGBUF_TYPE_FLOW_RING_CREATE;
	create->msg.ifidx = work->ifidx;
	create->msg.request_id = 0;
	create->tid = brcmf_flowring_tid(msgbuf->flow, flowid);
	create->flow_ring_id = cpu_to_le16(flowid +
					   BRCMF_H2D_MSGRING_FLOWRING_IDSTART);
	memcpy(create->sa, work->sa, ETH_ALEN);
	memcpy(create->da, work->da, ETH_ALEN);
	address = (u64)msgbuf->flowring_dma_handle[flowid];
	create->flow_ring_addr.high_addr = cpu_to_le32(address >> 32);
	create->flow_ring_addr.low_addr = cpu_to_le32(address & 0xffffffff);
	create->max_items = cpu_to_le16(BRCMF_H2D_TXFLOWRING_MAX_ITEM);
	create->len_item = cpu_to_le16(BRCMF_H2D_TXFLOWRING_ITEMSIZE);

	brcmf_dbg(MSGBUF, "Send Flow Create Req flow ID %d for peer %pM prio %d ifindex %d\n",
		  flowid, work->da, create->tid, work->ifidx);

	err = brcmf_commonring_write_complete(commonring);
	brcmf_commonring_unlock(commonring);
	if (err) {
		bphy_err(drvr, "Failed to write commonring\n");
		brcmf_msgbuf_remove_flowring(msgbuf, flowid);
		return BRCMF_FLOWRING_INVALID_ID;
	}

	return flowid;
}


static void brcmf_msgbuf_flowring_worker(struct work_struct *work)
{
	struct brcmf_msgbuf *msgbuf;
	struct brcmf_msgbuf_work_item *create;

	msgbuf = container_of(work, struct brcmf_msgbuf, flowring_work);

	while ((create = brcmf_msgbuf_dequeue_work(msgbuf))) {
		brcmf_msgbuf_flowring_create_worker(msgbuf, create);
		kfree(create);
	}
}


static u32 brcmf_msgbuf_flowring_create(struct brcmf_msgbuf *msgbuf, int ifidx,
					struct sk_buff *skb)
{
	struct brcmf_msgbuf_work_item *create;
	struct ethhdr *eh = (struct ethhdr *)(skb->data);
	u32 flowid;
	ulong flags;

	create = kzalloc(sizeof(*create), GFP_ATOMIC);
	if (create == NULL)
		return BRCMF_FLOWRING_INVALID_ID;

	flowid = brcmf_flowring_create(msgbuf->flow, eh->h_dest,
				       skb->priority, ifidx);
	if (flowid == BRCMF_FLOWRING_INVALID_ID) {
		kfree(create);
		return flowid;
	}

	create->flowid = flowid;
	create->ifidx = ifidx;
	memcpy(create->sa, eh->h_source, ETH_ALEN);
	memcpy(create->da, eh->h_dest, ETH_ALEN);

	spin_lock_irqsave(&msgbuf->flowring_work_lock, flags);
	list_add_tail(&create->queue, &msgbuf->work_queue);
	spin_unlock_irqrestore(&msgbuf->flowring_work_lock, flags);
	schedule_work(&msgbuf->flowring_work);

	return flowid;
}


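/* Drain queued skbs for one flowring into its DMA ring. Each packet is
 * mapped, assigned a pktid and described by a MSGBUF_TYPE_TX_POST item;
 * the write pointer is published to the device in batches.
 */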
static void brcmf_msgbuf_txflow(struct brcmf_msgbuf *msgbuf, u16 flowid)
{
	struct brcmf_flowring *flow = msgbuf->flow;
	struct brcmf_pub *drvr = msgbuf->drvr;
	struct brcmf_commonring *commonring;
	void *ret_ptr;
	u32 count;
	struct sk_buff *skb;
	dma_addr_t physaddr;
	u32 pktid;
	struct msgbuf_tx_msghdr *tx_msghdr;
	u64 address;

	commonring = msgbuf->flowrings[flowid];
	if (!brcmf_commonring_write_available(commonring))
		return;

	brcmf_commonring_lock(commonring);

	count = BRCMF_MSGBUF_TX_FLUSH_CNT2 - BRCMF_MSGBUF_TX_FLUSH_CNT1;
	while (brcmf_flowring_qlen(flow, flowid)) {
		skb = brcmf_flowring_dequeue(flow, flowid);
		if (skb == NULL) {
			bphy_err(drvr, "No SKB, but qlen %d\n",
				 brcmf_flowring_qlen(flow, flowid));
			break;
		}
		skb_orphan(skb);
		if (brcmf_msgbuf_alloc_pktid(msgbuf->drvr->bus_if->dev,
					     msgbuf->tx_pktids, skb, ETH_HLEN,
					     &physaddr, &pktid)) {
			brcmf_flowring_reinsert(flow, flowid, skb);
			bphy_err(drvr, "No PKTID available !!\n");
			break;
		}
		ret_ptr = brcmf_commonring_reserve_for_write(commonring);
		if (!ret_ptr) {
			brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
					       msgbuf->tx_pktids, pktid);
			brcmf_flowring_reinsert(flow, flowid, skb);
			break;
		}
		count++;

		tx_msghdr = (struct msgbuf_tx_msghdr *)ret_ptr;

		tx_msghdr->msg.msgtype = MSGBUF_TYPE_TX_POST;
		tx_msghdr->msg.request_id = cpu_to_le32(pktid + 1);
		tx_msghdr->msg.ifidx = brcmf_flowring_ifidx_get(flow, flowid);
		tx_msghdr->flags = BRCMF_MSGBUF_PKT_FLAGS_FRAME_802_3;
		tx_msghdr->flags |= (skb->priority & 0x07) <<
				    BRCMF_MSGBUF_PKT_FLAGS_PRIO_SHIFT;
		tx_msghdr->seg_cnt = 1;
		memcpy(tx_msghdr->txhdr, skb->data, ETH_HLEN);
		tx_msghdr->data_len = cpu_to_le16(skb->len - ETH_HLEN);
		address = (u64)physaddr;
		tx_msghdr->data_buf_addr.high_addr = cpu_to_le32(address >> 32);
		tx_msghdr->data_buf_addr.low_addr =
			cpu_to_le32(address & 0xffffffff);
		tx_msghdr->metadata_buf_len = 0;
		tx_msghdr->metadata_buf_addr.high_addr = 0;
		tx_msghdr->metadata_buf_addr.low_addr = 0;
		atomic_inc(&commonring->outstanding_tx);
		if (count >= BRCMF_MSGBUF_TX_FLUSH_CNT2) {
			brcmf_commonring_write_complete(commonring);
			count = 0;
		}
	}
	if (count)
		brcmf_commonring_write_complete(commonring);
	brcmf_commonring_unlock(commonring);
}


static void brcmf_msgbuf_txflow_worker(struct work_struct *worker)
{
	struct brcmf_msgbuf *msgbuf;
	u32 flowid;

	msgbuf = container_of(worker, struct brcmf_msgbuf, txflow_work);
	for_each_set_bit(flowid, msgbuf->flow_map, msgbuf->max_flowrings) {
		clear_bit(flowid, msgbuf->flow_map);
		brcmf_msgbuf_txflow(msgbuf, flowid);
	}
}


static int brcmf_msgbuf_schedule_txdata(struct brcmf_msgbuf *msgbuf, u32 flowid,
					bool force)
{
	struct brcmf_commonring *commonring;

	set_bit(flowid, msgbuf->flow_map);
	commonring = msgbuf->flowrings[flowid];
	if ((force) || (atomic_read(&commonring->outstanding_tx) <
			BRCMF_MSGBUF_DELAY_TXWORKER_THRS))
		queue_work(msgbuf->txflow_wq, &msgbuf->txflow_work);

	return 0;
}


static int brcmf_msgbuf_tx_queue_data(struct brcmf_pub *drvr, int ifidx,
				      struct sk_buff *skb)
{
	struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
	struct brcmf_flowring *flow = msgbuf->flow;
	struct ethhdr *eh = (struct ethhdr *)(skb->data);
	u32 flowid;
	u32 queue_count;
	bool force;

	flowid = brcmf_flowring_lookup(flow, eh->h_dest, skb->priority, ifidx);
	if (flowid == BRCMF_FLOWRING_INVALID_ID) {
		flowid = brcmf_msgbuf_flowring_create(msgbuf, ifidx, skb);
		if (flowid == BRCMF_FLOWRING_INVALID_ID) {
			return -ENOMEM;
		} else {
			brcmf_flowring_enqueue(flow, flowid, skb);
			return 0;
		}
	}
	queue_count = brcmf_flowring_enqueue(flow, flowid, skb);
	force = ((queue_count % BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS)	== 0);
	brcmf_msgbuf_schedule_txdata(msgbuf, flowid, force);

	return 0;
}


static void
brcmf_msgbuf_configure_addr_mode(struct brcmf_pub *drvr, int ifidx,
				 enum proto_addr_mode addr_mode)
{
	struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;

	brcmf_flowring_configure_addr_mode(msgbuf->flow, ifidx, addr_mode);
}


static void
brcmf_msgbuf_delete_peer(struct brcmf_pub *drvr, int ifidx, u8 peer[ETH_ALEN])
{
	struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;

	brcmf_flowring_delete_peer(msgbuf->flow, ifidx, peer);
}


static void
brcmf_msgbuf_add_tdls_peer(struct brcmf_pub *drvr, int ifidx, u8 peer[ETH_ALEN])
{
	struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;

	brcmf_flowring_add_tdls_peer(msgbuf->flow, ifidx, peer);
}


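/* Record status, length and pktid from an ioctl completion, wake the
 * waiter in brcmf_msgbuf_query_dcmd() and repost an ioctl response
 * buffer to the device.
 */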
static void
brcmf_msgbuf_process_ioctl_complete(struct brcmf_msgbuf *msgbuf, void *buf)
{
	struct msgbuf_ioctl_resp_hdr *ioctl_resp;

	ioctl_resp = (struct msgbuf_ioctl_resp_hdr *)buf;

	msgbuf->ioctl_resp_status =
			(s16)le16_to_cpu(ioctl_resp->compl_hdr.status);
	msgbuf->ioctl_resp_ret_len = le16_to_cpu(ioctl_resp->resp_len);
	msgbuf->ioctl_resp_pktid = le32_to_cpu(ioctl_resp->msg.request_id);

	brcmf_msgbuf_ioctl_resp_wake(msgbuf);

	if (msgbuf->cur_ioctlrespbuf)
		msgbuf->cur_ioctlrespbuf--;
	brcmf_msgbuf_rxbuf_ioctlresp_post(msgbuf);
}


static void
brcmf_msgbuf_process_txstatus(struct brcmf_msgbuf *msgbuf, void *buf)
{
	struct brcmf_commonring *commonring;
	struct msgbuf_tx_status *tx_status;
	u32 idx;
	struct sk_buff *skb;
	u16 flowid;

	tx_status = (struct msgbuf_tx_status *)buf;
	idx = le32_to_cpu(tx_status->msg.request_id) - 1;
	flowid = le16_to_cpu(tx_status->compl_hdr.flow_ring_id);
	flowid -= BRCMF_H2D_MSGRING_FLOWRING_IDSTART;
	skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
				     msgbuf->tx_pktids, idx);
	if (!skb)
		return;

	set_bit(flowid, msgbuf->txstatus_done_map);
	commonring = msgbuf->flowrings[flowid];
	atomic_dec(&commonring->outstanding_tx);

	brcmf_txfinalize(brcmf_get_ifp(msgbuf->drvr, tx_status->msg.ifidx),
			 skb, true);
}


static u32 brcmf_msgbuf_rxbuf_data_post(struct brcmf_msgbuf *msgbuf, u32 count)
{
	struct brcmf_pub *drvr = msgbuf->drvr;
	struct brcmf_commonring *commonring;
	void *ret_ptr;
	struct sk_buff *skb;
	u16 alloced;
	u32 pktlen;
	dma_addr_t physaddr;
	struct msgbuf_rx_bufpost *rx_bufpost;
	u64 address;
	u32 pktid;
	u32 i;

	commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_RXPOST_SUBMIT];
	ret_ptr = brcmf_commonring_reserve_for_write_multiple(commonring,
							      count,
							      &alloced);
	if (!ret_ptr) {
		brcmf_dbg(MSGBUF, "Failed to reserve space in commonring\n");
		return 0;
	}

	for (i = 0; i < alloced; i++) {
		rx_bufpost = (struct msgbuf_rx_bufpost *)ret_ptr;
		memset(rx_bufpost, 0, sizeof(*rx_bufpost));

		skb = brcmu_pkt_buf_get_skb(BRCMF_MSGBUF_MAX_PKT_SIZE);

		if (skb == NULL) {
			bphy_err(drvr, "Failed to alloc SKB\n");
			brcmf_commonring_write_cancel(commonring, alloced - i);
			break;
		}

		pktlen = skb->len;
		if (brcmf_msgbuf_alloc_pktid(msgbuf->drvr->bus_if->dev,
					     msgbuf->rx_pktids, skb, 0,
					     &physaddr, &pktid)) {
			dev_kfree_skb_any(skb);
			bphy_err(drvr, "No PKTID available !!\n");
			brcmf_commonring_write_cancel(commonring, alloced - i);
			break;
		}

		if (msgbuf->rx_metadata_offset) {
			address = (u64)physaddr;
			rx_bufpost->metadata_buf_len =
				cpu_to_le16(msgbuf->rx_metadata_offset);
			rx_bufpost->metadata_buf_addr.high_addr =
				cpu_to_le32(address >> 32);
			rx_bufpost->metadata_buf_addr.low_addr =
				cpu_to_le32(address & 0xffffffff);

			skb_pull(skb, msgbuf->rx_metadata_offset);
			pktlen = skb->len;
			physaddr += msgbuf->rx_metadata_offset;
		}
		rx_bufpost->msg.msgtype = MSGBUF_TYPE_RXBUF_POST;
		rx_bufpost->msg.request_id = cpu_to_le32(pktid);

		address = (u64)physaddr;
		rx_bufpost->data_buf_len = cpu_to_le16((u16)pktlen);
		rx_bufpost->data_buf_addr.high_addr =
			cpu_to_le32(address >> 32);
		rx_bufpost->data_buf_addr.low_addr =
			cpu_to_le32(address & 0xffffffff);

		ret_ptr += brcmf_commonring_len_item(commonring);
	}

	if (i)
		brcmf_commonring_write_complete(commonring);

	return i;
}


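/* Keep the rx buffer pool topped up: post fresh buffers until
 * max_rxbufpost is reached or the submit ring runs out of room.
 */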
static void
brcmf_msgbuf_rxbuf_data_fill(struct brcmf_msgbuf *msgbuf)
{
	u32 fillbufs;
	u32 retcount;

	fillbufs = msgbuf->max_rxbufpost - msgbuf->rxbufpost;

	while (fillbufs) {
		retcount = brcmf_msgbuf_rxbuf_data_post(msgbuf, fillbufs);
		if (!retcount)
			break;
		msgbuf->rxbufpost += retcount;
		fillbufs -= retcount;
	}
}


static void
brcmf_msgbuf_update_rxbufpost_count(struct brcmf_msgbuf *msgbuf, u16 rxcnt)
{
	msgbuf->rxbufpost -= rxcnt;
	if (msgbuf->rxbufpost <= (msgbuf->max_rxbufpost -
				  BRCMF_MSGBUF_RXBUFPOST_THRESHOLD))
		brcmf_msgbuf_rxbuf_data_fill(msgbuf);
}


static u32
brcmf_msgbuf_rxbuf_ctrl_post(struct brcmf_msgbuf *msgbuf, bool event_buf,
			     u32 count)
{
	struct brcmf_pub *drvr = msgbuf->drvr;
	struct brcmf_commonring *commonring;
	void *ret_ptr;
	struct sk_buff *skb;
	u16 alloced;
	u32 pktlen;
	dma_addr_t physaddr;
	struct msgbuf_rx_ioctl_resp_or_event *rx_bufpost;
	u64 address;
	u32 pktid;
	u32 i;

	commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_CONTROL_SUBMIT];
	brcmf_commonring_lock(commonring);
	ret_ptr = brcmf_commonring_reserve_for_write_multiple(commonring,
							      count,
							      &alloced);
	if (!ret_ptr) {
		bphy_err(drvr, "Failed to reserve space in commonring\n");
		brcmf_commonring_unlock(commonring);
		return 0;
	}

	for (i = 0; i < alloced; i++) {
		rx_bufpost = (struct msgbuf_rx_ioctl_resp_or_event *)ret_ptr;
		memset(rx_bufpost, 0, sizeof(*rx_bufpost));

		skb = brcmu_pkt_buf_get_skb(BRCMF_MSGBUF_MAX_CTL_PKT_SIZE);

		if (skb == NULL) {
			bphy_err(drvr, "Failed to alloc SKB\n");
			brcmf_commonring_write_cancel(commonring, alloced - i);
			break;
		}

		pktlen = skb->len;
		if (brcmf_msgbuf_alloc_pktid(msgbuf->drvr->bus_if->dev,
					     msgbuf->rx_pktids, skb, 0,
					     &physaddr, &pktid)) {
			dev_kfree_skb_any(skb);
			bphy_err(drvr, "No PKTID available !!\n");
			brcmf_commonring_write_cancel(commonring, alloced - i);
			break;
		}
		if (event_buf)
			rx_bufpost->msg.msgtype = MSGBUF_TYPE_EVENT_BUF_POST;
		else
			rx_bufpost->msg.msgtype =
				MSGBUF_TYPE_IOCTLRESP_BUF_POST;
		rx_bufpost->msg.request_id = cpu_to_le32(pktid);

		address = (u64)physaddr;
		rx_bufpost->host_buf_len = cpu_to_le16((u16)pktlen);
		rx_bufpost->host_buf_addr.high_addr =
			cpu_to_le32(address >> 32);
		rx_bufpost->host_buf_addr.low_addr =
			cpu_to_le32(address & 0xffffffff);

		ret_ptr += brcmf_commonring_len_item(commonring);
	}

	if (i)
		brcmf_commonring_write_complete(commonring);

	brcmf_commonring_unlock(commonring);

	return i;
}


static void brcmf_msgbuf_rxbuf_ioctlresp_post(struct brcmf_msgbuf *msgbuf)
{
	u32 count;

	count = msgbuf->max_ioctlrespbuf - msgbuf->cur_ioctlrespbuf;
	count = brcmf_msgbuf_rxbuf_ctrl_post(msgbuf, false, count);
	msgbuf->cur_ioctlrespbuf += count;
}


static void brcmf_msgbuf_rxbuf_event_post(struct brcmf_msgbuf *msgbuf)
{
	u32 count;

	count = msgbuf->max_eventbuf - msgbuf->cur_eventbuf;
	count = brcmf_msgbuf_rxbuf_ctrl_post(msgbuf, true, count);
	msgbuf->cur_eventbuf += count;
}


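/* Handle a MSGBUF_TYPE_WL_EVENT completion: reclaim the posted event
 * buffer, trim it to the reported length and hand it to the firmware
 * event handler for the receiving interface.
 */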
static void brcmf_msgbuf_process_event(struct brcmf_msgbuf *msgbuf, void *buf)
{
	struct brcmf_pub *drvr = msgbuf->drvr;
	struct msgbuf_rx_event *event;
	u32 idx;
	u16 buflen;
	struct sk_buff *skb;
	struct brcmf_if *ifp;

	event = (struct msgbuf_rx_event *)buf;
	idx = le32_to_cpu(event->msg.request_id);
	buflen = le16_to_cpu(event->event_data_len);

	if (msgbuf->cur_eventbuf)
		msgbuf->cur_eventbuf--;
	brcmf_msgbuf_rxbuf_event_post(msgbuf);

	skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
				     msgbuf->rx_pktids, idx);
	if (!skb)
		return;

	if (msgbuf->rx_dataoffset)
		skb_pull(skb, msgbuf->rx_dataoffset);

	skb_trim(skb, buflen);

	ifp = brcmf_get_ifp(msgbuf->drvr, event->msg.ifidx);
	if (!ifp || !ifp->ndev) {
		bphy_err(drvr, "Received pkt for invalid ifidx %d\n",
			 event->msg.ifidx);
		goto exit;
	}

	skb->protocol = eth_type_trans(skb, ifp->ndev);

	brcmf_fweh_process_skb(ifp->drvr, skb, 0, GFP_KERNEL);

exit:
	brcmu_pkt_buf_free_skb(skb);
}


static void
brcmf_msgbuf_process_rx_complete(struct brcmf_msgbuf *msgbuf, void *buf)
{
	struct brcmf_pub *drvr = msgbuf->drvr;
	struct msgbuf_rx_complete *rx_complete;
	struct sk_buff *skb;
	u16 data_offset;
	u16 buflen;
	u16 flags;
	u32 idx;
	struct brcmf_if *ifp;

	brcmf_msgbuf_update_rxbufpost_count(msgbuf, 1);

	rx_complete = (struct msgbuf_rx_complete *)buf;
	data_offset = le16_to_cpu(rx_complete->data_offset);
	buflen = le16_to_cpu(rx_complete->data_len);
	idx = le32_to_cpu(rx_complete->msg.request_id);
	flags = le16_to_cpu(rx_complete->flags);

	skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
				     msgbuf->rx_pktids, idx);
	if (!skb)
		return;

	if (data_offset)
		skb_pull(skb, data_offset);
	else if (msgbuf->rx_dataoffset)
		skb_pull(skb, msgbuf->rx_dataoffset);

	skb_trim(skb, buflen);

	if ((flags & BRCMF_MSGBUF_PKT_FLAGS_FRAME_MASK) ==
	    BRCMF_MSGBUF_PKT_FLAGS_FRAME_802_11) {
		ifp = msgbuf->drvr->mon_if;

		if (!ifp) {
			bphy_err(drvr, "Received unexpected monitor pkt\n");
			brcmu_pkt_buf_free_skb(skb);
			return;
		}

		brcmf_netif_mon_rx(ifp, skb);
		return;
	}

	ifp = brcmf_get_ifp(msgbuf->drvr, rx_complete->msg.ifidx);
	if (!ifp || !ifp->ndev) {
		bphy_err(drvr, "Received pkt for invalid ifidx %d\n",
			 rx_complete->msg.ifidx);
		brcmu_pkt_buf_free_skb(skb);
		return;
	}

	skb->protocol = eth_type_trans(skb, ifp->ndev);
	brcmf_netif_rx(ifp, skb);
}

static void brcmf_msgbuf_process_gen_status(struct brcmf_msgbuf *msgbuf,
					    void *buf)
{
	struct msgbuf_gen_status *gen_status = buf;
	struct brcmf_pub *drvr = msgbuf->drvr;
	int err;

	err = le16_to_cpu(gen_status->compl_hdr.status);
	if (err)
		bphy_err(drvr, "Firmware reported general error: %d\n", err);
}

static void brcmf_msgbuf_process_ring_status(struct brcmf_msgbuf *msgbuf,
					     void *buf)
{
	struct msgbuf_ring_status *ring_status = buf;
	struct brcmf_pub *drvr = msgbuf->drvr;
	int err;

	err = le16_to_cpu(ring_status->compl_hdr.status);
	if (err) {
		int ring = le16_to_cpu(ring_status->compl_hdr.flow_ring_id);

		bphy_err(drvr, "Firmware reported ring %d error: %d\n", ring,
			 err);
	}
}

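/* Flowring create/delete completions: on success open the ring and kick
 * the tx worker, on failure tear the host-side ring state down again.
 */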
static void
brcmf_msgbuf_process_flow_ring_create_response(struct brcmf_msgbuf *msgbuf,
					       void *buf)
{
	struct brcmf_pub *drvr = msgbuf->drvr;
	struct msgbuf_flowring_create_resp *flowring_create_resp;
	u16 status;
	u16 flowid;

	flowring_create_resp = (struct msgbuf_flowring_create_resp *)buf;

	flowid = le16_to_cpu(flowring_create_resp->compl_hdr.flow_ring_id);
	flowid -= BRCMF_H2D_MSGRING_FLOWRING_IDSTART;
	status = le16_to_cpu(flowring_create_resp->compl_hdr.status);

	if (status) {
		bphy_err(drvr, "Flowring creation failed, code %d\n", status);
		brcmf_msgbuf_remove_flowring(msgbuf, flowid);
		return;
	}
	brcmf_dbg(MSGBUF, "Flowring %d Create response status %d\n", flowid,
		  status);

	brcmf_flowring_open(msgbuf->flow, flowid);

	brcmf_msgbuf_schedule_txdata(msgbuf, flowid, true);
}


static void
brcmf_msgbuf_process_flow_ring_delete_response(struct brcmf_msgbuf *msgbuf,
					       void *buf)
{
	struct brcmf_pub *drvr = msgbuf->drvr;
	struct msgbuf_flowring_delete_resp *flowring_delete_resp;
	u16 status;
	u16 flowid;

	flowring_delete_resp = (struct msgbuf_flowring_delete_resp *)buf;

	flowid = le16_to_cpu(flowring_delete_resp->compl_hdr.flow_ring_id);
	flowid -= BRCMF_H2D_MSGRING_FLOWRING_IDSTART;
	status = le16_to_cpu(flowring_delete_resp->compl_hdr.status);

	if (status) {
		bphy_err(drvr, "Flowring deletion failed, code %d\n", status);
		brcmf_flowring_delete(msgbuf->flow, flowid);
		return;
	}
	brcmf_dbg(MSGBUF, "Flowring %d Delete response status %d\n", flowid,
		  status);

	brcmf_msgbuf_remove_flowring(msgbuf, flowid);
}


static void brcmf_msgbuf_process_msgtype(struct brcmf_msgbuf *msgbuf, void *buf)
{
	struct brcmf_pub *drvr = msgbuf->drvr;
	struct msgbuf_common_hdr *msg;

	msg = (struct msgbuf_common_hdr *)buf;
	switch (msg->msgtype) {
	case MSGBUF_TYPE_GEN_STATUS:
		brcmf_dbg(MSGBUF, "MSGBUF_TYPE_GEN_STATUS\n");
		brcmf_msgbuf_process_gen_status(msgbuf, buf);
		break;
	case MSGBUF_TYPE_RING_STATUS:
		brcmf_dbg(MSGBUF, "MSGBUF_TYPE_RING_STATUS\n");
		brcmf_msgbuf_process_ring_status(msgbuf, buf);
		break;
	case MSGBUF_TYPE_FLOW_RING_CREATE_CMPLT:
		brcmf_dbg(MSGBUF, "MSGBUF_TYPE_FLOW_RING_CREATE_CMPLT\n");
		brcmf_msgbuf_process_flow_ring_create_response(msgbuf, buf);
		break;
	case MSGBUF_TYPE_FLOW_RING_DELETE_CMPLT:
		brcmf_dbg(MSGBUF, "MSGBUF_TYPE_FLOW_RING_DELETE_CMPLT\n");
		brcmf_msgbuf_process_flow_ring_delete_response(msgbuf, buf);
		break;
	case MSGBUF_TYPE_IOCTLPTR_REQ_ACK:
		brcmf_dbg(MSGBUF, "MSGBUF_TYPE_IOCTLPTR_REQ_ACK\n");
		break;
	case MSGBUF_TYPE_IOCTL_CMPLT:
		brcmf_dbg(MSGBUF, "MSGBUF_TYPE_IOCTL_CMPLT\n");
		brcmf_msgbuf_process_ioctl_complete(msgbuf, buf);
		break;
	case MSGBUF_TYPE_WL_EVENT:
		brcmf_dbg(MSGBUF, "MSGBUF_TYPE_WL_EVENT\n");
		brcmf_msgbuf_process_event(msgbuf, buf);
		break;
	case MSGBUF_TYPE_TX_STATUS:
		brcmf_dbg(MSGBUF, "MSGBUF_TYPE_TX_STATUS\n");
		brcmf_msgbuf_process_txstatus(msgbuf, buf);
		break;
	case MSGBUF_TYPE_RX_CMPLT:
		brcmf_dbg(MSGBUF, "MSGBUF_TYPE_RX_CMPLT\n");
		brcmf_msgbuf_process_rx_complete(msgbuf, buf);
		break;
	default:
		bphy_err(drvr, "Unsupported msgtype %d\n", msg->msgtype);
		break;
	}
}


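/* Walk one completion ring, dispatching each item by message type. The
 * read pointer is released every BRCMF_MSGBUF_UPDATE_RX_PTR_THRS items
 * so the device regains ring space while a large batch is processed.
 */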
static void brcmf_msgbuf_process_rx(struct brcmf_msgbuf *msgbuf,
				    struct brcmf_commonring *commonring)
{
	void *buf;
	u16 count;
	u16 processed;

again:
	buf = brcmf_commonring_get_read_ptr(commonring, &count);
	if (buf == NULL)
		return;

	processed = 0;
	while (count) {
		brcmf_msgbuf_process_msgtype(msgbuf,
					     buf + msgbuf->rx_dataoffset);
		buf += brcmf_commonring_len_item(commonring);
		processed++;
		if (processed == BRCMF_MSGBUF_UPDATE_RX_PTR_THRS) {
			brcmf_commonring_read_complete(commonring, processed);
			processed = 0;
		}
		count--;
	}
	if (processed)
		brcmf_commonring_read_complete(commonring, processed);

	if (commonring->r_ptr == 0)
		goto again;
}


int brcmf_proto_msgbuf_rx_trigger(struct device *dev)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_pub *drvr = bus_if->drvr;
	struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
	struct brcmf_commonring *commonring;
	void *buf;
	u32 flowid;
	int qlen;

	buf = msgbuf->commonrings[BRCMF_D2H_MSGRING_RX_COMPLETE];
	brcmf_msgbuf_process_rx(msgbuf, buf);
	buf = msgbuf->commonrings[BRCMF_D2H_MSGRING_TX_COMPLETE];
	brcmf_msgbuf_process_rx(msgbuf, buf);
	buf = msgbuf->commonrings[BRCMF_D2H_MSGRING_CONTROL_COMPLETE];
	brcmf_msgbuf_process_rx(msgbuf, buf);

	for_each_set_bit(flowid, msgbuf->txstatus_done_map,
			 msgbuf->max_flowrings) {
		clear_bit(flowid, msgbuf->txstatus_done_map);
		commonring = msgbuf->flowrings[flowid];
		qlen = brcmf_flowring_qlen(msgbuf->flow, flowid);
		if ((qlen > BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS) ||
		    ((qlen) && (atomic_read(&commonring->outstanding_tx) <
				BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS)))
			brcmf_msgbuf_schedule_txdata(msgbuf, flowid, true);
	}

	return 0;
}


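/* Tear down a flowring: mark it closing, wait for outstanding tx status
 * to drain, then ask the firmware to delete it. The host-side ring is
 * freed once the delete response arrives, or immediately when the bus
 * is down or the request cannot be submitted.
 */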
void brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u16 flowid)
{
	struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
	struct msgbuf_tx_flowring_delete_req *delete;
	struct brcmf_commonring *commonring;
	struct brcmf_commonring *commonring_del = msgbuf->flowrings[flowid];
	struct brcmf_flowring *flow = msgbuf->flow;
	void *ret_ptr;
	u8 ifidx;
	int err;
	int retry = BRCMF_MAX_TXSTATUS_WAIT_RETRIES;

	/* make sure it is not in txflow */
	brcmf_commonring_lock(commonring_del);
	flow->rings[flowid]->status = RING_CLOSING;
	brcmf_commonring_unlock(commonring_del);

	/* wait for commonring txflow finished */
	while (retry && atomic_read(&commonring_del->outstanding_tx)) {
		usleep_range(5000, 10000);
		retry--;
	}
	if (!retry) {
		brcmf_err("timed out waiting for txstatus\n");
		atomic_set(&commonring_del->outstanding_tx, 0);
	}

	/* no need to submit if firmware can not be reached */
	if (drvr->bus_if->state != BRCMF_BUS_UP) {
		brcmf_dbg(MSGBUF, "bus down, flowring will be removed\n");
		brcmf_msgbuf_remove_flowring(msgbuf, flowid);
		return;
	}

	commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_CONTROL_SUBMIT];
	brcmf_commonring_lock(commonring);
	ret_ptr = brcmf_commonring_reserve_for_write(commonring);
	if (!ret_ptr) {
		bphy_err(drvr, "FW unaware, flowring will be removed !!\n");
		brcmf_commonring_unlock(commonring);
		brcmf_msgbuf_remove_flowring(msgbuf, flowid);
		return;
	}

	delete = (struct msgbuf_tx_flowring_delete_req *)ret_ptr;

	ifidx = brcmf_flowring_ifidx_get(msgbuf->flow, flowid);

	delete->msg.msgtype = MSGBUF_TYPE_FLOW_RING_DELETE;
	delete->msg.ifidx = ifidx;
	delete->msg.request_id = 0;

	delete->flow_ring_id = cpu_to_le16(flowid +
					   BRCMF_H2D_MSGRING_FLOWRING_IDSTART);
	delete->reason = 0;

	brcmf_dbg(MSGBUF, "Send Flow Delete Req flow ID %d, ifindex %d\n",
		  flowid, ifidx);

	err = brcmf_commonring_write_complete(commonring);
	brcmf_commonring_unlock(commonring);
	if (err) {
		bphy_err(drvr, "Failed to submit RING_DELETE, flowring will be removed\n");
		brcmf_msgbuf_remove_flowring(msgbuf, flowid);
	}
}

#ifdef DEBUG
static int brcmf_msgbuf_stats_read(struct seq_file *seq, void *data)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(seq->private);
	struct brcmf_pub *drvr = bus_if->drvr;
	struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
	struct brcmf_commonring *commonring;
	u16 i;
	struct brcmf_flowring_ring *ring;
	struct brcmf_flowring_hash *hash;

	commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_CONTROL_SUBMIT];
	seq_printf(seq, "h2d_ctl_submit: rp %4u, wp %4u, depth %4u\n",
		   commonring->r_ptr, commonring->w_ptr, commonring->depth);
	commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_RXPOST_SUBMIT];
	seq_printf(seq, "h2d_rx_submit:  rp %4u, wp %4u, depth %4u\n",
		   commonring->r_ptr, commonring->w_ptr, commonring->depth);
	commonring = msgbuf->commonrings[BRCMF_D2H_MSGRING_CONTROL_COMPLETE];
	seq_printf(seq, "d2h_ctl_cmplt:  rp %4u, wp %4u, depth %4u\n",
		   commonring->r_ptr, commonring->w_ptr, commonring->depth);
	commonring = msgbuf->commonrings[BRCMF_D2H_MSGRING_TX_COMPLETE];
	seq_printf(seq, "d2h_tx_cmplt:   rp %4u, wp %4u, depth %4u\n",
		   commonring->r_ptr, commonring->w_ptr, commonring->depth);
	commonring = msgbuf->commonrings[BRCMF_D2H_MSGRING_RX_COMPLETE];
	seq_printf(seq, "d2h_rx_cmplt:   rp %4u, wp %4u, depth %4u\n",
		   commonring->r_ptr, commonring->w_ptr, commonring->depth);

	seq_printf(seq, "\nh2d_flowrings: depth %u\n",
		   BRCMF_H2D_TXFLOWRING_MAX_ITEM);
	seq_puts(seq, "Active flowrings:\n");
	for (i = 0; i < msgbuf->flow->nrofrings; i++) {
		if (!msgbuf->flow->rings[i])
			continue;
		ring = msgbuf->flow->rings[i];
		if (ring->status != RING_OPEN)
			continue;
		commonring = msgbuf->flowrings[i];
		hash = &msgbuf->flow->hash[ring->hash_id];
		seq_printf(seq, "id %3u: rp %4u, wp %4u, qlen %4u, blocked %u\n"
				"        ifidx %u, fifo %u, da %pM\n",
				i, commonring->r_ptr, commonring->w_ptr,
				skb_queue_len(&ring->skblist), ring->blocked,
				hash->ifidx, hash->fifo, hash->mac);
	}

	return 0;
}
#else
static int brcmf_msgbuf_stats_read(struct seq_file *seq, void *data)
{
	return 0;
}
#endif

static void brcmf_msgbuf_debugfs_create(struct brcmf_pub *drvr)
{
	brcmf_debugfs_add_entry(drvr, "msgbuf_stats", brcmf_msgbuf_stats_read);
}

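/* Attach the msgbuf protocol layer: allocate the shared ioctl buffer
 * and pktid arrays, wire up the proto ops and pre-post rx, event and
 * ioctl response buffers to the device.
 */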
int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr)
{
	struct brcmf_bus_msgbuf *if_msgbuf;
	struct brcmf_msgbuf *msgbuf;
	u64 address;
	u32 count;

	if_msgbuf = drvr->bus_if->msgbuf;

	if (if_msgbuf->max_flowrings >= BRCMF_FLOWRING_HASHSIZE) {
		bphy_err(drvr, "driver not configured for this many flowrings %d\n",
			 if_msgbuf->max_flowrings);
		if_msgbuf->max_flowrings = BRCMF_FLOWRING_HASHSIZE - 1;
	}

	msgbuf = kzalloc(sizeof(*msgbuf), GFP_KERNEL);
	if (!msgbuf)
		goto fail;

	msgbuf->txflow_wq = create_singlethread_workqueue("msgbuf_txflow");
	if (msgbuf->txflow_wq == NULL) {
		bphy_err(drvr, "workqueue creation failed\n");
		goto fail;
	}
	INIT_WORK(&msgbuf->txflow_work, brcmf_msgbuf_txflow_worker);
	count = BITS_TO_LONGS(if_msgbuf->max_flowrings);
	count = count * sizeof(unsigned long);
	msgbuf->flow_map = kzalloc(count, GFP_KERNEL);
	if (!msgbuf->flow_map)
		goto fail;

	msgbuf->txstatus_done_map = kzalloc(count, GFP_KERNEL);
	if (!msgbuf->txstatus_done_map)
		goto fail;

	msgbuf->drvr = drvr;
	msgbuf->ioctbuf = dma_alloc_coherent(drvr->bus_if->dev,
					     BRCMF_TX_IOCTL_MAX_MSG_SIZE,
					     &msgbuf->ioctbuf_handle,
					     GFP_KERNEL);
	if (!msgbuf->ioctbuf)
		goto fail;
	address = (u64)msgbuf->ioctbuf_handle;
	msgbuf->ioctbuf_phys_hi = address >> 32;
	msgbuf->ioctbuf_phys_lo = address & 0xffffffff;

	drvr->proto->hdrpull = brcmf_msgbuf_hdrpull;
	drvr->proto->query_dcmd = brcmf_msgbuf_query_dcmd;
	drvr->proto->set_dcmd = brcmf_msgbuf_set_dcmd;
	drvr->proto->tx_queue_data = brcmf_msgbuf_tx_queue_data;
	drvr->proto->configure_addr_mode = brcmf_msgbuf_configure_addr_mode;
	drvr->proto->delete_peer = brcmf_msgbuf_delete_peer;
	drvr->proto->add_tdls_peer = brcmf_msgbuf_add_tdls_peer;
	drvr->proto->rxreorder = brcmf_msgbuf_rxreorder;
	drvr->proto->debugfs_create = brcmf_msgbuf_debugfs_create;
	drvr->proto->pd = msgbuf;

	init_waitqueue_head(&msgbuf->ioctl_resp_wait);

	msgbuf->commonrings =
		(struct brcmf_commonring **)if_msgbuf->commonrings;
	msgbuf->flowrings = (struct brcmf_commonring **)if_msgbuf->flowrings;
	msgbuf->max_flowrings = if_msgbuf->max_flowrings;
	msgbuf->flowring_dma_handle =
		kcalloc(msgbuf->max_flowrings,
			sizeof(*msgbuf->flowring_dma_handle), GFP_KERNEL);
	if (!msgbuf->flowring_dma_handle)
		goto fail;

	msgbuf->rx_dataoffset = if_msgbuf->rx_dataoffset;
	msgbuf->max_rxbufpost = if_msgbuf->max_rxbufpost;

	msgbuf->max_ioctlrespbuf = BRCMF_MSGBUF_MAX_IOCTLRESPBUF_POST;
	msgbuf->max_eventbuf = BRCMF_MSGBUF_MAX_EVENTBUF_POST;

	msgbuf->tx_pktids = brcmf_msgbuf_init_pktids(NR_TX_PKTIDS,
						     DMA_TO_DEVICE);
	if (!msgbuf->tx_pktids)
		goto fail;
	msgbuf->rx_pktids = brcmf_msgbuf_init_pktids(NR_RX_PKTIDS,
						     DMA_FROM_DEVICE);
	if (!msgbuf->rx_pktids)
		goto fail;

	msgbuf->flow = brcmf_flowring_attach(drvr->bus_if->dev,
					     if_msgbuf->max_flowrings);
	if (!msgbuf->flow)
		goto fail;


	brcmf_dbg(MSGBUF, "Feeding buffers, rx data %d, rx event %d, rx ioctl resp %d\n",
		  msgbuf->max_rxbufpost, msgbuf->max_eventbuf,
		  msgbuf->max_ioctlrespbuf);
	count = 0;
	do {
		brcmf_msgbuf_rxbuf_data_fill(msgbuf);
		if (msgbuf->max_rxbufpost != msgbuf->rxbufpost)
			msleep(10);
		else
			break;
		count++;
	} while (count < 10);
	brcmf_msgbuf_rxbuf_event_post(msgbuf);
	brcmf_msgbuf_rxbuf_ioctlresp_post(msgbuf);

	INIT_WORK(&msgbuf->flowring_work, brcmf_msgbuf_flowring_worker);
	spin_lock_init(&msgbuf->flowring_work_lock);
	INIT_LIST_HEAD(&msgbuf->work_queue);

	return 0;

fail:
	if (msgbuf) {
		kfree(msgbuf->flow_map);
		kfree(msgbuf->txstatus_done_map);
		brcmf_msgbuf_release_pktids(msgbuf);
		kfree(msgbuf->flowring_dma_handle);
		if (msgbuf->ioctbuf)
			dma_free_coherent(drvr->bus_if->dev,
					  BRCMF_TX_IOCTL_MAX_MSG_SIZE,
					  msgbuf->ioctbuf,
					  msgbuf->ioctbuf_handle);
		if (msgbuf->txflow_wq)
			destroy_workqueue(msgbuf->txflow_wq);
		kfree(msgbuf);
	}
	return -ENOMEM;
}


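/* Detach the msgbuf protocol layer, reversing everything set up in
 * brcmf_proto_msgbuf_attach().
 */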
void brcmf_proto_msgbuf_detach(struct brcmf_pub *drvr)
{
	struct brcmf_msgbuf *msgbuf;
	struct brcmf_msgbuf_work_item *work;

	brcmf_dbg(TRACE, "Enter\n");
	if (drvr->proto->pd) {
		msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
		cancel_work_sync(&msgbuf->flowring_work);
		while (!list_empty(&msgbuf->work_queue)) {
			work = list_first_entry(&msgbuf->work_queue,
						struct brcmf_msgbuf_work_item,
						queue);
			list_del(&work->queue);
			kfree(work);
		}
		kfree(msgbuf->flow_map);
		kfree(msgbuf->txstatus_done_map);
		if (msgbuf->txflow_wq)
			destroy_workqueue(msgbuf->txflow_wq);

		brcmf_flowring_detach(msgbuf->flow);
		dma_free_coherent(drvr->bus_if->dev,
				  BRCMF_TX_IOCTL_MAX_MSG_SIZE,
				  msgbuf->ioctbuf, msgbuf->ioctbuf_handle);
		brcmf_msgbuf_release_pktids(msgbuf);
		kfree(msgbuf->flowring_dma_handle);
		kfree(msgbuf);
		drvr->proto->pd = NULL;
	}
}