1 // SPDX-License-Identifier: ISC 2 /* 3 * Copyright (c) 2014 Broadcom Corporation 4 */ 5 6 /******************************************************************************* 7 * Communicates with the dongle by using dcmd codes. 8 * For certain dcmd codes, the dongle interprets string data from the host. 9 ******************************************************************************/ 10 11 #include <linux/types.h> 12 #include <linux/netdevice.h> 13 #include <linux/etherdevice.h> 14 15 #include <brcmu_utils.h> 16 #include <brcmu_wifi.h> 17 18 #include "core.h" 19 #include "debug.h" 20 #include "proto.h" 21 #include "msgbuf.h" 22 #include "commonring.h" 23 #include "flowring.h" 24 #include "bus.h" 25 #include "tracepoint.h" 26 27 28 #define MSGBUF_IOCTL_RESP_TIMEOUT msecs_to_jiffies(2000) 29 30 #define MSGBUF_TYPE_GEN_STATUS 0x1 31 #define MSGBUF_TYPE_RING_STATUS 0x2 32 #define MSGBUF_TYPE_FLOW_RING_CREATE 0x3 33 #define MSGBUF_TYPE_FLOW_RING_CREATE_CMPLT 0x4 34 #define MSGBUF_TYPE_FLOW_RING_DELETE 0x5 35 #define MSGBUF_TYPE_FLOW_RING_DELETE_CMPLT 0x6 36 #define MSGBUF_TYPE_FLOW_RING_FLUSH 0x7 37 #define MSGBUF_TYPE_FLOW_RING_FLUSH_CMPLT 0x8 38 #define MSGBUF_TYPE_IOCTLPTR_REQ 0x9 39 #define MSGBUF_TYPE_IOCTLPTR_REQ_ACK 0xA 40 #define MSGBUF_TYPE_IOCTLRESP_BUF_POST 0xB 41 #define MSGBUF_TYPE_IOCTL_CMPLT 0xC 42 #define MSGBUF_TYPE_EVENT_BUF_POST 0xD 43 #define MSGBUF_TYPE_WL_EVENT 0xE 44 #define MSGBUF_TYPE_TX_POST 0xF 45 #define MSGBUF_TYPE_TX_STATUS 0x10 46 #define MSGBUF_TYPE_RXBUF_POST 0x11 47 #define MSGBUF_TYPE_RX_CMPLT 0x12 48 #define MSGBUF_TYPE_LPBK_DMAXFER 0x13 49 #define MSGBUF_TYPE_LPBK_DMAXFER_CMPLT 0x14 50 51 #define NR_TX_PKTIDS 2048 52 #define NR_RX_PKTIDS 1024 53 54 #define BRCMF_IOCTL_REQ_PKTID 0xFFFE 55 56 #define BRCMF_MSGBUF_MAX_PKT_SIZE 2048 57 #define BRCMF_MSGBUF_RXBUFPOST_THRESHOLD 32 58 #define BRCMF_MSGBUF_MAX_IOCTLRESPBUF_POST 8 59 #define BRCMF_MSGBUF_MAX_EVENTBUF_POST 8 60 61 #define BRCMF_MSGBUF_PKT_FLAGS_FRAME_802_3 0x01 62 #define BRCMF_MSGBUF_PKT_FLAGS_FRAME_802_11 0x02 63 #define BRCMF_MSGBUF_PKT_FLAGS_FRAME_MASK 0x07 64 #define BRCMF_MSGBUF_PKT_FLAGS_PRIO_SHIFT 5 65 66 #define BRCMF_MSGBUF_TX_FLUSH_CNT1 32 67 #define BRCMF_MSGBUF_TX_FLUSH_CNT2 96 68 69 #define BRCMF_MSGBUF_DELAY_TXWORKER_THRS 96 70 #define BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS 32 71 #define BRCMF_MSGBUF_UPDATE_RX_PTR_THRS 48 72 73 74 struct msgbuf_common_hdr { 75 u8 msgtype; 76 u8 ifidx; 77 u8 flags; 78 u8 rsvd0; 79 __le32 request_id; 80 }; 81 82 struct msgbuf_ioctl_req_hdr { 83 struct msgbuf_common_hdr msg; 84 __le32 cmd; 85 __le16 trans_id; 86 __le16 input_buf_len; 87 __le16 output_buf_len; 88 __le16 rsvd0[3]; 89 struct msgbuf_buf_addr req_buf_addr; 90 __le32 rsvd1[2]; 91 }; 92 93 struct msgbuf_tx_msghdr { 94 struct msgbuf_common_hdr msg; 95 u8 txhdr[ETH_HLEN]; 96 u8 flags; 97 u8 seg_cnt; 98 struct msgbuf_buf_addr metadata_buf_addr; 99 struct msgbuf_buf_addr data_buf_addr; 100 __le16 metadata_buf_len; 101 __le16 data_len; 102 __le32 rsvd0; 103 }; 104 105 struct msgbuf_rx_bufpost { 106 struct msgbuf_common_hdr msg; 107 __le16 metadata_buf_len; 108 __le16 data_buf_len; 109 __le32 rsvd0; 110 struct msgbuf_buf_addr metadata_buf_addr; 111 struct msgbuf_buf_addr data_buf_addr; 112 }; 113 114 struct msgbuf_rx_ioctl_resp_or_event { 115 struct msgbuf_common_hdr msg; 116 __le16 host_buf_len; 117 __le16 rsvd0[3]; 118 struct msgbuf_buf_addr host_buf_addr; 119 __le32 rsvd1[4]; 120 }; 121 122 struct msgbuf_completion_hdr { 123 __le16 status; 124 __le16 flow_ring_id; 125 }; 126 127 /* Data struct for the MSGBUF_TYPE_GEN_STATUS */ 128 struct msgbuf_gen_status { 129 struct msgbuf_common_hdr msg; 130 struct msgbuf_completion_hdr compl_hdr; 131 __le16 write_idx; 132 __le32 rsvd0[3]; 133 }; 134 135 /* Data struct for the MSGBUF_TYPE_RING_STATUS */ 136 struct msgbuf_ring_status { 137 struct msgbuf_common_hdr msg; 138 struct msgbuf_completion_hdr compl_hdr; 139 __le16 write_idx; 140 __le16 rsvd0[5]; 141 }; 142 143 struct msgbuf_rx_event { 144 struct msgbuf_common_hdr msg; 145 struct msgbuf_completion_hdr compl_hdr; 146 __le16 event_data_len; 147 __le16 seqnum; 148 __le16 rsvd0[4]; 149 }; 150 151 struct msgbuf_ioctl_resp_hdr { 152 struct msgbuf_common_hdr msg; 153 struct msgbuf_completion_hdr compl_hdr; 154 __le16 resp_len; 155 __le16 trans_id; 156 __le32 cmd; 157 __le32 rsvd0; 158 }; 159 160 struct msgbuf_tx_status { 161 struct msgbuf_common_hdr msg; 162 struct msgbuf_completion_hdr compl_hdr; 163 __le16 metadata_len; 164 __le16 tx_status; 165 }; 166 167 struct msgbuf_rx_complete { 168 struct msgbuf_common_hdr msg; 169 struct msgbuf_completion_hdr compl_hdr; 170 __le16 metadata_len; 171 __le16 data_len; 172 __le16 data_offset; 173 __le16 flags; 174 __le32 rx_status_0; 175 __le32 rx_status_1; 176 __le32 rsvd0; 177 }; 178 179 struct msgbuf_tx_flowring_create_req { 180 struct msgbuf_common_hdr msg; 181 u8 da[ETH_ALEN]; 182 u8 sa[ETH_ALEN]; 183 u8 tid; 184 u8 if_flags; 185 __le16 flow_ring_id; 186 u8 tc; 187 u8 priority; 188 __le16 int_vector; 189 __le16 max_items; 190 __le16 len_item; 191 struct msgbuf_buf_addr flow_ring_addr; 192 }; 193 194 struct msgbuf_tx_flowring_delete_req { 195 struct msgbuf_common_hdr msg; 196 __le16 flow_ring_id; 197 __le16 reason; 198 __le32 rsvd0[7]; 199 }; 200 201 struct msgbuf_flowring_create_resp { 202 struct msgbuf_common_hdr msg; 203 struct msgbuf_completion_hdr compl_hdr; 204 __le32 rsvd0[3]; 205 }; 206 207 struct msgbuf_flowring_delete_resp { 208 struct msgbuf_common_hdr msg; 209 struct msgbuf_completion_hdr compl_hdr; 210 __le32 rsvd0[3]; 211 }; 212 213 struct msgbuf_flowring_flush_resp { 214 struct msgbuf_common_hdr msg; 215 struct msgbuf_completion_hdr compl_hdr; 216 __le32 rsvd0[3]; 217 }; 218 219 struct brcmf_msgbuf_work_item { 220 struct list_head queue; 221 u32 flowid; 222 int ifidx; 223 u8 sa[ETH_ALEN]; 224 u8 da[ETH_ALEN]; 225 }; 226 227 struct brcmf_msgbuf { 228 struct brcmf_pub *drvr; 229 230 struct brcmf_commonring **commonrings; 231 struct brcmf_commonring **flowrings; 232 dma_addr_t *flowring_dma_handle; 233 234 u16 max_flowrings; 235 u16 max_submissionrings; 236 u16 max_completionrings; 237 238 u16 rx_dataoffset; 239 u32 max_rxbufpost; 240 u16 rx_metadata_offset; 241 u32 rxbufpost; 242 243 u32 max_ioctlrespbuf; 244 u32 cur_ioctlrespbuf; 245 u32 max_eventbuf; 246 u32 cur_eventbuf; 247 248 void *ioctbuf; 249 dma_addr_t ioctbuf_handle; 250 u32 ioctbuf_phys_hi; 251 u32 ioctbuf_phys_lo; 252 int ioctl_resp_status; 253 u32 ioctl_resp_ret_len; 254 u32 ioctl_resp_pktid; 255 256 u16 data_seq_no; 257 u16 ioctl_seq_no; 258 u32 reqid; 259 wait_queue_head_t ioctl_resp_wait; 260 bool ctl_completed; 261 262 struct brcmf_msgbuf_pktids *tx_pktids; 263 struct brcmf_msgbuf_pktids *rx_pktids; 264 struct brcmf_flowring *flow; 265 266 struct workqueue_struct *txflow_wq; 267 struct work_struct txflow_work; 268 unsigned long *flow_map; 269 unsigned long *txstatus_done_map; 270 271 struct work_struct flowring_work; 272 spinlock_t flowring_work_lock; 273 struct list_head work_queue; 274 }; 275 276 struct brcmf_msgbuf_pktid { 277 atomic_t allocated; 278 u16 data_offset; 279 struct sk_buff *skb; 280 dma_addr_t physaddr; 281 }; 282 283 struct brcmf_msgbuf_pktids { 284 u32 array_size; 285 u32 last_allocated_idx; 286 enum dma_data_direction direction; 287 struct brcmf_msgbuf_pktid *array; 288 }; 289 290 static void brcmf_msgbuf_rxbuf_ioctlresp_post(struct brcmf_msgbuf *msgbuf); 291 292 293 static struct brcmf_msgbuf_pktids * 294 brcmf_msgbuf_init_pktids(u32 nr_array_entries, 295 enum dma_data_direction direction) 296 { 297 struct brcmf_msgbuf_pktid *array; 298 struct brcmf_msgbuf_pktids *pktids; 299 300 array = kcalloc(nr_array_entries, sizeof(*array), GFP_KERNEL); 301 if (!array) 302 return NULL; 303 304 pktids = kzalloc(sizeof(*pktids), GFP_KERNEL); 305 if (!pktids) { 306 kfree(array); 307 return NULL; 308 } 309 pktids->array = array; 310 pktids->array_size = nr_array_entries; 311 312 return pktids; 313 } 314 315 316 static int 317 brcmf_msgbuf_alloc_pktid(struct device *dev, 318 struct brcmf_msgbuf_pktids *pktids, 319 struct sk_buff *skb, u16 data_offset, 320 dma_addr_t *physaddr, u32 *idx) 321 { 322 struct brcmf_msgbuf_pktid *array; 323 u32 count; 324 325 array = pktids->array; 326 327 *physaddr = dma_map_single(dev, skb->data + data_offset, 328 skb->len - data_offset, pktids->direction); 329 330 if (dma_mapping_error(dev, *physaddr)) { 331 brcmf_err("dma_map_single failed !!\n"); 332 return -ENOMEM; 333 } 334 335 *idx = pktids->last_allocated_idx; 336 337 count = 0; 338 do { 339 (*idx)++; 340 if (*idx == pktids->array_size) 341 *idx = 0; 342 if (array[*idx].allocated.counter == 0) 343 if (atomic_cmpxchg(&array[*idx].allocated, 0, 1) == 0) 344 break; 345 count++; 346 } while (count < pktids->array_size); 347 348 if (count == pktids->array_size) 349 return -ENOMEM; 350 351 array[*idx].data_offset = data_offset; 352 array[*idx].physaddr = *physaddr; 353 array[*idx].skb = skb; 354 355 pktids->last_allocated_idx = *idx; 356 357 return 0; 358 } 359 360 361 static struct sk_buff * 362 brcmf_msgbuf_get_pktid(struct device *dev, struct brcmf_msgbuf_pktids *pktids, 363 u32 idx) 364 { 365 struct brcmf_msgbuf_pktid *pktid; 366 struct sk_buff *skb; 367 368 if (idx < 0 || idx >= pktids->array_size) { 369 brcmf_err("Invalid packet id %d (max %d)\n", idx, 370 pktids->array_size); 371 return NULL; 372 } 373 if (pktids->array[idx].allocated.counter) { 374 pktid = &pktids->array[idx]; 375 dma_unmap_single(dev, pktid->physaddr, 376 pktid->skb->len - pktid->data_offset, 377 pktids->direction); 378 skb = pktid->skb; 379 pktid->allocated.counter = 0; 380 return skb; 381 } else { 382 brcmf_err("Invalid packet id %d (not in use)\n", idx); 383 } 384 385 return NULL; 386 } 387 388 389 static void 390 brcmf_msgbuf_release_array(struct device *dev, 391 struct brcmf_msgbuf_pktids *pktids) 392 { 393 struct brcmf_msgbuf_pktid *array; 394 struct brcmf_msgbuf_pktid *pktid; 395 u32 count; 396 397 array = pktids->array; 398 count = 0; 399 do { 400 if (array[count].allocated.counter) { 401 pktid = &array[count]; 402 dma_unmap_single(dev, pktid->physaddr, 403 pktid->skb->len - pktid->data_offset, 404 pktids->direction); 405 brcmu_pkt_buf_free_skb(pktid->skb); 406 } 407 count++; 408 } while (count < pktids->array_size); 409 410 kfree(array); 411 kfree(pktids); 412 } 413 414 415 static void brcmf_msgbuf_release_pktids(struct brcmf_msgbuf *msgbuf) 416 { 417 if (msgbuf->rx_pktids) 418 brcmf_msgbuf_release_array(msgbuf->drvr->bus_if->dev, 419 msgbuf->rx_pktids); 420 if (msgbuf->tx_pktids) 421 brcmf_msgbuf_release_array(msgbuf->drvr->bus_if->dev, 422 msgbuf->tx_pktids); 423 } 424 425 426 static int brcmf_msgbuf_tx_ioctl(struct brcmf_pub *drvr, int ifidx, 427 uint cmd, void *buf, uint len) 428 { 429 struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd; 430 struct brcmf_commonring *commonring; 431 struct msgbuf_ioctl_req_hdr *request; 432 u16 buf_len; 433 void *ret_ptr; 434 int err; 435 436 commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_CONTROL_SUBMIT]; 437 brcmf_commonring_lock(commonring); 438 ret_ptr = brcmf_commonring_reserve_for_write(commonring); 439 if (!ret_ptr) { 440 bphy_err(drvr, "Failed to reserve space in commonring\n"); 441 brcmf_commonring_unlock(commonring); 442 return -ENOMEM; 443 } 444 445 msgbuf->reqid++; 446 447 request = (struct msgbuf_ioctl_req_hdr *)ret_ptr; 448 request->msg.msgtype = MSGBUF_TYPE_IOCTLPTR_REQ; 449 request->msg.ifidx = (u8)ifidx; 450 request->msg.flags = 0; 451 request->msg.request_id = cpu_to_le32(BRCMF_IOCTL_REQ_PKTID); 452 request->cmd = cpu_to_le32(cmd); 453 request->output_buf_len = cpu_to_le16(len); 454 request->trans_id = cpu_to_le16(msgbuf->reqid); 455 456 buf_len = min_t(u16, len, BRCMF_TX_IOCTL_MAX_MSG_SIZE); 457 request->input_buf_len = cpu_to_le16(buf_len); 458 request->req_buf_addr.high_addr = cpu_to_le32(msgbuf->ioctbuf_phys_hi); 459 request->req_buf_addr.low_addr = cpu_to_le32(msgbuf->ioctbuf_phys_lo); 460 if (buf) 461 memcpy(msgbuf->ioctbuf, buf, buf_len); 462 else 463 memset(msgbuf->ioctbuf, 0, buf_len); 464 465 err = brcmf_commonring_write_complete(commonring); 466 brcmf_commonring_unlock(commonring); 467 468 return err; 469 } 470 471 472 static int brcmf_msgbuf_ioctl_resp_wait(struct brcmf_msgbuf *msgbuf) 473 { 474 return wait_event_timeout(msgbuf->ioctl_resp_wait, 475 msgbuf->ctl_completed, 476 MSGBUF_IOCTL_RESP_TIMEOUT); 477 } 478 479 480 static void brcmf_msgbuf_ioctl_resp_wake(struct brcmf_msgbuf *msgbuf) 481 { 482 msgbuf->ctl_completed = true; 483 wake_up(&msgbuf->ioctl_resp_wait); 484 } 485 486 487 static int brcmf_msgbuf_query_dcmd(struct brcmf_pub *drvr, int ifidx, 488 uint cmd, void *buf, uint len, int *fwerr) 489 { 490 struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd; 491 struct sk_buff *skb = NULL; 492 int timeout; 493 int err; 494 495 brcmf_dbg(MSGBUF, "ifidx=%d, cmd=%d, len=%d\n", ifidx, cmd, len); 496 *fwerr = 0; 497 msgbuf->ctl_completed = false; 498 err = brcmf_msgbuf_tx_ioctl(drvr, ifidx, cmd, buf, len); 499 if (err) 500 return err; 501 502 timeout = brcmf_msgbuf_ioctl_resp_wait(msgbuf); 503 if (!timeout) { 504 bphy_err(drvr, "Timeout on response for query command\n"); 505 return -EIO; 506 } 507 508 skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev, 509 msgbuf->rx_pktids, 510 msgbuf->ioctl_resp_pktid); 511 if (msgbuf->ioctl_resp_ret_len != 0) { 512 if (!skb) 513 return -EBADF; 514 515 memcpy(buf, skb->data, (len < msgbuf->ioctl_resp_ret_len) ? 516 len : msgbuf->ioctl_resp_ret_len); 517 } 518 brcmu_pkt_buf_free_skb(skb); 519 520 *fwerr = msgbuf->ioctl_resp_status; 521 return 0; 522 } 523 524 525 static int brcmf_msgbuf_set_dcmd(struct brcmf_pub *drvr, int ifidx, 526 uint cmd, void *buf, uint len, int *fwerr) 527 { 528 return brcmf_msgbuf_query_dcmd(drvr, ifidx, cmd, buf, len, fwerr); 529 } 530 531 532 static int brcmf_msgbuf_hdrpull(struct brcmf_pub *drvr, bool do_fws, 533 struct sk_buff *skb, struct brcmf_if **ifp) 534 { 535 return -ENODEV; 536 } 537 538 static void brcmf_msgbuf_rxreorder(struct brcmf_if *ifp, struct sk_buff *skb) 539 { 540 } 541 542 static void 543 brcmf_msgbuf_remove_flowring(struct brcmf_msgbuf *msgbuf, u16 flowid) 544 { 545 u32 dma_sz; 546 void *dma_buf; 547 548 brcmf_dbg(MSGBUF, "Removing flowring %d\n", flowid); 549 550 dma_sz = BRCMF_H2D_TXFLOWRING_MAX_ITEM * BRCMF_H2D_TXFLOWRING_ITEMSIZE; 551 dma_buf = msgbuf->flowrings[flowid]->buf_addr; 552 dma_free_coherent(msgbuf->drvr->bus_if->dev, dma_sz, dma_buf, 553 msgbuf->flowring_dma_handle[flowid]); 554 555 brcmf_flowring_delete(msgbuf->flow, flowid); 556 } 557 558 559 static struct brcmf_msgbuf_work_item * 560 brcmf_msgbuf_dequeue_work(struct brcmf_msgbuf *msgbuf) 561 { 562 struct brcmf_msgbuf_work_item *work = NULL; 563 ulong flags; 564 565 spin_lock_irqsave(&msgbuf->flowring_work_lock, flags); 566 if (!list_empty(&msgbuf->work_queue)) { 567 work = list_first_entry(&msgbuf->work_queue, 568 struct brcmf_msgbuf_work_item, queue); 569 list_del(&work->queue); 570 } 571 spin_unlock_irqrestore(&msgbuf->flowring_work_lock, flags); 572 573 return work; 574 } 575 576 577 static u32 578 brcmf_msgbuf_flowring_create_worker(struct brcmf_msgbuf *msgbuf, 579 struct brcmf_msgbuf_work_item *work) 580 { 581 struct brcmf_pub *drvr = msgbuf->drvr; 582 struct msgbuf_tx_flowring_create_req *create; 583 struct brcmf_commonring *commonring; 584 void *ret_ptr; 585 u32 flowid; 586 void *dma_buf; 587 u32 dma_sz; 588 u64 address; 589 int err; 590 591 flowid = work->flowid; 592 dma_sz = BRCMF_H2D_TXFLOWRING_MAX_ITEM * BRCMF_H2D_TXFLOWRING_ITEMSIZE; 593 dma_buf = dma_alloc_coherent(msgbuf->drvr->bus_if->dev, dma_sz, 594 &msgbuf->flowring_dma_handle[flowid], 595 GFP_KERNEL); 596 if (!dma_buf) { 597 bphy_err(drvr, "dma_alloc_coherent failed\n"); 598 brcmf_flowring_delete(msgbuf->flow, flowid); 599 return BRCMF_FLOWRING_INVALID_ID; 600 } 601 602 brcmf_commonring_config(msgbuf->flowrings[flowid], 603 BRCMF_H2D_TXFLOWRING_MAX_ITEM, 604 BRCMF_H2D_TXFLOWRING_ITEMSIZE, dma_buf); 605 606 commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_CONTROL_SUBMIT]; 607 brcmf_commonring_lock(commonring); 608 ret_ptr = brcmf_commonring_reserve_for_write(commonring); 609 if (!ret_ptr) { 610 bphy_err(drvr, "Failed to reserve space in commonring\n"); 611 brcmf_commonring_unlock(commonring); 612 brcmf_msgbuf_remove_flowring(msgbuf, flowid); 613 return BRCMF_FLOWRING_INVALID_ID; 614 } 615 616 create = (struct msgbuf_tx_flowring_create_req *)ret_ptr; 617 create->msg.msgtype = MSGBUF_TYPE_FLOW_RING_CREATE; 618 create->msg.ifidx = work->ifidx; 619 create->msg.request_id = 0; 620 create->tid = brcmf_flowring_tid(msgbuf->flow, flowid); 621 create->flow_ring_id = cpu_to_le16(flowid + 622 BRCMF_H2D_MSGRING_FLOWRING_IDSTART); 623 memcpy(create->sa, work->sa, ETH_ALEN); 624 memcpy(create->da, work->da, ETH_ALEN); 625 address = (u64)msgbuf->flowring_dma_handle[flowid]; 626 create->flow_ring_addr.high_addr = cpu_to_le32(address >> 32); 627 create->flow_ring_addr.low_addr = cpu_to_le32(address & 0xffffffff); 628 create->max_items = cpu_to_le16(BRCMF_H2D_TXFLOWRING_MAX_ITEM); 629 create->len_item = cpu_to_le16(BRCMF_H2D_TXFLOWRING_ITEMSIZE); 630 631 brcmf_dbg(MSGBUF, "Send Flow Create Req flow ID %d for peer %pM prio %d ifindex %d\n", 632 flowid, work->da, create->tid, work->ifidx); 633 634 err = brcmf_commonring_write_complete(commonring); 635 brcmf_commonring_unlock(commonring); 636 if (err) { 637 bphy_err(drvr, "Failed to write commonring\n"); 638 brcmf_msgbuf_remove_flowring(msgbuf, flowid); 639 return BRCMF_FLOWRING_INVALID_ID; 640 } 641 642 return flowid; 643 } 644 645 646 static void brcmf_msgbuf_flowring_worker(struct work_struct *work) 647 { 648 struct brcmf_msgbuf *msgbuf; 649 struct brcmf_msgbuf_work_item *create; 650 651 msgbuf = container_of(work, struct brcmf_msgbuf, flowring_work); 652 653 while ((create = brcmf_msgbuf_dequeue_work(msgbuf))) { 654 brcmf_msgbuf_flowring_create_worker(msgbuf, create); 655 kfree(create); 656 } 657 } 658 659 660 static u32 brcmf_msgbuf_flowring_create(struct brcmf_msgbuf *msgbuf, int ifidx, 661 struct sk_buff *skb) 662 { 663 struct brcmf_msgbuf_work_item *create; 664 struct ethhdr *eh = (struct ethhdr *)(skb->data); 665 u32 flowid; 666 ulong flags; 667 668 create = kzalloc(sizeof(*create), GFP_ATOMIC); 669 if (create == NULL) 670 return BRCMF_FLOWRING_INVALID_ID; 671 672 flowid = brcmf_flowring_create(msgbuf->flow, eh->h_dest, 673 skb->priority, ifidx); 674 if (flowid == BRCMF_FLOWRING_INVALID_ID) { 675 kfree(create); 676 return flowid; 677 } 678 679 create->flowid = flowid; 680 create->ifidx = ifidx; 681 memcpy(create->sa, eh->h_source, ETH_ALEN); 682 memcpy(create->da, eh->h_dest, ETH_ALEN); 683 684 spin_lock_irqsave(&msgbuf->flowring_work_lock, flags); 685 list_add_tail(&create->queue, &msgbuf->work_queue); 686 spin_unlock_irqrestore(&msgbuf->flowring_work_lock, flags); 687 schedule_work(&msgbuf->flowring_work); 688 689 return flowid; 690 } 691 692 693 static void brcmf_msgbuf_txflow(struct brcmf_msgbuf *msgbuf, u16 flowid) 694 { 695 struct brcmf_flowring *flow = msgbuf->flow; 696 struct brcmf_pub *drvr = msgbuf->drvr; 697 struct brcmf_commonring *commonring; 698 void *ret_ptr; 699 u32 count; 700 struct sk_buff *skb; 701 dma_addr_t physaddr; 702 u32 pktid; 703 struct msgbuf_tx_msghdr *tx_msghdr; 704 u64 address; 705 706 commonring = msgbuf->flowrings[flowid]; 707 if (!brcmf_commonring_write_available(commonring)) 708 return; 709 710 brcmf_commonring_lock(commonring); 711 712 count = BRCMF_MSGBUF_TX_FLUSH_CNT2 - BRCMF_MSGBUF_TX_FLUSH_CNT1; 713 while (brcmf_flowring_qlen(flow, flowid)) { 714 skb = brcmf_flowring_dequeue(flow, flowid); 715 if (skb == NULL) { 716 bphy_err(drvr, "No SKB, but qlen %d\n", 717 brcmf_flowring_qlen(flow, flowid)); 718 break; 719 } 720 skb_orphan(skb); 721 if (brcmf_msgbuf_alloc_pktid(msgbuf->drvr->bus_if->dev, 722 msgbuf->tx_pktids, skb, ETH_HLEN, 723 &physaddr, &pktid)) { 724 brcmf_flowring_reinsert(flow, flowid, skb); 725 bphy_err(drvr, "No PKTID available !!\n"); 726 break; 727 } 728 ret_ptr = brcmf_commonring_reserve_for_write(commonring); 729 if (!ret_ptr) { 730 brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev, 731 msgbuf->tx_pktids, pktid); 732 brcmf_flowring_reinsert(flow, flowid, skb); 733 break; 734 } 735 count++; 736 737 tx_msghdr = (struct msgbuf_tx_msghdr *)ret_ptr; 738 739 tx_msghdr->msg.msgtype = MSGBUF_TYPE_TX_POST; 740 tx_msghdr->msg.request_id = cpu_to_le32(pktid + 1); 741 tx_msghdr->msg.ifidx = brcmf_flowring_ifidx_get(flow, flowid); 742 tx_msghdr->flags = BRCMF_MSGBUF_PKT_FLAGS_FRAME_802_3; 743 tx_msghdr->flags |= (skb->priority & 0x07) << 744 BRCMF_MSGBUF_PKT_FLAGS_PRIO_SHIFT; 745 tx_msghdr->seg_cnt = 1; 746 memcpy(tx_msghdr->txhdr, skb->data, ETH_HLEN); 747 tx_msghdr->data_len = cpu_to_le16(skb->len - ETH_HLEN); 748 address = (u64)physaddr; 749 tx_msghdr->data_buf_addr.high_addr = cpu_to_le32(address >> 32); 750 tx_msghdr->data_buf_addr.low_addr = 751 cpu_to_le32(address & 0xffffffff); 752 tx_msghdr->metadata_buf_len = 0; 753 tx_msghdr->metadata_buf_addr.high_addr = 0; 754 tx_msghdr->metadata_buf_addr.low_addr = 0; 755 atomic_inc(&commonring->outstanding_tx); 756 if (count >= BRCMF_MSGBUF_TX_FLUSH_CNT2) { 757 brcmf_commonring_write_complete(commonring); 758 count = 0; 759 } 760 } 761 if (count) 762 brcmf_commonring_write_complete(commonring); 763 brcmf_commonring_unlock(commonring); 764 } 765 766 767 static void brcmf_msgbuf_txflow_worker(struct work_struct *worker) 768 { 769 struct brcmf_msgbuf *msgbuf; 770 u32 flowid; 771 772 msgbuf = container_of(worker, struct brcmf_msgbuf, txflow_work); 773 for_each_set_bit(flowid, msgbuf->flow_map, msgbuf->max_flowrings) { 774 clear_bit(flowid, msgbuf->flow_map); 775 brcmf_msgbuf_txflow(msgbuf, flowid); 776 } 777 } 778 779 780 static int brcmf_msgbuf_schedule_txdata(struct brcmf_msgbuf *msgbuf, u32 flowid, 781 bool force) 782 { 783 struct brcmf_commonring *commonring; 784 785 set_bit(flowid, msgbuf->flow_map); 786 commonring = msgbuf->flowrings[flowid]; 787 if ((force) || (atomic_read(&commonring->outstanding_tx) < 788 BRCMF_MSGBUF_DELAY_TXWORKER_THRS)) 789 queue_work(msgbuf->txflow_wq, &msgbuf->txflow_work); 790 791 return 0; 792 } 793 794 795 static int brcmf_msgbuf_tx_queue_data(struct brcmf_pub *drvr, int ifidx, 796 struct sk_buff *skb) 797 { 798 struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd; 799 struct brcmf_flowring *flow = msgbuf->flow; 800 struct ethhdr *eh = (struct ethhdr *)(skb->data); 801 u32 flowid; 802 u32 queue_count; 803 bool force; 804 805 flowid = brcmf_flowring_lookup(flow, eh->h_dest, skb->priority, ifidx); 806 if (flowid == BRCMF_FLOWRING_INVALID_ID) { 807 flowid = brcmf_msgbuf_flowring_create(msgbuf, ifidx, skb); 808 if (flowid == BRCMF_FLOWRING_INVALID_ID) 809 return -ENOMEM; 810 } 811 queue_count = brcmf_flowring_enqueue(flow, flowid, skb); 812 force = ((queue_count % BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS) == 0); 813 brcmf_msgbuf_schedule_txdata(msgbuf, flowid, force); 814 815 return 0; 816 } 817 818 819 static void 820 brcmf_msgbuf_configure_addr_mode(struct brcmf_pub *drvr, int ifidx, 821 enum proto_addr_mode addr_mode) 822 { 823 struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd; 824 825 brcmf_flowring_configure_addr_mode(msgbuf->flow, ifidx, addr_mode); 826 } 827 828 829 static void 830 brcmf_msgbuf_delete_peer(struct brcmf_pub *drvr, int ifidx, u8 peer[ETH_ALEN]) 831 { 832 struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd; 833 834 brcmf_flowring_delete_peer(msgbuf->flow, ifidx, peer); 835 } 836 837 838 static void 839 brcmf_msgbuf_add_tdls_peer(struct brcmf_pub *drvr, int ifidx, u8 peer[ETH_ALEN]) 840 { 841 struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd; 842 843 brcmf_flowring_add_tdls_peer(msgbuf->flow, ifidx, peer); 844 } 845 846 847 static void 848 brcmf_msgbuf_process_ioctl_complete(struct brcmf_msgbuf *msgbuf, void *buf) 849 { 850 struct msgbuf_ioctl_resp_hdr *ioctl_resp; 851 852 ioctl_resp = (struct msgbuf_ioctl_resp_hdr *)buf; 853 854 msgbuf->ioctl_resp_status = 855 (s16)le16_to_cpu(ioctl_resp->compl_hdr.status); 856 msgbuf->ioctl_resp_ret_len = le16_to_cpu(ioctl_resp->resp_len); 857 msgbuf->ioctl_resp_pktid = le32_to_cpu(ioctl_resp->msg.request_id); 858 859 brcmf_msgbuf_ioctl_resp_wake(msgbuf); 860 861 if (msgbuf->cur_ioctlrespbuf) 862 msgbuf->cur_ioctlrespbuf--; 863 brcmf_msgbuf_rxbuf_ioctlresp_post(msgbuf); 864 } 865 866 867 static void 868 brcmf_msgbuf_process_txstatus(struct brcmf_msgbuf *msgbuf, void *buf) 869 { 870 struct brcmf_commonring *commonring; 871 struct msgbuf_tx_status *tx_status; 872 u32 idx; 873 struct sk_buff *skb; 874 u16 flowid; 875 876 tx_status = (struct msgbuf_tx_status *)buf; 877 idx = le32_to_cpu(tx_status->msg.request_id) - 1; 878 flowid = le16_to_cpu(tx_status->compl_hdr.flow_ring_id); 879 flowid -= BRCMF_H2D_MSGRING_FLOWRING_IDSTART; 880 skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev, 881 msgbuf->tx_pktids, idx); 882 if (!skb) 883 return; 884 885 set_bit(flowid, msgbuf->txstatus_done_map); 886 commonring = msgbuf->flowrings[flowid]; 887 atomic_dec(&commonring->outstanding_tx); 888 889 brcmf_txfinalize(brcmf_get_ifp(msgbuf->drvr, tx_status->msg.ifidx), 890 skb, true); 891 } 892 893 894 static u32 brcmf_msgbuf_rxbuf_data_post(struct brcmf_msgbuf *msgbuf, u32 count) 895 { 896 struct brcmf_pub *drvr = msgbuf->drvr; 897 struct brcmf_commonring *commonring; 898 void *ret_ptr; 899 struct sk_buff *skb; 900 u16 alloced; 901 u32 pktlen; 902 dma_addr_t physaddr; 903 struct msgbuf_rx_bufpost *rx_bufpost; 904 u64 address; 905 u32 pktid; 906 u32 i; 907 908 commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_RXPOST_SUBMIT]; 909 ret_ptr = brcmf_commonring_reserve_for_write_multiple(commonring, 910 count, 911 &alloced); 912 if (!ret_ptr) { 913 brcmf_dbg(MSGBUF, "Failed to reserve space in commonring\n"); 914 return 0; 915 } 916 917 for (i = 0; i < alloced; i++) { 918 rx_bufpost = (struct msgbuf_rx_bufpost *)ret_ptr; 919 memset(rx_bufpost, 0, sizeof(*rx_bufpost)); 920 921 skb = brcmu_pkt_buf_get_skb(BRCMF_MSGBUF_MAX_PKT_SIZE); 922 923 if (skb == NULL) { 924 bphy_err(drvr, "Failed to alloc SKB\n"); 925 brcmf_commonring_write_cancel(commonring, alloced - i); 926 break; 927 } 928 929 pktlen = skb->len; 930 if (brcmf_msgbuf_alloc_pktid(msgbuf->drvr->bus_if->dev, 931 msgbuf->rx_pktids, skb, 0, 932 &physaddr, &pktid)) { 933 dev_kfree_skb_any(skb); 934 bphy_err(drvr, "No PKTID available !!\n"); 935 brcmf_commonring_write_cancel(commonring, alloced - i); 936 break; 937 } 938 939 if (msgbuf->rx_metadata_offset) { 940 address = (u64)physaddr; 941 rx_bufpost->metadata_buf_len = 942 cpu_to_le16(msgbuf->rx_metadata_offset); 943 rx_bufpost->metadata_buf_addr.high_addr = 944 cpu_to_le32(address >> 32); 945 rx_bufpost->metadata_buf_addr.low_addr = 946 cpu_to_le32(address & 0xffffffff); 947 948 skb_pull(skb, msgbuf->rx_metadata_offset); 949 pktlen = skb->len; 950 physaddr += msgbuf->rx_metadata_offset; 951 } 952 rx_bufpost->msg.msgtype = MSGBUF_TYPE_RXBUF_POST; 953 rx_bufpost->msg.request_id = cpu_to_le32(pktid); 954 955 address = (u64)physaddr; 956 rx_bufpost->data_buf_len = cpu_to_le16((u16)pktlen); 957 rx_bufpost->data_buf_addr.high_addr = 958 cpu_to_le32(address >> 32); 959 rx_bufpost->data_buf_addr.low_addr = 960 cpu_to_le32(address & 0xffffffff); 961 962 ret_ptr += brcmf_commonring_len_item(commonring); 963 } 964 965 if (i) 966 brcmf_commonring_write_complete(commonring); 967 968 return i; 969 } 970 971 972 static void 973 brcmf_msgbuf_rxbuf_data_fill(struct brcmf_msgbuf *msgbuf) 974 { 975 u32 fillbufs; 976 u32 retcount; 977 978 fillbufs = msgbuf->max_rxbufpost - msgbuf->rxbufpost; 979 980 while (fillbufs) { 981 retcount = brcmf_msgbuf_rxbuf_data_post(msgbuf, fillbufs); 982 if (!retcount) 983 break; 984 msgbuf->rxbufpost += retcount; 985 fillbufs -= retcount; 986 } 987 } 988 989 990 static void 991 brcmf_msgbuf_update_rxbufpost_count(struct brcmf_msgbuf *msgbuf, u16 rxcnt) 992 { 993 msgbuf->rxbufpost -= rxcnt; 994 if (msgbuf->rxbufpost <= (msgbuf->max_rxbufpost - 995 BRCMF_MSGBUF_RXBUFPOST_THRESHOLD)) 996 brcmf_msgbuf_rxbuf_data_fill(msgbuf); 997 } 998 999 1000 static u32 1001 brcmf_msgbuf_rxbuf_ctrl_post(struct brcmf_msgbuf *msgbuf, bool event_buf, 1002 u32 count) 1003 { 1004 struct brcmf_pub *drvr = msgbuf->drvr; 1005 struct brcmf_commonring *commonring; 1006 void *ret_ptr; 1007 struct sk_buff *skb; 1008 u16 alloced; 1009 u32 pktlen; 1010 dma_addr_t physaddr; 1011 struct msgbuf_rx_ioctl_resp_or_event *rx_bufpost; 1012 u64 address; 1013 u32 pktid; 1014 u32 i; 1015 1016 commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_CONTROL_SUBMIT]; 1017 brcmf_commonring_lock(commonring); 1018 ret_ptr = brcmf_commonring_reserve_for_write_multiple(commonring, 1019 count, 1020 &alloced); 1021 if (!ret_ptr) { 1022 bphy_err(drvr, "Failed to reserve space in commonring\n"); 1023 brcmf_commonring_unlock(commonring); 1024 return 0; 1025 } 1026 1027 for (i = 0; i < alloced; i++) { 1028 rx_bufpost = (struct msgbuf_rx_ioctl_resp_or_event *)ret_ptr; 1029 memset(rx_bufpost, 0, sizeof(*rx_bufpost)); 1030 1031 skb = brcmu_pkt_buf_get_skb(BRCMF_MSGBUF_MAX_PKT_SIZE); 1032 1033 if (skb == NULL) { 1034 bphy_err(drvr, "Failed to alloc SKB\n"); 1035 brcmf_commonring_write_cancel(commonring, alloced - i); 1036 break; 1037 } 1038 1039 pktlen = skb->len; 1040 if (brcmf_msgbuf_alloc_pktid(msgbuf->drvr->bus_if->dev, 1041 msgbuf->rx_pktids, skb, 0, 1042 &physaddr, &pktid)) { 1043 dev_kfree_skb_any(skb); 1044 bphy_err(drvr, "No PKTID available !!\n"); 1045 brcmf_commonring_write_cancel(commonring, alloced - i); 1046 break; 1047 } 1048 if (event_buf) 1049 rx_bufpost->msg.msgtype = MSGBUF_TYPE_EVENT_BUF_POST; 1050 else 1051 rx_bufpost->msg.msgtype = 1052 MSGBUF_TYPE_IOCTLRESP_BUF_POST; 1053 rx_bufpost->msg.request_id = cpu_to_le32(pktid); 1054 1055 address = (u64)physaddr; 1056 rx_bufpost->host_buf_len = cpu_to_le16((u16)pktlen); 1057 rx_bufpost->host_buf_addr.high_addr = 1058 cpu_to_le32(address >> 32); 1059 rx_bufpost->host_buf_addr.low_addr = 1060 cpu_to_le32(address & 0xffffffff); 1061 1062 ret_ptr += brcmf_commonring_len_item(commonring); 1063 } 1064 1065 if (i) 1066 brcmf_commonring_write_complete(commonring); 1067 1068 brcmf_commonring_unlock(commonring); 1069 1070 return i; 1071 } 1072 1073 1074 static void brcmf_msgbuf_rxbuf_ioctlresp_post(struct brcmf_msgbuf *msgbuf) 1075 { 1076 u32 count; 1077 1078 count = msgbuf->max_ioctlrespbuf - msgbuf->cur_ioctlrespbuf; 1079 count = brcmf_msgbuf_rxbuf_ctrl_post(msgbuf, false, count); 1080 msgbuf->cur_ioctlrespbuf += count; 1081 } 1082 1083 1084 static void brcmf_msgbuf_rxbuf_event_post(struct brcmf_msgbuf *msgbuf) 1085 { 1086 u32 count; 1087 1088 count = msgbuf->max_eventbuf - msgbuf->cur_eventbuf; 1089 count = brcmf_msgbuf_rxbuf_ctrl_post(msgbuf, true, count); 1090 msgbuf->cur_eventbuf += count; 1091 } 1092 1093 1094 static void brcmf_msgbuf_process_event(struct brcmf_msgbuf *msgbuf, void *buf) 1095 { 1096 struct brcmf_pub *drvr = msgbuf->drvr; 1097 struct msgbuf_rx_event *event; 1098 u32 idx; 1099 u16 buflen; 1100 struct sk_buff *skb; 1101 struct brcmf_if *ifp; 1102 1103 event = (struct msgbuf_rx_event *)buf; 1104 idx = le32_to_cpu(event->msg.request_id); 1105 buflen = le16_to_cpu(event->event_data_len); 1106 1107 if (msgbuf->cur_eventbuf) 1108 msgbuf->cur_eventbuf--; 1109 brcmf_msgbuf_rxbuf_event_post(msgbuf); 1110 1111 skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev, 1112 msgbuf->rx_pktids, idx); 1113 if (!skb) 1114 return; 1115 1116 if (msgbuf->rx_dataoffset) 1117 skb_pull(skb, msgbuf->rx_dataoffset); 1118 1119 skb_trim(skb, buflen); 1120 1121 ifp = brcmf_get_ifp(msgbuf->drvr, event->msg.ifidx); 1122 if (!ifp || !ifp->ndev) { 1123 bphy_err(drvr, "Received pkt for invalid ifidx %d\n", 1124 event->msg.ifidx); 1125 goto exit; 1126 } 1127 1128 skb->protocol = eth_type_trans(skb, ifp->ndev); 1129 1130 brcmf_fweh_process_skb(ifp->drvr, skb, 0); 1131 1132 exit: 1133 brcmu_pkt_buf_free_skb(skb); 1134 } 1135 1136 1137 static void 1138 brcmf_msgbuf_process_rx_complete(struct brcmf_msgbuf *msgbuf, void *buf) 1139 { 1140 struct brcmf_pub *drvr = msgbuf->drvr; 1141 struct msgbuf_rx_complete *rx_complete; 1142 struct sk_buff *skb; 1143 u16 data_offset; 1144 u16 buflen; 1145 u16 flags; 1146 u32 idx; 1147 struct brcmf_if *ifp; 1148 1149 brcmf_msgbuf_update_rxbufpost_count(msgbuf, 1); 1150 1151 rx_complete = (struct msgbuf_rx_complete *)buf; 1152 data_offset = le16_to_cpu(rx_complete->data_offset); 1153 buflen = le16_to_cpu(rx_complete->data_len); 1154 idx = le32_to_cpu(rx_complete->msg.request_id); 1155 flags = le16_to_cpu(rx_complete->flags); 1156 1157 skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev, 1158 msgbuf->rx_pktids, idx); 1159 if (!skb) 1160 return; 1161 1162 if (data_offset) 1163 skb_pull(skb, data_offset); 1164 else if (msgbuf->rx_dataoffset) 1165 skb_pull(skb, msgbuf->rx_dataoffset); 1166 1167 skb_trim(skb, buflen); 1168 1169 if ((flags & BRCMF_MSGBUF_PKT_FLAGS_FRAME_MASK) == 1170 BRCMF_MSGBUF_PKT_FLAGS_FRAME_802_11) { 1171 ifp = msgbuf->drvr->mon_if; 1172 1173 if (!ifp) { 1174 bphy_err(drvr, "Received unexpected monitor pkt\n"); 1175 brcmu_pkt_buf_free_skb(skb); 1176 return; 1177 } 1178 1179 brcmf_netif_mon_rx(ifp, skb); 1180 return; 1181 } 1182 1183 ifp = brcmf_get_ifp(msgbuf->drvr, rx_complete->msg.ifidx); 1184 if (!ifp || !ifp->ndev) { 1185 bphy_err(drvr, "Received pkt for invalid ifidx %d\n", 1186 rx_complete->msg.ifidx); 1187 brcmu_pkt_buf_free_skb(skb); 1188 return; 1189 } 1190 1191 skb->protocol = eth_type_trans(skb, ifp->ndev); 1192 brcmf_netif_rx(ifp, skb); 1193 } 1194 1195 static void brcmf_msgbuf_process_gen_status(struct brcmf_msgbuf *msgbuf, 1196 void *buf) 1197 { 1198 struct msgbuf_gen_status *gen_status = buf; 1199 struct brcmf_pub *drvr = msgbuf->drvr; 1200 int err; 1201 1202 err = le16_to_cpu(gen_status->compl_hdr.status); 1203 if (err) 1204 bphy_err(drvr, "Firmware reported general error: %d\n", err); 1205 } 1206 1207 static void brcmf_msgbuf_process_ring_status(struct brcmf_msgbuf *msgbuf, 1208 void *buf) 1209 { 1210 struct msgbuf_ring_status *ring_status = buf; 1211 struct brcmf_pub *drvr = msgbuf->drvr; 1212 int err; 1213 1214 err = le16_to_cpu(ring_status->compl_hdr.status); 1215 if (err) { 1216 int ring = le16_to_cpu(ring_status->compl_hdr.flow_ring_id); 1217 1218 bphy_err(drvr, "Firmware reported ring %d error: %d\n", ring, 1219 err); 1220 } 1221 } 1222 1223 static void 1224 brcmf_msgbuf_process_flow_ring_create_response(struct brcmf_msgbuf *msgbuf, 1225 void *buf) 1226 { 1227 struct brcmf_pub *drvr = msgbuf->drvr; 1228 struct msgbuf_flowring_create_resp *flowring_create_resp; 1229 u16 status; 1230 u16 flowid; 1231 1232 flowring_create_resp = (struct msgbuf_flowring_create_resp *)buf; 1233 1234 flowid = le16_to_cpu(flowring_create_resp->compl_hdr.flow_ring_id); 1235 flowid -= BRCMF_H2D_MSGRING_FLOWRING_IDSTART; 1236 status = le16_to_cpu(flowring_create_resp->compl_hdr.status); 1237 1238 if (status) { 1239 bphy_err(drvr, "Flowring creation failed, code %d\n", status); 1240 brcmf_msgbuf_remove_flowring(msgbuf, flowid); 1241 return; 1242 } 1243 brcmf_dbg(MSGBUF, "Flowring %d Create response status %d\n", flowid, 1244 status); 1245 1246 brcmf_flowring_open(msgbuf->flow, flowid); 1247 1248 brcmf_msgbuf_schedule_txdata(msgbuf, flowid, true); 1249 } 1250 1251 1252 static void 1253 brcmf_msgbuf_process_flow_ring_delete_response(struct brcmf_msgbuf *msgbuf, 1254 void *buf) 1255 { 1256 struct brcmf_pub *drvr = msgbuf->drvr; 1257 struct msgbuf_flowring_delete_resp *flowring_delete_resp; 1258 u16 status; 1259 u16 flowid; 1260 1261 flowring_delete_resp = (struct msgbuf_flowring_delete_resp *)buf; 1262 1263 flowid = le16_to_cpu(flowring_delete_resp->compl_hdr.flow_ring_id); 1264 flowid -= BRCMF_H2D_MSGRING_FLOWRING_IDSTART; 1265 status = le16_to_cpu(flowring_delete_resp->compl_hdr.status); 1266 1267 if (status) { 1268 bphy_err(drvr, "Flowring deletion failed, code %d\n", status); 1269 brcmf_flowring_delete(msgbuf->flow, flowid); 1270 return; 1271 } 1272 brcmf_dbg(MSGBUF, "Flowring %d Delete response status %d\n", flowid, 1273 status); 1274 1275 brcmf_msgbuf_remove_flowring(msgbuf, flowid); 1276 } 1277 1278 1279 static void brcmf_msgbuf_process_msgtype(struct brcmf_msgbuf *msgbuf, void *buf) 1280 { 1281 struct brcmf_pub *drvr = msgbuf->drvr; 1282 struct msgbuf_common_hdr *msg; 1283 1284 msg = (struct msgbuf_common_hdr *)buf; 1285 switch (msg->msgtype) { 1286 case MSGBUF_TYPE_GEN_STATUS: 1287 brcmf_dbg(MSGBUF, "MSGBUF_TYPE_GEN_STATUS\n"); 1288 brcmf_msgbuf_process_gen_status(msgbuf, buf); 1289 break; 1290 case MSGBUF_TYPE_RING_STATUS: 1291 brcmf_dbg(MSGBUF, "MSGBUF_TYPE_RING_STATUS\n"); 1292 brcmf_msgbuf_process_ring_status(msgbuf, buf); 1293 break; 1294 case MSGBUF_TYPE_FLOW_RING_CREATE_CMPLT: 1295 brcmf_dbg(MSGBUF, "MSGBUF_TYPE_FLOW_RING_CREATE_CMPLT\n"); 1296 brcmf_msgbuf_process_flow_ring_create_response(msgbuf, buf); 1297 break; 1298 case MSGBUF_TYPE_FLOW_RING_DELETE_CMPLT: 1299 brcmf_dbg(MSGBUF, "MSGBUF_TYPE_FLOW_RING_DELETE_CMPLT\n"); 1300 brcmf_msgbuf_process_flow_ring_delete_response(msgbuf, buf); 1301 break; 1302 case MSGBUF_TYPE_IOCTLPTR_REQ_ACK: 1303 brcmf_dbg(MSGBUF, "MSGBUF_TYPE_IOCTLPTR_REQ_ACK\n"); 1304 break; 1305 case MSGBUF_TYPE_IOCTL_CMPLT: 1306 brcmf_dbg(MSGBUF, "MSGBUF_TYPE_IOCTL_CMPLT\n"); 1307 brcmf_msgbuf_process_ioctl_complete(msgbuf, buf); 1308 break; 1309 case MSGBUF_TYPE_WL_EVENT: 1310 brcmf_dbg(MSGBUF, "MSGBUF_TYPE_WL_EVENT\n"); 1311 brcmf_msgbuf_process_event(msgbuf, buf); 1312 break; 1313 case MSGBUF_TYPE_TX_STATUS: 1314 brcmf_dbg(MSGBUF, "MSGBUF_TYPE_TX_STATUS\n"); 1315 brcmf_msgbuf_process_txstatus(msgbuf, buf); 1316 break; 1317 case MSGBUF_TYPE_RX_CMPLT: 1318 brcmf_dbg(MSGBUF, "MSGBUF_TYPE_RX_CMPLT\n"); 1319 brcmf_msgbuf_process_rx_complete(msgbuf, buf); 1320 break; 1321 default: 1322 bphy_err(drvr, "Unsupported msgtype %d\n", msg->msgtype); 1323 break; 1324 } 1325 } 1326 1327 1328 static void brcmf_msgbuf_process_rx(struct brcmf_msgbuf *msgbuf, 1329 struct brcmf_commonring *commonring) 1330 { 1331 void *buf; 1332 u16 count; 1333 u16 processed; 1334 1335 again: 1336 buf = brcmf_commonring_get_read_ptr(commonring, &count); 1337 if (buf == NULL) 1338 return; 1339 1340 processed = 0; 1341 while (count) { 1342 brcmf_msgbuf_process_msgtype(msgbuf, 1343 buf + msgbuf->rx_dataoffset); 1344 buf += brcmf_commonring_len_item(commonring); 1345 processed++; 1346 if (processed == BRCMF_MSGBUF_UPDATE_RX_PTR_THRS) { 1347 brcmf_commonring_read_complete(commonring, processed); 1348 processed = 0; 1349 } 1350 count--; 1351 } 1352 if (processed) 1353 brcmf_commonring_read_complete(commonring, processed); 1354 1355 if (commonring->r_ptr == 0) 1356 goto again; 1357 } 1358 1359 1360 int brcmf_proto_msgbuf_rx_trigger(struct device *dev) 1361 { 1362 struct brcmf_bus *bus_if = dev_get_drvdata(dev); 1363 struct brcmf_pub *drvr = bus_if->drvr; 1364 struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd; 1365 struct brcmf_commonring *commonring; 1366 void *buf; 1367 u32 flowid; 1368 int qlen; 1369 1370 buf = msgbuf->commonrings[BRCMF_D2H_MSGRING_RX_COMPLETE]; 1371 brcmf_msgbuf_process_rx(msgbuf, buf); 1372 buf = msgbuf->commonrings[BRCMF_D2H_MSGRING_TX_COMPLETE]; 1373 brcmf_msgbuf_process_rx(msgbuf, buf); 1374 buf = msgbuf->commonrings[BRCMF_D2H_MSGRING_CONTROL_COMPLETE]; 1375 brcmf_msgbuf_process_rx(msgbuf, buf); 1376 1377 for_each_set_bit(flowid, msgbuf->txstatus_done_map, 1378 msgbuf->max_flowrings) { 1379 clear_bit(flowid, msgbuf->txstatus_done_map); 1380 commonring = msgbuf->flowrings[flowid]; 1381 qlen = brcmf_flowring_qlen(msgbuf->flow, flowid); 1382 if ((qlen > BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS) || 1383 ((qlen) && (atomic_read(&commonring->outstanding_tx) < 1384 BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS))) 1385 brcmf_msgbuf_schedule_txdata(msgbuf, flowid, true); 1386 } 1387 1388 return 0; 1389 } 1390 1391 1392 void brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u16 flowid) 1393 { 1394 struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd; 1395 struct msgbuf_tx_flowring_delete_req *delete; 1396 struct brcmf_commonring *commonring; 1397 void *ret_ptr; 1398 u8 ifidx; 1399 int err; 1400 1401 commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_CONTROL_SUBMIT]; 1402 brcmf_commonring_lock(commonring); 1403 ret_ptr = brcmf_commonring_reserve_for_write(commonring); 1404 if (!ret_ptr) { 1405 bphy_err(drvr, "FW unaware, flowring will be removed !!\n"); 1406 brcmf_commonring_unlock(commonring); 1407 brcmf_msgbuf_remove_flowring(msgbuf, flowid); 1408 return; 1409 } 1410 1411 delete = (struct msgbuf_tx_flowring_delete_req *)ret_ptr; 1412 1413 ifidx = brcmf_flowring_ifidx_get(msgbuf->flow, flowid); 1414 1415 delete->msg.msgtype = MSGBUF_TYPE_FLOW_RING_DELETE; 1416 delete->msg.ifidx = ifidx; 1417 delete->msg.request_id = 0; 1418 1419 delete->flow_ring_id = cpu_to_le16(flowid + 1420 BRCMF_H2D_MSGRING_FLOWRING_IDSTART); 1421 delete->reason = 0; 1422 1423 brcmf_dbg(MSGBUF, "Send Flow Delete Req flow ID %d, ifindex %d\n", 1424 flowid, ifidx); 1425 1426 err = brcmf_commonring_write_complete(commonring); 1427 brcmf_commonring_unlock(commonring); 1428 if (err) { 1429 bphy_err(drvr, "Failed to submit RING_DELETE, flowring will be removed\n"); 1430 brcmf_msgbuf_remove_flowring(msgbuf, flowid); 1431 } 1432 } 1433 1434 #ifdef DEBUG 1435 static int brcmf_msgbuf_stats_read(struct seq_file *seq, void *data) 1436 { 1437 struct brcmf_bus *bus_if = dev_get_drvdata(seq->private); 1438 struct brcmf_pub *drvr = bus_if->drvr; 1439 struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd; 1440 struct brcmf_commonring *commonring; 1441 u16 i; 1442 struct brcmf_flowring_ring *ring; 1443 struct brcmf_flowring_hash *hash; 1444 1445 commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_CONTROL_SUBMIT]; 1446 seq_printf(seq, "h2d_ctl_submit: rp %4u, wp %4u, depth %4u\n", 1447 commonring->r_ptr, commonring->w_ptr, commonring->depth); 1448 commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_RXPOST_SUBMIT]; 1449 seq_printf(seq, "h2d_rx_submit: rp %4u, wp %4u, depth %4u\n", 1450 commonring->r_ptr, commonring->w_ptr, commonring->depth); 1451 commonring = msgbuf->commonrings[BRCMF_D2H_MSGRING_CONTROL_COMPLETE]; 1452 seq_printf(seq, "d2h_ctl_cmplt: rp %4u, wp %4u, depth %4u\n", 1453 commonring->r_ptr, commonring->w_ptr, commonring->depth); 1454 commonring = msgbuf->commonrings[BRCMF_D2H_MSGRING_TX_COMPLETE]; 1455 seq_printf(seq, "d2h_tx_cmplt: rp %4u, wp %4u, depth %4u\n", 1456 commonring->r_ptr, commonring->w_ptr, commonring->depth); 1457 commonring = msgbuf->commonrings[BRCMF_D2H_MSGRING_RX_COMPLETE]; 1458 seq_printf(seq, "d2h_rx_cmplt: rp %4u, wp %4u, depth %4u\n", 1459 commonring->r_ptr, commonring->w_ptr, commonring->depth); 1460 1461 seq_printf(seq, "\nh2d_flowrings: depth %u\n", 1462 BRCMF_H2D_TXFLOWRING_MAX_ITEM); 1463 seq_puts(seq, "Active flowrings:\n"); 1464 hash = msgbuf->flow->hash; 1465 for (i = 0; i < msgbuf->flow->nrofrings; i++) { 1466 if (!msgbuf->flow->rings[i]) 1467 continue; 1468 ring = msgbuf->flow->rings[i]; 1469 if (ring->status != RING_OPEN) 1470 continue; 1471 commonring = msgbuf->flowrings[i]; 1472 hash = &msgbuf->flow->hash[ring->hash_id]; 1473 seq_printf(seq, "id %3u: rp %4u, wp %4u, qlen %4u, blocked %u\n" 1474 " ifidx %u, fifo %u, da %pM\n", 1475 i, commonring->r_ptr, commonring->w_ptr, 1476 skb_queue_len(&ring->skblist), ring->blocked, 1477 hash->ifidx, hash->fifo, hash->mac); 1478 } 1479 1480 return 0; 1481 } 1482 #else 1483 static int brcmf_msgbuf_stats_read(struct seq_file *seq, void *data) 1484 { 1485 return 0; 1486 } 1487 #endif 1488 1489 static void brcmf_msgbuf_debugfs_create(struct brcmf_pub *drvr) 1490 { 1491 brcmf_debugfs_add_entry(drvr, "msgbuf_stats", brcmf_msgbuf_stats_read); 1492 } 1493 1494 int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr) 1495 { 1496 struct brcmf_bus_msgbuf *if_msgbuf; 1497 struct brcmf_msgbuf *msgbuf; 1498 u64 address; 1499 u32 count; 1500 1501 if_msgbuf = drvr->bus_if->msgbuf; 1502 1503 if (if_msgbuf->max_flowrings >= BRCMF_FLOWRING_HASHSIZE) { 1504 bphy_err(drvr, "driver not configured for this many flowrings %d\n", 1505 if_msgbuf->max_flowrings); 1506 if_msgbuf->max_flowrings = BRCMF_FLOWRING_HASHSIZE - 1; 1507 } 1508 1509 msgbuf = kzalloc(sizeof(*msgbuf), GFP_KERNEL); 1510 if (!msgbuf) 1511 goto fail; 1512 1513 msgbuf->txflow_wq = create_singlethread_workqueue("msgbuf_txflow"); 1514 if (msgbuf->txflow_wq == NULL) { 1515 bphy_err(drvr, "workqueue creation failed\n"); 1516 goto fail; 1517 } 1518 INIT_WORK(&msgbuf->txflow_work, brcmf_msgbuf_txflow_worker); 1519 count = BITS_TO_LONGS(if_msgbuf->max_flowrings); 1520 count = count * sizeof(unsigned long); 1521 msgbuf->flow_map = kzalloc(count, GFP_KERNEL); 1522 if (!msgbuf->flow_map) 1523 goto fail; 1524 1525 msgbuf->txstatus_done_map = kzalloc(count, GFP_KERNEL); 1526 if (!msgbuf->txstatus_done_map) 1527 goto fail; 1528 1529 msgbuf->drvr = drvr; 1530 msgbuf->ioctbuf = dma_alloc_coherent(drvr->bus_if->dev, 1531 BRCMF_TX_IOCTL_MAX_MSG_SIZE, 1532 &msgbuf->ioctbuf_handle, 1533 GFP_KERNEL); 1534 if (!msgbuf->ioctbuf) 1535 goto fail; 1536 address = (u64)msgbuf->ioctbuf_handle; 1537 msgbuf->ioctbuf_phys_hi = address >> 32; 1538 msgbuf->ioctbuf_phys_lo = address & 0xffffffff; 1539 1540 drvr->proto->hdrpull = brcmf_msgbuf_hdrpull; 1541 drvr->proto->query_dcmd = brcmf_msgbuf_query_dcmd; 1542 drvr->proto->set_dcmd = brcmf_msgbuf_set_dcmd; 1543 drvr->proto->tx_queue_data = brcmf_msgbuf_tx_queue_data; 1544 drvr->proto->configure_addr_mode = brcmf_msgbuf_configure_addr_mode; 1545 drvr->proto->delete_peer = brcmf_msgbuf_delete_peer; 1546 drvr->proto->add_tdls_peer = brcmf_msgbuf_add_tdls_peer; 1547 drvr->proto->rxreorder = brcmf_msgbuf_rxreorder; 1548 drvr->proto->debugfs_create = brcmf_msgbuf_debugfs_create; 1549 drvr->proto->pd = msgbuf; 1550 1551 init_waitqueue_head(&msgbuf->ioctl_resp_wait); 1552 1553 msgbuf->commonrings = 1554 (struct brcmf_commonring **)if_msgbuf->commonrings; 1555 msgbuf->flowrings = (struct brcmf_commonring **)if_msgbuf->flowrings; 1556 msgbuf->max_flowrings = if_msgbuf->max_flowrings; 1557 msgbuf->flowring_dma_handle = 1558 kcalloc(msgbuf->max_flowrings, 1559 sizeof(*msgbuf->flowring_dma_handle), GFP_KERNEL); 1560 if (!msgbuf->flowring_dma_handle) 1561 goto fail; 1562 1563 msgbuf->rx_dataoffset = if_msgbuf->rx_dataoffset; 1564 msgbuf->max_rxbufpost = if_msgbuf->max_rxbufpost; 1565 1566 msgbuf->max_ioctlrespbuf = BRCMF_MSGBUF_MAX_IOCTLRESPBUF_POST; 1567 msgbuf->max_eventbuf = BRCMF_MSGBUF_MAX_EVENTBUF_POST; 1568 1569 msgbuf->tx_pktids = brcmf_msgbuf_init_pktids(NR_TX_PKTIDS, 1570 DMA_TO_DEVICE); 1571 if (!msgbuf->tx_pktids) 1572 goto fail; 1573 msgbuf->rx_pktids = brcmf_msgbuf_init_pktids(NR_RX_PKTIDS, 1574 DMA_FROM_DEVICE); 1575 if (!msgbuf->rx_pktids) 1576 goto fail; 1577 1578 msgbuf->flow = brcmf_flowring_attach(drvr->bus_if->dev, 1579 if_msgbuf->max_flowrings); 1580 if (!msgbuf->flow) 1581 goto fail; 1582 1583 1584 brcmf_dbg(MSGBUF, "Feeding buffers, rx data %d, rx event %d, rx ioctl resp %d\n", 1585 msgbuf->max_rxbufpost, msgbuf->max_eventbuf, 1586 msgbuf->max_ioctlrespbuf); 1587 count = 0; 1588 do { 1589 brcmf_msgbuf_rxbuf_data_fill(msgbuf); 1590 if (msgbuf->max_rxbufpost != msgbuf->rxbufpost) 1591 msleep(10); 1592 else 1593 break; 1594 count++; 1595 } while (count < 10); 1596 brcmf_msgbuf_rxbuf_event_post(msgbuf); 1597 brcmf_msgbuf_rxbuf_ioctlresp_post(msgbuf); 1598 1599 INIT_WORK(&msgbuf->flowring_work, brcmf_msgbuf_flowring_worker); 1600 spin_lock_init(&msgbuf->flowring_work_lock); 1601 INIT_LIST_HEAD(&msgbuf->work_queue); 1602 1603 return 0; 1604 1605 fail: 1606 if (msgbuf) { 1607 kfree(msgbuf->flow_map); 1608 kfree(msgbuf->txstatus_done_map); 1609 brcmf_msgbuf_release_pktids(msgbuf); 1610 kfree(msgbuf->flowring_dma_handle); 1611 if (msgbuf->ioctbuf) 1612 dma_free_coherent(drvr->bus_if->dev, 1613 BRCMF_TX_IOCTL_MAX_MSG_SIZE, 1614 msgbuf->ioctbuf, 1615 msgbuf->ioctbuf_handle); 1616 kfree(msgbuf); 1617 } 1618 return -ENOMEM; 1619 } 1620 1621 1622 void brcmf_proto_msgbuf_detach(struct brcmf_pub *drvr) 1623 { 1624 struct brcmf_msgbuf *msgbuf; 1625 struct brcmf_msgbuf_work_item *work; 1626 1627 brcmf_dbg(TRACE, "Enter\n"); 1628 if (drvr->proto->pd) { 1629 msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd; 1630 cancel_work_sync(&msgbuf->flowring_work); 1631 while (!list_empty(&msgbuf->work_queue)) { 1632 work = list_first_entry(&msgbuf->work_queue, 1633 struct brcmf_msgbuf_work_item, 1634 queue); 1635 list_del(&work->queue); 1636 kfree(work); 1637 } 1638 kfree(msgbuf->flow_map); 1639 kfree(msgbuf->txstatus_done_map); 1640 if (msgbuf->txflow_wq) 1641 destroy_workqueue(msgbuf->txflow_wq); 1642 1643 brcmf_flowring_detach(msgbuf->flow); 1644 dma_free_coherent(drvr->bus_if->dev, 1645 BRCMF_TX_IOCTL_MAX_MSG_SIZE, 1646 msgbuf->ioctbuf, msgbuf->ioctbuf_handle); 1647 brcmf_msgbuf_release_pktids(msgbuf); 1648 kfree(msgbuf->flowring_dma_handle); 1649 kfree(msgbuf); 1650 drvr->proto->pd = NULL; 1651 } 1652 } 1653