1 /* Copyright (c) 2014 Broadcom Corporation 2 * 3 * Permission to use, copy, modify, and/or distribute this software for any 4 * purpose with or without fee is hereby granted, provided that the above 5 * copyright notice and this permission notice appear in all copies. 6 * 7 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 8 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 9 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY 10 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 11 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION 12 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN 13 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 */ 15 16 /******************************************************************************* 17 * Communicates with the dongle by using dcmd codes. 18 * For certain dcmd codes, the dongle interprets string data from the host. 19 ******************************************************************************/ 20 21 #include <linux/types.h> 22 #include <linux/netdevice.h> 23 #include <linux/etherdevice.h> 24 25 #include <brcmu_utils.h> 26 #include <brcmu_wifi.h> 27 28 #include "core.h" 29 #include "debug.h" 30 #include "proto.h" 31 #include "msgbuf.h" 32 #include "commonring.h" 33 #include "flowring.h" 34 #include "bus.h" 35 #include "tracepoint.h" 36 37 38 #define MSGBUF_IOCTL_RESP_TIMEOUT msecs_to_jiffies(2000) 39 40 #define MSGBUF_TYPE_GEN_STATUS 0x1 41 #define MSGBUF_TYPE_RING_STATUS 0x2 42 #define MSGBUF_TYPE_FLOW_RING_CREATE 0x3 43 #define MSGBUF_TYPE_FLOW_RING_CREATE_CMPLT 0x4 44 #define MSGBUF_TYPE_FLOW_RING_DELETE 0x5 45 #define MSGBUF_TYPE_FLOW_RING_DELETE_CMPLT 0x6 46 #define MSGBUF_TYPE_FLOW_RING_FLUSH 0x7 47 #define MSGBUF_TYPE_FLOW_RING_FLUSH_CMPLT 0x8 48 #define MSGBUF_TYPE_IOCTLPTR_REQ 0x9 49 #define MSGBUF_TYPE_IOCTLPTR_REQ_ACK 0xA 50 #define MSGBUF_TYPE_IOCTLRESP_BUF_POST 0xB 51 #define MSGBUF_TYPE_IOCTL_CMPLT 0xC 52 #define MSGBUF_TYPE_EVENT_BUF_POST 0xD 53 #define MSGBUF_TYPE_WL_EVENT 0xE 54 #define MSGBUF_TYPE_TX_POST 0xF 55 #define MSGBUF_TYPE_TX_STATUS 0x10 56 #define MSGBUF_TYPE_RXBUF_POST 0x11 57 #define MSGBUF_TYPE_RX_CMPLT 0x12 58 #define MSGBUF_TYPE_LPBK_DMAXFER 0x13 59 #define MSGBUF_TYPE_LPBK_DMAXFER_CMPLT 0x14 60 61 #define NR_TX_PKTIDS 2048 62 #define NR_RX_PKTIDS 1024 63 64 #define BRCMF_IOCTL_REQ_PKTID 0xFFFE 65 66 #define BRCMF_MSGBUF_MAX_PKT_SIZE 2048 67 #define BRCMF_MSGBUF_RXBUFPOST_THRESHOLD 32 68 #define BRCMF_MSGBUF_MAX_IOCTLRESPBUF_POST 8 69 #define BRCMF_MSGBUF_MAX_EVENTBUF_POST 8 70 71 #define BRCMF_MSGBUF_PKT_FLAGS_FRAME_802_3 0x01 72 #define BRCMF_MSGBUF_PKT_FLAGS_PRIO_SHIFT 5 73 74 #define BRCMF_MSGBUF_TX_FLUSH_CNT1 32 75 #define BRCMF_MSGBUF_TX_FLUSH_CNT2 96 76 77 #define BRCMF_MSGBUF_DELAY_TXWORKER_THRS 96 78 #define BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS 32 79 #define BRCMF_MSGBUF_UPDATE_RX_PTR_THRS 48 80 81 82 struct msgbuf_common_hdr { 83 u8 msgtype; 84 u8 ifidx; 85 u8 flags; 86 u8 rsvd0; 87 __le32 request_id; 88 }; 89 90 struct msgbuf_buf_addr { 91 __le32 low_addr; 92 __le32 high_addr; 93 }; 94 95 struct msgbuf_ioctl_req_hdr { 96 struct msgbuf_common_hdr msg; 97 __le32 cmd; 98 __le16 trans_id; 99 __le16 input_buf_len; 100 __le16 output_buf_len; 101 __le16 rsvd0[3]; 102 struct msgbuf_buf_addr req_buf_addr; 103 __le32 rsvd1[2]; 104 }; 105 106 struct msgbuf_tx_msghdr { 107 struct msgbuf_common_hdr msg; 108 u8 txhdr[ETH_HLEN]; 109 u8 flags; 110 u8 seg_cnt; 111 struct msgbuf_buf_addr metadata_buf_addr; 112 struct msgbuf_buf_addr data_buf_addr; 113 __le16 metadata_buf_len; 114 __le16 data_len; 115 __le32 rsvd0; 116 }; 117 118 struct msgbuf_rx_bufpost { 119 struct msgbuf_common_hdr msg; 120 __le16 metadata_buf_len; 121 __le16 data_buf_len; 122 __le32 rsvd0; 123 struct msgbuf_buf_addr metadata_buf_addr; 124 struct msgbuf_buf_addr data_buf_addr; 125 }; 126 127 struct msgbuf_rx_ioctl_resp_or_event { 128 struct msgbuf_common_hdr msg; 129 __le16 host_buf_len; 130 __le16 rsvd0[3]; 131 struct msgbuf_buf_addr host_buf_addr; 132 __le32 rsvd1[4]; 133 }; 134 135 struct msgbuf_completion_hdr { 136 __le16 status; 137 __le16 flow_ring_id; 138 }; 139 140 struct msgbuf_rx_event { 141 struct msgbuf_common_hdr msg; 142 struct msgbuf_completion_hdr compl_hdr; 143 __le16 event_data_len; 144 __le16 seqnum; 145 __le16 rsvd0[4]; 146 }; 147 148 struct msgbuf_ioctl_resp_hdr { 149 struct msgbuf_common_hdr msg; 150 struct msgbuf_completion_hdr compl_hdr; 151 __le16 resp_len; 152 __le16 trans_id; 153 __le32 cmd; 154 __le32 rsvd0; 155 }; 156 157 struct msgbuf_tx_status { 158 struct msgbuf_common_hdr msg; 159 struct msgbuf_completion_hdr compl_hdr; 160 __le16 metadata_len; 161 __le16 tx_status; 162 }; 163 164 struct msgbuf_rx_complete { 165 struct msgbuf_common_hdr msg; 166 struct msgbuf_completion_hdr compl_hdr; 167 __le16 metadata_len; 168 __le16 data_len; 169 __le16 data_offset; 170 __le16 flags; 171 __le32 rx_status_0; 172 __le32 rx_status_1; 173 __le32 rsvd0; 174 }; 175 176 struct msgbuf_tx_flowring_create_req { 177 struct msgbuf_common_hdr msg; 178 u8 da[ETH_ALEN]; 179 u8 sa[ETH_ALEN]; 180 u8 tid; 181 u8 if_flags; 182 __le16 flow_ring_id; 183 u8 tc; 184 u8 priority; 185 __le16 int_vector; 186 __le16 max_items; 187 __le16 len_item; 188 struct msgbuf_buf_addr flow_ring_addr; 189 }; 190 191 struct msgbuf_tx_flowring_delete_req { 192 struct msgbuf_common_hdr msg; 193 __le16 flow_ring_id; 194 __le16 reason; 195 __le32 rsvd0[7]; 196 }; 197 198 struct msgbuf_flowring_create_resp { 199 struct msgbuf_common_hdr msg; 200 struct msgbuf_completion_hdr compl_hdr; 201 __le32 rsvd0[3]; 202 }; 203 204 struct msgbuf_flowring_delete_resp { 205 struct msgbuf_common_hdr msg; 206 struct msgbuf_completion_hdr compl_hdr; 207 __le32 rsvd0[3]; 208 }; 209 210 struct msgbuf_flowring_flush_resp { 211 struct msgbuf_common_hdr msg; 212 struct msgbuf_completion_hdr compl_hdr; 213 __le32 rsvd0[3]; 214 }; 215 216 struct brcmf_msgbuf_work_item { 217 struct list_head queue; 218 u32 flowid; 219 int ifidx; 220 u8 sa[ETH_ALEN]; 221 u8 da[ETH_ALEN]; 222 }; 223 224 struct brcmf_msgbuf { 225 struct brcmf_pub *drvr; 226 227 struct brcmf_commonring **commonrings; 228 struct brcmf_commonring **flowrings; 229 dma_addr_t *flowring_dma_handle; 230 u16 nrof_flowrings; 231 232 u16 rx_dataoffset; 233 u32 max_rxbufpost; 234 u16 rx_metadata_offset; 235 u32 rxbufpost; 236 237 u32 max_ioctlrespbuf; 238 u32 cur_ioctlrespbuf; 239 u32 max_eventbuf; 240 u32 cur_eventbuf; 241 242 void *ioctbuf; 243 dma_addr_t ioctbuf_handle; 244 u32 ioctbuf_phys_hi; 245 u32 ioctbuf_phys_lo; 246 int ioctl_resp_status; 247 u32 ioctl_resp_ret_len; 248 u32 ioctl_resp_pktid; 249 250 u16 data_seq_no; 251 u16 ioctl_seq_no; 252 u32 reqid; 253 wait_queue_head_t ioctl_resp_wait; 254 bool ctl_completed; 255 256 struct brcmf_msgbuf_pktids *tx_pktids; 257 struct brcmf_msgbuf_pktids *rx_pktids; 258 struct brcmf_flowring *flow; 259 260 struct workqueue_struct *txflow_wq; 261 struct work_struct txflow_work; 262 unsigned long *flow_map; 263 unsigned long *txstatus_done_map; 264 265 struct work_struct flowring_work; 266 spinlock_t flowring_work_lock; 267 struct list_head work_queue; 268 }; 269 270 struct brcmf_msgbuf_pktid { 271 atomic_t allocated; 272 u16 data_offset; 273 struct sk_buff *skb; 274 dma_addr_t physaddr; 275 }; 276 277 struct brcmf_msgbuf_pktids { 278 u32 array_size; 279 u32 last_allocated_idx; 280 enum dma_data_direction direction; 281 struct brcmf_msgbuf_pktid *array; 282 }; 283 284 static void brcmf_msgbuf_rxbuf_ioctlresp_post(struct brcmf_msgbuf *msgbuf); 285 286 287 static struct brcmf_msgbuf_pktids * 288 brcmf_msgbuf_init_pktids(u32 nr_array_entries, 289 enum dma_data_direction direction) 290 { 291 struct brcmf_msgbuf_pktid *array; 292 struct brcmf_msgbuf_pktids *pktids; 293 294 array = kcalloc(nr_array_entries, sizeof(*array), GFP_KERNEL); 295 if (!array) 296 return NULL; 297 298 pktids = kzalloc(sizeof(*pktids), GFP_KERNEL); 299 if (!pktids) { 300 kfree(array); 301 return NULL; 302 } 303 pktids->array = array; 304 pktids->array_size = nr_array_entries; 305 306 return pktids; 307 } 308 309 310 static int 311 brcmf_msgbuf_alloc_pktid(struct device *dev, 312 struct brcmf_msgbuf_pktids *pktids, 313 struct sk_buff *skb, u16 data_offset, 314 dma_addr_t *physaddr, u32 *idx) 315 { 316 struct brcmf_msgbuf_pktid *array; 317 u32 count; 318 319 array = pktids->array; 320 321 *physaddr = dma_map_single(dev, skb->data + data_offset, 322 skb->len - data_offset, pktids->direction); 323 324 if (dma_mapping_error(dev, *physaddr)) { 325 brcmf_err("dma_map_single failed !!\n"); 326 return -ENOMEM; 327 } 328 329 *idx = pktids->last_allocated_idx; 330 331 count = 0; 332 do { 333 (*idx)++; 334 if (*idx == pktids->array_size) 335 *idx = 0; 336 if (array[*idx].allocated.counter == 0) 337 if (atomic_cmpxchg(&array[*idx].allocated, 0, 1) == 0) 338 break; 339 count++; 340 } while (count < pktids->array_size); 341 342 if (count == pktids->array_size) 343 return -ENOMEM; 344 345 array[*idx].data_offset = data_offset; 346 array[*idx].physaddr = *physaddr; 347 array[*idx].skb = skb; 348 349 pktids->last_allocated_idx = *idx; 350 351 return 0; 352 } 353 354 355 static struct sk_buff * 356 brcmf_msgbuf_get_pktid(struct device *dev, struct brcmf_msgbuf_pktids *pktids, 357 u32 idx) 358 { 359 struct brcmf_msgbuf_pktid *pktid; 360 struct sk_buff *skb; 361 362 if (idx >= pktids->array_size) { 363 brcmf_err("Invalid packet id %d (max %d)\n", idx, 364 pktids->array_size); 365 return NULL; 366 } 367 if (pktids->array[idx].allocated.counter) { 368 pktid = &pktids->array[idx]; 369 dma_unmap_single(dev, pktid->physaddr, 370 pktid->skb->len - pktid->data_offset, 371 pktids->direction); 372 skb = pktid->skb; 373 pktid->allocated.counter = 0; 374 return skb; 375 } else { 376 brcmf_err("Invalid packet id %d (not in use)\n", idx); 377 } 378 379 return NULL; 380 } 381 382 383 static void 384 brcmf_msgbuf_release_array(struct device *dev, 385 struct brcmf_msgbuf_pktids *pktids) 386 { 387 struct brcmf_msgbuf_pktid *array; 388 struct brcmf_msgbuf_pktid *pktid; 389 u32 count; 390 391 array = pktids->array; 392 count = 0; 393 do { 394 if (array[count].allocated.counter) { 395 pktid = &array[count]; 396 dma_unmap_single(dev, pktid->physaddr, 397 pktid->skb->len - pktid->data_offset, 398 pktids->direction); 399 brcmu_pkt_buf_free_skb(pktid->skb); 400 } 401 count++; 402 } while (count < pktids->array_size); 403 404 kfree(array); 405 kfree(pktids); 406 } 407 408 409 static void brcmf_msgbuf_release_pktids(struct brcmf_msgbuf *msgbuf) 410 { 411 if (msgbuf->rx_pktids) 412 brcmf_msgbuf_release_array(msgbuf->drvr->bus_if->dev, 413 msgbuf->rx_pktids); 414 if (msgbuf->tx_pktids) 415 brcmf_msgbuf_release_array(msgbuf->drvr->bus_if->dev, 416 msgbuf->tx_pktids); 417 } 418 419 420 static int brcmf_msgbuf_tx_ioctl(struct brcmf_pub *drvr, int ifidx, 421 uint cmd, void *buf, uint len) 422 { 423 struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd; 424 struct brcmf_commonring *commonring; 425 struct msgbuf_ioctl_req_hdr *request; 426 u16 buf_len; 427 void *ret_ptr; 428 int err; 429 430 commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_CONTROL_SUBMIT]; 431 brcmf_commonring_lock(commonring); 432 ret_ptr = brcmf_commonring_reserve_for_write(commonring); 433 if (!ret_ptr) { 434 brcmf_err("Failed to reserve space in commonring\n"); 435 brcmf_commonring_unlock(commonring); 436 return -ENOMEM; 437 } 438 439 msgbuf->reqid++; 440 441 request = (struct msgbuf_ioctl_req_hdr *)ret_ptr; 442 request->msg.msgtype = MSGBUF_TYPE_IOCTLPTR_REQ; 443 request->msg.ifidx = (u8)ifidx; 444 request->msg.flags = 0; 445 request->msg.request_id = cpu_to_le32(BRCMF_IOCTL_REQ_PKTID); 446 request->cmd = cpu_to_le32(cmd); 447 request->output_buf_len = cpu_to_le16(len); 448 request->trans_id = cpu_to_le16(msgbuf->reqid); 449 450 buf_len = min_t(u16, len, BRCMF_TX_IOCTL_MAX_MSG_SIZE); 451 request->input_buf_len = cpu_to_le16(buf_len); 452 request->req_buf_addr.high_addr = cpu_to_le32(msgbuf->ioctbuf_phys_hi); 453 request->req_buf_addr.low_addr = cpu_to_le32(msgbuf->ioctbuf_phys_lo); 454 if (buf) 455 memcpy(msgbuf->ioctbuf, buf, buf_len); 456 else 457 memset(msgbuf->ioctbuf, 0, buf_len); 458 459 err = brcmf_commonring_write_complete(commonring); 460 brcmf_commonring_unlock(commonring); 461 462 return err; 463 } 464 465 466 static int brcmf_msgbuf_ioctl_resp_wait(struct brcmf_msgbuf *msgbuf) 467 { 468 return wait_event_timeout(msgbuf->ioctl_resp_wait, 469 msgbuf->ctl_completed, 470 MSGBUF_IOCTL_RESP_TIMEOUT); 471 } 472 473 474 static void brcmf_msgbuf_ioctl_resp_wake(struct brcmf_msgbuf *msgbuf) 475 { 476 msgbuf->ctl_completed = true; 477 wake_up(&msgbuf->ioctl_resp_wait); 478 } 479 480 481 static int brcmf_msgbuf_query_dcmd(struct brcmf_pub *drvr, int ifidx, 482 uint cmd, void *buf, uint len) 483 { 484 struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd; 485 struct sk_buff *skb = NULL; 486 int timeout; 487 int err; 488 489 brcmf_dbg(MSGBUF, "ifidx=%d, cmd=%d, len=%d\n", ifidx, cmd, len); 490 msgbuf->ctl_completed = false; 491 err = brcmf_msgbuf_tx_ioctl(drvr, ifidx, cmd, buf, len); 492 if (err) 493 return err; 494 495 timeout = brcmf_msgbuf_ioctl_resp_wait(msgbuf); 496 if (!timeout) { 497 brcmf_err("Timeout on response for query command\n"); 498 return -EIO; 499 } 500 501 skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev, 502 msgbuf->rx_pktids, 503 msgbuf->ioctl_resp_pktid); 504 if (msgbuf->ioctl_resp_ret_len != 0) { 505 if (!skb) 506 return -EBADF; 507 508 memcpy(buf, skb->data, (len < msgbuf->ioctl_resp_ret_len) ? 509 len : msgbuf->ioctl_resp_ret_len); 510 } 511 brcmu_pkt_buf_free_skb(skb); 512 513 return msgbuf->ioctl_resp_status; 514 } 515 516 517 static int brcmf_msgbuf_set_dcmd(struct brcmf_pub *drvr, int ifidx, 518 uint cmd, void *buf, uint len) 519 { 520 return brcmf_msgbuf_query_dcmd(drvr, ifidx, cmd, buf, len); 521 } 522 523 524 static int brcmf_msgbuf_hdrpull(struct brcmf_pub *drvr, bool do_fws, 525 struct sk_buff *skb, struct brcmf_if **ifp) 526 { 527 return -ENODEV; 528 } 529 530 static void brcmf_msgbuf_rxreorder(struct brcmf_if *ifp, struct sk_buff *skb) 531 { 532 } 533 534 static void 535 brcmf_msgbuf_remove_flowring(struct brcmf_msgbuf *msgbuf, u16 flowid) 536 { 537 u32 dma_sz; 538 void *dma_buf; 539 540 brcmf_dbg(MSGBUF, "Removing flowring %d\n", flowid); 541 542 dma_sz = BRCMF_H2D_TXFLOWRING_MAX_ITEM * BRCMF_H2D_TXFLOWRING_ITEMSIZE; 543 dma_buf = msgbuf->flowrings[flowid]->buf_addr; 544 dma_free_coherent(msgbuf->drvr->bus_if->dev, dma_sz, dma_buf, 545 msgbuf->flowring_dma_handle[flowid]); 546 547 brcmf_flowring_delete(msgbuf->flow, flowid); 548 } 549 550 551 static struct brcmf_msgbuf_work_item * 552 brcmf_msgbuf_dequeue_work(struct brcmf_msgbuf *msgbuf) 553 { 554 struct brcmf_msgbuf_work_item *work = NULL; 555 ulong flags; 556 557 spin_lock_irqsave(&msgbuf->flowring_work_lock, flags); 558 if (!list_empty(&msgbuf->work_queue)) { 559 work = list_first_entry(&msgbuf->work_queue, 560 struct brcmf_msgbuf_work_item, queue); 561 list_del(&work->queue); 562 } 563 spin_unlock_irqrestore(&msgbuf->flowring_work_lock, flags); 564 565 return work; 566 } 567 568 569 static u32 570 brcmf_msgbuf_flowring_create_worker(struct brcmf_msgbuf *msgbuf, 571 struct brcmf_msgbuf_work_item *work) 572 { 573 struct msgbuf_tx_flowring_create_req *create; 574 struct brcmf_commonring *commonring; 575 void *ret_ptr; 576 u32 flowid; 577 void *dma_buf; 578 u32 dma_sz; 579 u64 address; 580 int err; 581 582 flowid = work->flowid; 583 dma_sz = BRCMF_H2D_TXFLOWRING_MAX_ITEM * BRCMF_H2D_TXFLOWRING_ITEMSIZE; 584 dma_buf = dma_alloc_coherent(msgbuf->drvr->bus_if->dev, dma_sz, 585 &msgbuf->flowring_dma_handle[flowid], 586 GFP_KERNEL); 587 if (!dma_buf) { 588 brcmf_err("dma_alloc_coherent failed\n"); 589 brcmf_flowring_delete(msgbuf->flow, flowid); 590 return BRCMF_FLOWRING_INVALID_ID; 591 } 592 593 brcmf_commonring_config(msgbuf->flowrings[flowid], 594 BRCMF_H2D_TXFLOWRING_MAX_ITEM, 595 BRCMF_H2D_TXFLOWRING_ITEMSIZE, dma_buf); 596 597 commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_CONTROL_SUBMIT]; 598 brcmf_commonring_lock(commonring); 599 ret_ptr = brcmf_commonring_reserve_for_write(commonring); 600 if (!ret_ptr) { 601 brcmf_err("Failed to reserve space in commonring\n"); 602 brcmf_commonring_unlock(commonring); 603 brcmf_msgbuf_remove_flowring(msgbuf, flowid); 604 return BRCMF_FLOWRING_INVALID_ID; 605 } 606 607 create = (struct msgbuf_tx_flowring_create_req *)ret_ptr; 608 create->msg.msgtype = MSGBUF_TYPE_FLOW_RING_CREATE; 609 create->msg.ifidx = work->ifidx; 610 create->msg.request_id = 0; 611 create->tid = brcmf_flowring_tid(msgbuf->flow, flowid); 612 create->flow_ring_id = cpu_to_le16(flowid + 613 BRCMF_NROF_H2D_COMMON_MSGRINGS); 614 memcpy(create->sa, work->sa, ETH_ALEN); 615 memcpy(create->da, work->da, ETH_ALEN); 616 address = (u64)msgbuf->flowring_dma_handle[flowid]; 617 create->flow_ring_addr.high_addr = cpu_to_le32(address >> 32); 618 create->flow_ring_addr.low_addr = cpu_to_le32(address & 0xffffffff); 619 create->max_items = cpu_to_le16(BRCMF_H2D_TXFLOWRING_MAX_ITEM); 620 create->len_item = cpu_to_le16(BRCMF_H2D_TXFLOWRING_ITEMSIZE); 621 622 brcmf_dbg(MSGBUF, "Send Flow Create Req flow ID %d for peer %pM prio %d ifindex %d\n", 623 flowid, work->da, create->tid, work->ifidx); 624 625 err = brcmf_commonring_write_complete(commonring); 626 brcmf_commonring_unlock(commonring); 627 if (err) { 628 brcmf_err("Failed to write commonring\n"); 629 brcmf_msgbuf_remove_flowring(msgbuf, flowid); 630 return BRCMF_FLOWRING_INVALID_ID; 631 } 632 633 return flowid; 634 } 635 636 637 static void brcmf_msgbuf_flowring_worker(struct work_struct *work) 638 { 639 struct brcmf_msgbuf *msgbuf; 640 struct brcmf_msgbuf_work_item *create; 641 642 msgbuf = container_of(work, struct brcmf_msgbuf, flowring_work); 643 644 while ((create = brcmf_msgbuf_dequeue_work(msgbuf))) { 645 brcmf_msgbuf_flowring_create_worker(msgbuf, create); 646 kfree(create); 647 } 648 } 649 650 651 static u32 brcmf_msgbuf_flowring_create(struct brcmf_msgbuf *msgbuf, int ifidx, 652 struct sk_buff *skb) 653 { 654 struct brcmf_msgbuf_work_item *create; 655 struct ethhdr *eh = (struct ethhdr *)(skb->data); 656 u32 flowid; 657 ulong flags; 658 659 create = kzalloc(sizeof(*create), GFP_ATOMIC); 660 if (create == NULL) 661 return BRCMF_FLOWRING_INVALID_ID; 662 663 flowid = brcmf_flowring_create(msgbuf->flow, eh->h_dest, 664 skb->priority, ifidx); 665 if (flowid == BRCMF_FLOWRING_INVALID_ID) { 666 kfree(create); 667 return flowid; 668 } 669 670 create->flowid = flowid; 671 create->ifidx = ifidx; 672 memcpy(create->sa, eh->h_source, ETH_ALEN); 673 memcpy(create->da, eh->h_dest, ETH_ALEN); 674 675 spin_lock_irqsave(&msgbuf->flowring_work_lock, flags); 676 list_add_tail(&create->queue, &msgbuf->work_queue); 677 spin_unlock_irqrestore(&msgbuf->flowring_work_lock, flags); 678 schedule_work(&msgbuf->flowring_work); 679 680 return flowid; 681 } 682 683 684 static void brcmf_msgbuf_txflow(struct brcmf_msgbuf *msgbuf, u16 flowid) 685 { 686 struct brcmf_flowring *flow = msgbuf->flow; 687 struct brcmf_commonring *commonring; 688 void *ret_ptr; 689 u32 count; 690 struct sk_buff *skb; 691 dma_addr_t physaddr; 692 u32 pktid; 693 struct msgbuf_tx_msghdr *tx_msghdr; 694 u64 address; 695 696 commonring = msgbuf->flowrings[flowid]; 697 if (!brcmf_commonring_write_available(commonring)) 698 return; 699 700 brcmf_commonring_lock(commonring); 701 702 count = BRCMF_MSGBUF_TX_FLUSH_CNT2 - BRCMF_MSGBUF_TX_FLUSH_CNT1; 703 while (brcmf_flowring_qlen(flow, flowid)) { 704 skb = brcmf_flowring_dequeue(flow, flowid); 705 if (skb == NULL) { 706 brcmf_err("No SKB, but qlen %d\n", 707 brcmf_flowring_qlen(flow, flowid)); 708 break; 709 } 710 skb_orphan(skb); 711 if (brcmf_msgbuf_alloc_pktid(msgbuf->drvr->bus_if->dev, 712 msgbuf->tx_pktids, skb, ETH_HLEN, 713 &physaddr, &pktid)) { 714 brcmf_flowring_reinsert(flow, flowid, skb); 715 brcmf_err("No PKTID available !!\n"); 716 break; 717 } 718 ret_ptr = brcmf_commonring_reserve_for_write(commonring); 719 if (!ret_ptr) { 720 brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev, 721 msgbuf->tx_pktids, pktid); 722 brcmf_flowring_reinsert(flow, flowid, skb); 723 break; 724 } 725 count++; 726 727 tx_msghdr = (struct msgbuf_tx_msghdr *)ret_ptr; 728 729 tx_msghdr->msg.msgtype = MSGBUF_TYPE_TX_POST; 730 tx_msghdr->msg.request_id = cpu_to_le32(pktid); 731 tx_msghdr->msg.ifidx = brcmf_flowring_ifidx_get(flow, flowid); 732 tx_msghdr->flags = BRCMF_MSGBUF_PKT_FLAGS_FRAME_802_3; 733 tx_msghdr->flags |= (skb->priority & 0x07) << 734 BRCMF_MSGBUF_PKT_FLAGS_PRIO_SHIFT; 735 tx_msghdr->seg_cnt = 1; 736 memcpy(tx_msghdr->txhdr, skb->data, ETH_HLEN); 737 tx_msghdr->data_len = cpu_to_le16(skb->len - ETH_HLEN); 738 address = (u64)physaddr; 739 tx_msghdr->data_buf_addr.high_addr = cpu_to_le32(address >> 32); 740 tx_msghdr->data_buf_addr.low_addr = 741 cpu_to_le32(address & 0xffffffff); 742 tx_msghdr->metadata_buf_len = 0; 743 tx_msghdr->metadata_buf_addr.high_addr = 0; 744 tx_msghdr->metadata_buf_addr.low_addr = 0; 745 atomic_inc(&commonring->outstanding_tx); 746 if (count >= BRCMF_MSGBUF_TX_FLUSH_CNT2) { 747 brcmf_commonring_write_complete(commonring); 748 count = 0; 749 } 750 } 751 if (count) 752 brcmf_commonring_write_complete(commonring); 753 brcmf_commonring_unlock(commonring); 754 } 755 756 757 static void brcmf_msgbuf_txflow_worker(struct work_struct *worker) 758 { 759 struct brcmf_msgbuf *msgbuf; 760 u32 flowid; 761 762 msgbuf = container_of(worker, struct brcmf_msgbuf, txflow_work); 763 for_each_set_bit(flowid, msgbuf->flow_map, msgbuf->nrof_flowrings) { 764 clear_bit(flowid, msgbuf->flow_map); 765 brcmf_msgbuf_txflow(msgbuf, flowid); 766 } 767 } 768 769 770 static int brcmf_msgbuf_schedule_txdata(struct brcmf_msgbuf *msgbuf, u32 flowid, 771 bool force) 772 { 773 struct brcmf_commonring *commonring; 774 775 set_bit(flowid, msgbuf->flow_map); 776 commonring = msgbuf->flowrings[flowid]; 777 if ((force) || (atomic_read(&commonring->outstanding_tx) < 778 BRCMF_MSGBUF_DELAY_TXWORKER_THRS)) 779 queue_work(msgbuf->txflow_wq, &msgbuf->txflow_work); 780 781 return 0; 782 } 783 784 785 static int brcmf_msgbuf_txdata(struct brcmf_pub *drvr, int ifidx, 786 u8 offset, struct sk_buff *skb) 787 { 788 struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd; 789 struct brcmf_flowring *flow = msgbuf->flow; 790 struct ethhdr *eh = (struct ethhdr *)(skb->data); 791 u32 flowid; 792 u32 queue_count; 793 bool force; 794 795 flowid = brcmf_flowring_lookup(flow, eh->h_dest, skb->priority, ifidx); 796 if (flowid == BRCMF_FLOWRING_INVALID_ID) { 797 flowid = brcmf_msgbuf_flowring_create(msgbuf, ifidx, skb); 798 if (flowid == BRCMF_FLOWRING_INVALID_ID) 799 return -ENOMEM; 800 } 801 queue_count = brcmf_flowring_enqueue(flow, flowid, skb); 802 force = ((queue_count % BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS) == 0); 803 brcmf_msgbuf_schedule_txdata(msgbuf, flowid, force); 804 805 return 0; 806 } 807 808 809 static void 810 brcmf_msgbuf_configure_addr_mode(struct brcmf_pub *drvr, int ifidx, 811 enum proto_addr_mode addr_mode) 812 { 813 struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd; 814 815 brcmf_flowring_configure_addr_mode(msgbuf->flow, ifidx, addr_mode); 816 } 817 818 819 static void 820 brcmf_msgbuf_delete_peer(struct brcmf_pub *drvr, int ifidx, u8 peer[ETH_ALEN]) 821 { 822 struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd; 823 824 brcmf_flowring_delete_peer(msgbuf->flow, ifidx, peer); 825 } 826 827 828 static void 829 brcmf_msgbuf_add_tdls_peer(struct brcmf_pub *drvr, int ifidx, u8 peer[ETH_ALEN]) 830 { 831 struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd; 832 833 brcmf_flowring_add_tdls_peer(msgbuf->flow, ifidx, peer); 834 } 835 836 837 static void 838 brcmf_msgbuf_process_ioctl_complete(struct brcmf_msgbuf *msgbuf, void *buf) 839 { 840 struct msgbuf_ioctl_resp_hdr *ioctl_resp; 841 842 ioctl_resp = (struct msgbuf_ioctl_resp_hdr *)buf; 843 844 msgbuf->ioctl_resp_status = 845 (s16)le16_to_cpu(ioctl_resp->compl_hdr.status); 846 msgbuf->ioctl_resp_ret_len = le16_to_cpu(ioctl_resp->resp_len); 847 msgbuf->ioctl_resp_pktid = le32_to_cpu(ioctl_resp->msg.request_id); 848 849 brcmf_msgbuf_ioctl_resp_wake(msgbuf); 850 851 if (msgbuf->cur_ioctlrespbuf) 852 msgbuf->cur_ioctlrespbuf--; 853 brcmf_msgbuf_rxbuf_ioctlresp_post(msgbuf); 854 } 855 856 857 static void 858 brcmf_msgbuf_process_txstatus(struct brcmf_msgbuf *msgbuf, void *buf) 859 { 860 struct brcmf_commonring *commonring; 861 struct msgbuf_tx_status *tx_status; 862 u32 idx; 863 struct sk_buff *skb; 864 u16 flowid; 865 866 tx_status = (struct msgbuf_tx_status *)buf; 867 idx = le32_to_cpu(tx_status->msg.request_id); 868 flowid = le16_to_cpu(tx_status->compl_hdr.flow_ring_id); 869 flowid -= BRCMF_NROF_H2D_COMMON_MSGRINGS; 870 skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev, 871 msgbuf->tx_pktids, idx); 872 if (!skb) 873 return; 874 875 set_bit(flowid, msgbuf->txstatus_done_map); 876 commonring = msgbuf->flowrings[flowid]; 877 atomic_dec(&commonring->outstanding_tx); 878 879 brcmf_txfinalize(brcmf_get_ifp(msgbuf->drvr, tx_status->msg.ifidx), 880 skb, true); 881 } 882 883 884 static u32 brcmf_msgbuf_rxbuf_data_post(struct brcmf_msgbuf *msgbuf, u32 count) 885 { 886 struct brcmf_commonring *commonring; 887 void *ret_ptr; 888 struct sk_buff *skb; 889 u16 alloced; 890 u32 pktlen; 891 dma_addr_t physaddr; 892 struct msgbuf_rx_bufpost *rx_bufpost; 893 u64 address; 894 u32 pktid; 895 u32 i; 896 897 commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_RXPOST_SUBMIT]; 898 ret_ptr = brcmf_commonring_reserve_for_write_multiple(commonring, 899 count, 900 &alloced); 901 if (!ret_ptr) { 902 brcmf_dbg(MSGBUF, "Failed to reserve space in commonring\n"); 903 return 0; 904 } 905 906 for (i = 0; i < alloced; i++) { 907 rx_bufpost = (struct msgbuf_rx_bufpost *)ret_ptr; 908 memset(rx_bufpost, 0, sizeof(*rx_bufpost)); 909 910 skb = brcmu_pkt_buf_get_skb(BRCMF_MSGBUF_MAX_PKT_SIZE); 911 912 if (skb == NULL) { 913 brcmf_err("Failed to alloc SKB\n"); 914 brcmf_commonring_write_cancel(commonring, alloced - i); 915 break; 916 } 917 918 pktlen = skb->len; 919 if (brcmf_msgbuf_alloc_pktid(msgbuf->drvr->bus_if->dev, 920 msgbuf->rx_pktids, skb, 0, 921 &physaddr, &pktid)) { 922 dev_kfree_skb_any(skb); 923 brcmf_err("No PKTID available !!\n"); 924 brcmf_commonring_write_cancel(commonring, alloced - i); 925 break; 926 } 927 928 if (msgbuf->rx_metadata_offset) { 929 address = (u64)physaddr; 930 rx_bufpost->metadata_buf_len = 931 cpu_to_le16(msgbuf->rx_metadata_offset); 932 rx_bufpost->metadata_buf_addr.high_addr = 933 cpu_to_le32(address >> 32); 934 rx_bufpost->metadata_buf_addr.low_addr = 935 cpu_to_le32(address & 0xffffffff); 936 937 skb_pull(skb, msgbuf->rx_metadata_offset); 938 pktlen = skb->len; 939 physaddr += msgbuf->rx_metadata_offset; 940 } 941 rx_bufpost->msg.msgtype = MSGBUF_TYPE_RXBUF_POST; 942 rx_bufpost->msg.request_id = cpu_to_le32(pktid); 943 944 address = (u64)physaddr; 945 rx_bufpost->data_buf_len = cpu_to_le16((u16)pktlen); 946 rx_bufpost->data_buf_addr.high_addr = 947 cpu_to_le32(address >> 32); 948 rx_bufpost->data_buf_addr.low_addr = 949 cpu_to_le32(address & 0xffffffff); 950 951 ret_ptr += brcmf_commonring_len_item(commonring); 952 } 953 954 if (i) 955 brcmf_commonring_write_complete(commonring); 956 957 return i; 958 } 959 960 961 static void 962 brcmf_msgbuf_rxbuf_data_fill(struct brcmf_msgbuf *msgbuf) 963 { 964 u32 fillbufs; 965 u32 retcount; 966 967 fillbufs = msgbuf->max_rxbufpost - msgbuf->rxbufpost; 968 969 while (fillbufs) { 970 retcount = brcmf_msgbuf_rxbuf_data_post(msgbuf, fillbufs); 971 if (!retcount) 972 break; 973 msgbuf->rxbufpost += retcount; 974 fillbufs -= retcount; 975 } 976 } 977 978 979 static void 980 brcmf_msgbuf_update_rxbufpost_count(struct brcmf_msgbuf *msgbuf, u16 rxcnt) 981 { 982 msgbuf->rxbufpost -= rxcnt; 983 if (msgbuf->rxbufpost <= (msgbuf->max_rxbufpost - 984 BRCMF_MSGBUF_RXBUFPOST_THRESHOLD)) 985 brcmf_msgbuf_rxbuf_data_fill(msgbuf); 986 } 987 988 989 static u32 990 brcmf_msgbuf_rxbuf_ctrl_post(struct brcmf_msgbuf *msgbuf, bool event_buf, 991 u32 count) 992 { 993 struct brcmf_commonring *commonring; 994 void *ret_ptr; 995 struct sk_buff *skb; 996 u16 alloced; 997 u32 pktlen; 998 dma_addr_t physaddr; 999 struct msgbuf_rx_ioctl_resp_or_event *rx_bufpost; 1000 u64 address; 1001 u32 pktid; 1002 u32 i; 1003 1004 commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_CONTROL_SUBMIT]; 1005 brcmf_commonring_lock(commonring); 1006 ret_ptr = brcmf_commonring_reserve_for_write_multiple(commonring, 1007 count, 1008 &alloced); 1009 if (!ret_ptr) { 1010 brcmf_err("Failed to reserve space in commonring\n"); 1011 brcmf_commonring_unlock(commonring); 1012 return 0; 1013 } 1014 1015 for (i = 0; i < alloced; i++) { 1016 rx_bufpost = (struct msgbuf_rx_ioctl_resp_or_event *)ret_ptr; 1017 memset(rx_bufpost, 0, sizeof(*rx_bufpost)); 1018 1019 skb = brcmu_pkt_buf_get_skb(BRCMF_MSGBUF_MAX_PKT_SIZE); 1020 1021 if (skb == NULL) { 1022 brcmf_err("Failed to alloc SKB\n"); 1023 brcmf_commonring_write_cancel(commonring, alloced - i); 1024 break; 1025 } 1026 1027 pktlen = skb->len; 1028 if (brcmf_msgbuf_alloc_pktid(msgbuf->drvr->bus_if->dev, 1029 msgbuf->rx_pktids, skb, 0, 1030 &physaddr, &pktid)) { 1031 dev_kfree_skb_any(skb); 1032 brcmf_err("No PKTID available !!\n"); 1033 brcmf_commonring_write_cancel(commonring, alloced - i); 1034 break; 1035 } 1036 if (event_buf) 1037 rx_bufpost->msg.msgtype = MSGBUF_TYPE_EVENT_BUF_POST; 1038 else 1039 rx_bufpost->msg.msgtype = 1040 MSGBUF_TYPE_IOCTLRESP_BUF_POST; 1041 rx_bufpost->msg.request_id = cpu_to_le32(pktid); 1042 1043 address = (u64)physaddr; 1044 rx_bufpost->host_buf_len = cpu_to_le16((u16)pktlen); 1045 rx_bufpost->host_buf_addr.high_addr = 1046 cpu_to_le32(address >> 32); 1047 rx_bufpost->host_buf_addr.low_addr = 1048 cpu_to_le32(address & 0xffffffff); 1049 1050 ret_ptr += brcmf_commonring_len_item(commonring); 1051 } 1052 1053 if (i) 1054 brcmf_commonring_write_complete(commonring); 1055 1056 brcmf_commonring_unlock(commonring); 1057 1058 return i; 1059 } 1060 1061 1062 static void brcmf_msgbuf_rxbuf_ioctlresp_post(struct brcmf_msgbuf *msgbuf) 1063 { 1064 u32 count; 1065 1066 count = msgbuf->max_ioctlrespbuf - msgbuf->cur_ioctlrespbuf; 1067 count = brcmf_msgbuf_rxbuf_ctrl_post(msgbuf, false, count); 1068 msgbuf->cur_ioctlrespbuf += count; 1069 } 1070 1071 1072 static void brcmf_msgbuf_rxbuf_event_post(struct brcmf_msgbuf *msgbuf) 1073 { 1074 u32 count; 1075 1076 count = msgbuf->max_eventbuf - msgbuf->cur_eventbuf; 1077 count = brcmf_msgbuf_rxbuf_ctrl_post(msgbuf, true, count); 1078 msgbuf->cur_eventbuf += count; 1079 } 1080 1081 1082 static void brcmf_msgbuf_process_event(struct brcmf_msgbuf *msgbuf, void *buf) 1083 { 1084 struct msgbuf_rx_event *event; 1085 u32 idx; 1086 u16 buflen; 1087 struct sk_buff *skb; 1088 struct brcmf_if *ifp; 1089 1090 event = (struct msgbuf_rx_event *)buf; 1091 idx = le32_to_cpu(event->msg.request_id); 1092 buflen = le16_to_cpu(event->event_data_len); 1093 1094 if (msgbuf->cur_eventbuf) 1095 msgbuf->cur_eventbuf--; 1096 brcmf_msgbuf_rxbuf_event_post(msgbuf); 1097 1098 skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev, 1099 msgbuf->rx_pktids, idx); 1100 if (!skb) 1101 return; 1102 1103 if (msgbuf->rx_dataoffset) 1104 skb_pull(skb, msgbuf->rx_dataoffset); 1105 1106 skb_trim(skb, buflen); 1107 1108 ifp = brcmf_get_ifp(msgbuf->drvr, event->msg.ifidx); 1109 if (!ifp || !ifp->ndev) { 1110 brcmf_err("Received pkt for invalid ifidx %d\n", 1111 event->msg.ifidx); 1112 goto exit; 1113 } 1114 1115 skb->protocol = eth_type_trans(skb, ifp->ndev); 1116 1117 brcmf_fweh_process_skb(ifp->drvr, skb); 1118 1119 exit: 1120 brcmu_pkt_buf_free_skb(skb); 1121 } 1122 1123 1124 static void 1125 brcmf_msgbuf_process_rx_complete(struct brcmf_msgbuf *msgbuf, void *buf) 1126 { 1127 struct msgbuf_rx_complete *rx_complete; 1128 struct sk_buff *skb; 1129 u16 data_offset; 1130 u16 buflen; 1131 u32 idx; 1132 struct brcmf_if *ifp; 1133 1134 brcmf_msgbuf_update_rxbufpost_count(msgbuf, 1); 1135 1136 rx_complete = (struct msgbuf_rx_complete *)buf; 1137 data_offset = le16_to_cpu(rx_complete->data_offset); 1138 buflen = le16_to_cpu(rx_complete->data_len); 1139 idx = le32_to_cpu(rx_complete->msg.request_id); 1140 1141 skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev, 1142 msgbuf->rx_pktids, idx); 1143 if (!skb) 1144 return; 1145 1146 if (data_offset) 1147 skb_pull(skb, data_offset); 1148 else if (msgbuf->rx_dataoffset) 1149 skb_pull(skb, msgbuf->rx_dataoffset); 1150 1151 skb_trim(skb, buflen); 1152 1153 ifp = brcmf_get_ifp(msgbuf->drvr, rx_complete->msg.ifidx); 1154 if (!ifp || !ifp->ndev) { 1155 brcmf_err("Received pkt for invalid ifidx %d\n", 1156 rx_complete->msg.ifidx); 1157 brcmu_pkt_buf_free_skb(skb); 1158 return; 1159 } 1160 1161 skb->protocol = eth_type_trans(skb, ifp->ndev); 1162 brcmf_netif_rx(ifp, skb); 1163 } 1164 1165 1166 static void 1167 brcmf_msgbuf_process_flow_ring_create_response(struct brcmf_msgbuf *msgbuf, 1168 void *buf) 1169 { 1170 struct msgbuf_flowring_create_resp *flowring_create_resp; 1171 u16 status; 1172 u16 flowid; 1173 1174 flowring_create_resp = (struct msgbuf_flowring_create_resp *)buf; 1175 1176 flowid = le16_to_cpu(flowring_create_resp->compl_hdr.flow_ring_id); 1177 flowid -= BRCMF_NROF_H2D_COMMON_MSGRINGS; 1178 status = le16_to_cpu(flowring_create_resp->compl_hdr.status); 1179 1180 if (status) { 1181 brcmf_err("Flowring creation failed, code %d\n", status); 1182 brcmf_msgbuf_remove_flowring(msgbuf, flowid); 1183 return; 1184 } 1185 brcmf_dbg(MSGBUF, "Flowring %d Create response status %d\n", flowid, 1186 status); 1187 1188 brcmf_flowring_open(msgbuf->flow, flowid); 1189 1190 brcmf_msgbuf_schedule_txdata(msgbuf, flowid, true); 1191 } 1192 1193 1194 static void 1195 brcmf_msgbuf_process_flow_ring_delete_response(struct brcmf_msgbuf *msgbuf, 1196 void *buf) 1197 { 1198 struct msgbuf_flowring_delete_resp *flowring_delete_resp; 1199 u16 status; 1200 u16 flowid; 1201 1202 flowring_delete_resp = (struct msgbuf_flowring_delete_resp *)buf; 1203 1204 flowid = le16_to_cpu(flowring_delete_resp->compl_hdr.flow_ring_id); 1205 flowid -= BRCMF_NROF_H2D_COMMON_MSGRINGS; 1206 status = le16_to_cpu(flowring_delete_resp->compl_hdr.status); 1207 1208 if (status) { 1209 brcmf_err("Flowring deletion failed, code %d\n", status); 1210 brcmf_flowring_delete(msgbuf->flow, flowid); 1211 return; 1212 } 1213 brcmf_dbg(MSGBUF, "Flowring %d Delete response status %d\n", flowid, 1214 status); 1215 1216 brcmf_msgbuf_remove_flowring(msgbuf, flowid); 1217 } 1218 1219 1220 static void brcmf_msgbuf_process_msgtype(struct brcmf_msgbuf *msgbuf, void *buf) 1221 { 1222 struct msgbuf_common_hdr *msg; 1223 1224 msg = (struct msgbuf_common_hdr *)buf; 1225 switch (msg->msgtype) { 1226 case MSGBUF_TYPE_FLOW_RING_CREATE_CMPLT: 1227 brcmf_dbg(MSGBUF, "MSGBUF_TYPE_FLOW_RING_CREATE_CMPLT\n"); 1228 brcmf_msgbuf_process_flow_ring_create_response(msgbuf, buf); 1229 break; 1230 case MSGBUF_TYPE_FLOW_RING_DELETE_CMPLT: 1231 brcmf_dbg(MSGBUF, "MSGBUF_TYPE_FLOW_RING_DELETE_CMPLT\n"); 1232 brcmf_msgbuf_process_flow_ring_delete_response(msgbuf, buf); 1233 break; 1234 case MSGBUF_TYPE_IOCTLPTR_REQ_ACK: 1235 brcmf_dbg(MSGBUF, "MSGBUF_TYPE_IOCTLPTR_REQ_ACK\n"); 1236 break; 1237 case MSGBUF_TYPE_IOCTL_CMPLT: 1238 brcmf_dbg(MSGBUF, "MSGBUF_TYPE_IOCTL_CMPLT\n"); 1239 brcmf_msgbuf_process_ioctl_complete(msgbuf, buf); 1240 break; 1241 case MSGBUF_TYPE_WL_EVENT: 1242 brcmf_dbg(MSGBUF, "MSGBUF_TYPE_WL_EVENT\n"); 1243 brcmf_msgbuf_process_event(msgbuf, buf); 1244 break; 1245 case MSGBUF_TYPE_TX_STATUS: 1246 brcmf_dbg(MSGBUF, "MSGBUF_TYPE_TX_STATUS\n"); 1247 brcmf_msgbuf_process_txstatus(msgbuf, buf); 1248 break; 1249 case MSGBUF_TYPE_RX_CMPLT: 1250 brcmf_dbg(MSGBUF, "MSGBUF_TYPE_RX_CMPLT\n"); 1251 brcmf_msgbuf_process_rx_complete(msgbuf, buf); 1252 break; 1253 default: 1254 brcmf_err("Unsupported msgtype %d\n", msg->msgtype); 1255 break; 1256 } 1257 } 1258 1259 1260 static void brcmf_msgbuf_process_rx(struct brcmf_msgbuf *msgbuf, 1261 struct brcmf_commonring *commonring) 1262 { 1263 void *buf; 1264 u16 count; 1265 u16 processed; 1266 1267 again: 1268 buf = brcmf_commonring_get_read_ptr(commonring, &count); 1269 if (buf == NULL) 1270 return; 1271 1272 processed = 0; 1273 while (count) { 1274 brcmf_msgbuf_process_msgtype(msgbuf, 1275 buf + msgbuf->rx_dataoffset); 1276 buf += brcmf_commonring_len_item(commonring); 1277 processed++; 1278 if (processed == BRCMF_MSGBUF_UPDATE_RX_PTR_THRS) { 1279 brcmf_commonring_read_complete(commonring, processed); 1280 processed = 0; 1281 } 1282 count--; 1283 } 1284 if (processed) 1285 brcmf_commonring_read_complete(commonring, processed); 1286 1287 if (commonring->r_ptr == 0) 1288 goto again; 1289 } 1290 1291 1292 int brcmf_proto_msgbuf_rx_trigger(struct device *dev) 1293 { 1294 struct brcmf_bus *bus_if = dev_get_drvdata(dev); 1295 struct brcmf_pub *drvr = bus_if->drvr; 1296 struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd; 1297 struct brcmf_commonring *commonring; 1298 void *buf; 1299 u32 flowid; 1300 int qlen; 1301 1302 buf = msgbuf->commonrings[BRCMF_D2H_MSGRING_RX_COMPLETE]; 1303 brcmf_msgbuf_process_rx(msgbuf, buf); 1304 buf = msgbuf->commonrings[BRCMF_D2H_MSGRING_TX_COMPLETE]; 1305 brcmf_msgbuf_process_rx(msgbuf, buf); 1306 buf = msgbuf->commonrings[BRCMF_D2H_MSGRING_CONTROL_COMPLETE]; 1307 brcmf_msgbuf_process_rx(msgbuf, buf); 1308 1309 for_each_set_bit(flowid, msgbuf->txstatus_done_map, 1310 msgbuf->nrof_flowrings) { 1311 clear_bit(flowid, msgbuf->txstatus_done_map); 1312 commonring = msgbuf->flowrings[flowid]; 1313 qlen = brcmf_flowring_qlen(msgbuf->flow, flowid); 1314 if ((qlen > BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS) || 1315 ((qlen) && (atomic_read(&commonring->outstanding_tx) < 1316 BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS))) 1317 brcmf_msgbuf_schedule_txdata(msgbuf, flowid, true); 1318 } 1319 1320 return 0; 1321 } 1322 1323 1324 void brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u16 flowid) 1325 { 1326 struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd; 1327 struct msgbuf_tx_flowring_delete_req *delete; 1328 struct brcmf_commonring *commonring; 1329 void *ret_ptr; 1330 u8 ifidx; 1331 int err; 1332 1333 commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_CONTROL_SUBMIT]; 1334 brcmf_commonring_lock(commonring); 1335 ret_ptr = brcmf_commonring_reserve_for_write(commonring); 1336 if (!ret_ptr) { 1337 brcmf_err("FW unaware, flowring will be removed !!\n"); 1338 brcmf_commonring_unlock(commonring); 1339 brcmf_msgbuf_remove_flowring(msgbuf, flowid); 1340 return; 1341 } 1342 1343 delete = (struct msgbuf_tx_flowring_delete_req *)ret_ptr; 1344 1345 ifidx = brcmf_flowring_ifidx_get(msgbuf->flow, flowid); 1346 1347 delete->msg.msgtype = MSGBUF_TYPE_FLOW_RING_DELETE; 1348 delete->msg.ifidx = ifidx; 1349 delete->msg.request_id = 0; 1350 1351 delete->flow_ring_id = cpu_to_le16(flowid + 1352 BRCMF_NROF_H2D_COMMON_MSGRINGS); 1353 delete->reason = 0; 1354 1355 brcmf_dbg(MSGBUF, "Send Flow Delete Req flow ID %d, ifindex %d\n", 1356 flowid, ifidx); 1357 1358 err = brcmf_commonring_write_complete(commonring); 1359 brcmf_commonring_unlock(commonring); 1360 if (err) { 1361 brcmf_err("Failed to submit RING_DELETE, flowring will be removed\n"); 1362 brcmf_msgbuf_remove_flowring(msgbuf, flowid); 1363 } 1364 } 1365 1366 #ifdef DEBUG 1367 static int brcmf_msgbuf_stats_read(struct seq_file *seq, void *data) 1368 { 1369 struct brcmf_bus *bus_if = dev_get_drvdata(seq->private); 1370 struct brcmf_pub *drvr = bus_if->drvr; 1371 struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd; 1372 struct brcmf_commonring *commonring; 1373 u16 i; 1374 struct brcmf_flowring_ring *ring; 1375 struct brcmf_flowring_hash *hash; 1376 1377 commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_CONTROL_SUBMIT]; 1378 seq_printf(seq, "h2d_ctl_submit: rp %4u, wp %4u, depth %4u\n", 1379 commonring->r_ptr, commonring->w_ptr, commonring->depth); 1380 commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_RXPOST_SUBMIT]; 1381 seq_printf(seq, "h2d_rx_submit: rp %4u, wp %4u, depth %4u\n", 1382 commonring->r_ptr, commonring->w_ptr, commonring->depth); 1383 commonring = msgbuf->commonrings[BRCMF_D2H_MSGRING_CONTROL_COMPLETE]; 1384 seq_printf(seq, "d2h_ctl_cmplt: rp %4u, wp %4u, depth %4u\n", 1385 commonring->r_ptr, commonring->w_ptr, commonring->depth); 1386 commonring = msgbuf->commonrings[BRCMF_D2H_MSGRING_TX_COMPLETE]; 1387 seq_printf(seq, "d2h_tx_cmplt: rp %4u, wp %4u, depth %4u\n", 1388 commonring->r_ptr, commonring->w_ptr, commonring->depth); 1389 commonring = msgbuf->commonrings[BRCMF_D2H_MSGRING_RX_COMPLETE]; 1390 seq_printf(seq, "d2h_rx_cmplt: rp %4u, wp %4u, depth %4u\n", 1391 commonring->r_ptr, commonring->w_ptr, commonring->depth); 1392 1393 seq_printf(seq, "\nh2d_flowrings: depth %u\n", 1394 BRCMF_H2D_TXFLOWRING_MAX_ITEM); 1395 seq_puts(seq, "Active flowrings:\n"); 1396 hash = msgbuf->flow->hash; 1397 for (i = 0; i < msgbuf->flow->nrofrings; i++) { 1398 if (!msgbuf->flow->rings[i]) 1399 continue; 1400 ring = msgbuf->flow->rings[i]; 1401 if (ring->status != RING_OPEN) 1402 continue; 1403 commonring = msgbuf->flowrings[i]; 1404 hash = &msgbuf->flow->hash[ring->hash_id]; 1405 seq_printf(seq, "id %3u: rp %4u, wp %4u, qlen %4u, blocked %u\n" 1406 " ifidx %u, fifo %u, da %pM\n", 1407 i, commonring->r_ptr, commonring->w_ptr, 1408 skb_queue_len(&ring->skblist), ring->blocked, 1409 hash->ifidx, hash->fifo, hash->mac); 1410 } 1411 1412 return 0; 1413 } 1414 #else 1415 static int brcmf_msgbuf_stats_read(struct seq_file *seq, void *data) 1416 { 1417 return 0; 1418 } 1419 #endif 1420 1421 int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr) 1422 { 1423 struct brcmf_bus_msgbuf *if_msgbuf; 1424 struct brcmf_msgbuf *msgbuf; 1425 u64 address; 1426 u32 count; 1427 1428 if_msgbuf = drvr->bus_if->msgbuf; 1429 1430 if (if_msgbuf->nrof_flowrings >= BRCMF_FLOWRING_HASHSIZE) { 1431 brcmf_err("driver not configured for this many flowrings %d\n", 1432 if_msgbuf->nrof_flowrings); 1433 if_msgbuf->nrof_flowrings = BRCMF_FLOWRING_HASHSIZE - 1; 1434 } 1435 1436 msgbuf = kzalloc(sizeof(*msgbuf), GFP_KERNEL); 1437 if (!msgbuf) 1438 goto fail; 1439 1440 msgbuf->txflow_wq = create_singlethread_workqueue("msgbuf_txflow"); 1441 if (msgbuf->txflow_wq == NULL) { 1442 brcmf_err("workqueue creation failed\n"); 1443 goto fail; 1444 } 1445 INIT_WORK(&msgbuf->txflow_work, brcmf_msgbuf_txflow_worker); 1446 count = BITS_TO_LONGS(if_msgbuf->nrof_flowrings); 1447 count = count * sizeof(unsigned long); 1448 msgbuf->flow_map = kzalloc(count, GFP_KERNEL); 1449 if (!msgbuf->flow_map) 1450 goto fail; 1451 1452 msgbuf->txstatus_done_map = kzalloc(count, GFP_KERNEL); 1453 if (!msgbuf->txstatus_done_map) 1454 goto fail; 1455 1456 msgbuf->drvr = drvr; 1457 msgbuf->ioctbuf = dma_alloc_coherent(drvr->bus_if->dev, 1458 BRCMF_TX_IOCTL_MAX_MSG_SIZE, 1459 &msgbuf->ioctbuf_handle, 1460 GFP_KERNEL); 1461 if (!msgbuf->ioctbuf) 1462 goto fail; 1463 address = (u64)msgbuf->ioctbuf_handle; 1464 msgbuf->ioctbuf_phys_hi = address >> 32; 1465 msgbuf->ioctbuf_phys_lo = address & 0xffffffff; 1466 1467 drvr->proto->hdrpull = brcmf_msgbuf_hdrpull; 1468 drvr->proto->query_dcmd = brcmf_msgbuf_query_dcmd; 1469 drvr->proto->set_dcmd = brcmf_msgbuf_set_dcmd; 1470 drvr->proto->txdata = brcmf_msgbuf_txdata; 1471 drvr->proto->configure_addr_mode = brcmf_msgbuf_configure_addr_mode; 1472 drvr->proto->delete_peer = brcmf_msgbuf_delete_peer; 1473 drvr->proto->add_tdls_peer = brcmf_msgbuf_add_tdls_peer; 1474 drvr->proto->rxreorder = brcmf_msgbuf_rxreorder; 1475 drvr->proto->pd = msgbuf; 1476 1477 init_waitqueue_head(&msgbuf->ioctl_resp_wait); 1478 1479 msgbuf->commonrings = 1480 (struct brcmf_commonring **)if_msgbuf->commonrings; 1481 msgbuf->flowrings = (struct brcmf_commonring **)if_msgbuf->flowrings; 1482 msgbuf->nrof_flowrings = if_msgbuf->nrof_flowrings; 1483 msgbuf->flowring_dma_handle = kzalloc(msgbuf->nrof_flowrings * 1484 sizeof(*msgbuf->flowring_dma_handle), GFP_KERNEL); 1485 if (!msgbuf->flowring_dma_handle) 1486 goto fail; 1487 1488 msgbuf->rx_dataoffset = if_msgbuf->rx_dataoffset; 1489 msgbuf->max_rxbufpost = if_msgbuf->max_rxbufpost; 1490 1491 msgbuf->max_ioctlrespbuf = BRCMF_MSGBUF_MAX_IOCTLRESPBUF_POST; 1492 msgbuf->max_eventbuf = BRCMF_MSGBUF_MAX_EVENTBUF_POST; 1493 1494 msgbuf->tx_pktids = brcmf_msgbuf_init_pktids(NR_TX_PKTIDS, 1495 DMA_TO_DEVICE); 1496 if (!msgbuf->tx_pktids) 1497 goto fail; 1498 msgbuf->rx_pktids = brcmf_msgbuf_init_pktids(NR_RX_PKTIDS, 1499 DMA_FROM_DEVICE); 1500 if (!msgbuf->rx_pktids) 1501 goto fail; 1502 1503 msgbuf->flow = brcmf_flowring_attach(drvr->bus_if->dev, 1504 if_msgbuf->nrof_flowrings); 1505 if (!msgbuf->flow) 1506 goto fail; 1507 1508 1509 brcmf_dbg(MSGBUF, "Feeding buffers, rx data %d, rx event %d, rx ioctl resp %d\n", 1510 msgbuf->max_rxbufpost, msgbuf->max_eventbuf, 1511 msgbuf->max_ioctlrespbuf); 1512 count = 0; 1513 do { 1514 brcmf_msgbuf_rxbuf_data_fill(msgbuf); 1515 if (msgbuf->max_rxbufpost != msgbuf->rxbufpost) 1516 msleep(10); 1517 else 1518 break; 1519 count++; 1520 } while (count < 10); 1521 brcmf_msgbuf_rxbuf_event_post(msgbuf); 1522 brcmf_msgbuf_rxbuf_ioctlresp_post(msgbuf); 1523 1524 INIT_WORK(&msgbuf->flowring_work, brcmf_msgbuf_flowring_worker); 1525 spin_lock_init(&msgbuf->flowring_work_lock); 1526 INIT_LIST_HEAD(&msgbuf->work_queue); 1527 1528 brcmf_debugfs_add_entry(drvr, "msgbuf_stats", brcmf_msgbuf_stats_read); 1529 1530 return 0; 1531 1532 fail: 1533 if (msgbuf) { 1534 kfree(msgbuf->flow_map); 1535 kfree(msgbuf->txstatus_done_map); 1536 brcmf_msgbuf_release_pktids(msgbuf); 1537 kfree(msgbuf->flowring_dma_handle); 1538 if (msgbuf->ioctbuf) 1539 dma_free_coherent(drvr->bus_if->dev, 1540 BRCMF_TX_IOCTL_MAX_MSG_SIZE, 1541 msgbuf->ioctbuf, 1542 msgbuf->ioctbuf_handle); 1543 kfree(msgbuf); 1544 } 1545 return -ENOMEM; 1546 } 1547 1548 1549 void brcmf_proto_msgbuf_detach(struct brcmf_pub *drvr) 1550 { 1551 struct brcmf_msgbuf *msgbuf; 1552 struct brcmf_msgbuf_work_item *work; 1553 1554 brcmf_dbg(TRACE, "Enter\n"); 1555 if (drvr->proto->pd) { 1556 msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd; 1557 cancel_work_sync(&msgbuf->flowring_work); 1558 while (!list_empty(&msgbuf->work_queue)) { 1559 work = list_first_entry(&msgbuf->work_queue, 1560 struct brcmf_msgbuf_work_item, 1561 queue); 1562 list_del(&work->queue); 1563 kfree(work); 1564 } 1565 kfree(msgbuf->flow_map); 1566 kfree(msgbuf->txstatus_done_map); 1567 if (msgbuf->txflow_wq) 1568 destroy_workqueue(msgbuf->txflow_wq); 1569 1570 brcmf_flowring_detach(msgbuf->flow); 1571 dma_free_coherent(drvr->bus_if->dev, 1572 BRCMF_TX_IOCTL_MAX_MSG_SIZE, 1573 msgbuf->ioctbuf, msgbuf->ioctbuf_handle); 1574 brcmf_msgbuf_release_pktids(msgbuf); 1575 kfree(msgbuf->flowring_dma_handle); 1576 kfree(msgbuf); 1577 drvr->proto->pd = NULL; 1578 } 1579 } 1580