/* Copyright (c) 2014 Broadcom Corporation
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*******************************************************************************
 * Communicates with the dongle by using dcmd codes.
 * For certain dcmd codes, the dongle interprets string data from the host.
 ******************************************************************************/

#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>

#include <brcmu_utils.h>
#include <brcmu_wifi.h>

#include "core.h"
#include "debug.h"
#include "proto.h"
#include "msgbuf.h"
#include "commonring.h"
#include "flowring.h"
#include "bus.h"
#include "tracepoint.h"


#define MSGBUF_IOCTL_RESP_TIMEOUT		msecs_to_jiffies(2000)

#define MSGBUF_TYPE_GEN_STATUS			0x1
#define MSGBUF_TYPE_RING_STATUS			0x2
#define MSGBUF_TYPE_FLOW_RING_CREATE		0x3
#define MSGBUF_TYPE_FLOW_RING_CREATE_CMPLT	0x4
#define MSGBUF_TYPE_FLOW_RING_DELETE		0x5
#define MSGBUF_TYPE_FLOW_RING_DELETE_CMPLT	0x6
#define MSGBUF_TYPE_FLOW_RING_FLUSH		0x7
#define MSGBUF_TYPE_FLOW_RING_FLUSH_CMPLT	0x8
#define MSGBUF_TYPE_IOCTLPTR_REQ		0x9
#define MSGBUF_TYPE_IOCTLPTR_REQ_ACK		0xA
#define MSGBUF_TYPE_IOCTLRESP_BUF_POST		0xB
#define MSGBUF_TYPE_IOCTL_CMPLT			0xC
#define MSGBUF_TYPE_EVENT_BUF_POST		0xD
#define MSGBUF_TYPE_WL_EVENT			0xE
#define MSGBUF_TYPE_TX_POST			0xF
#define MSGBUF_TYPE_TX_STATUS			0x10
#define MSGBUF_TYPE_RXBUF_POST			0x11
#define MSGBUF_TYPE_RX_CMPLT			0x12
#define MSGBUF_TYPE_LPBK_DMAXFER		0x13
#define MSGBUF_TYPE_LPBK_DMAXFER_CMPLT		0x14

#define NR_TX_PKTIDS				2048
#define NR_RX_PKTIDS				1024

#define BRCMF_IOCTL_REQ_PKTID			0xFFFE

#define BRCMF_MSGBUF_MAX_PKT_SIZE		2048
#define BRCMF_MSGBUF_RXBUFPOST_THRESHOLD	32
#define BRCMF_MSGBUF_MAX_IOCTLRESPBUF_POST	8
#define BRCMF_MSGBUF_MAX_EVENTBUF_POST		8

#define BRCMF_MSGBUF_PKT_FLAGS_FRAME_802_3	0x01
#define BRCMF_MSGBUF_PKT_FLAGS_FRAME_802_11	0x02
#define BRCMF_MSGBUF_PKT_FLAGS_FRAME_MASK	0x07
#define BRCMF_MSGBUF_PKT_FLAGS_PRIO_SHIFT	5

#define BRCMF_MSGBUF_TX_FLUSH_CNT1		32
#define BRCMF_MSGBUF_TX_FLUSH_CNT2		96

#define BRCMF_MSGBUF_DELAY_TXWORKER_THRS	96
#define BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS	32
#define BRCMF_MSGBUF_UPDATE_RX_PTR_THRS		48


struct msgbuf_common_hdr {
	u8				msgtype;
	u8				ifidx;
	u8				flags;
	u8				rsvd0;
	__le32				request_id;
};

struct msgbuf_ioctl_req_hdr {
	struct msgbuf_common_hdr	msg;
	__le32				cmd;
	__le16				trans_id;
	__le16				input_buf_len;
	__le16				output_buf_len;
	__le16				rsvd0[3];
	struct msgbuf_buf_addr		req_buf_addr;
	__le32				rsvd1[2];
};
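/* Host-to-device TX post descriptor: the ethernet header travels inline in
 * txhdr while the rest of the frame is handed to the device by DMA address.
 */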
struct msgbuf_tx_msghdr {
	struct msgbuf_common_hdr	msg;
	u8				txhdr[ETH_HLEN];
	u8				flags;
	u8				seg_cnt;
	struct msgbuf_buf_addr		metadata_buf_addr;
	struct msgbuf_buf_addr		data_buf_addr;
	__le16				metadata_buf_len;
	__le16				data_len;
	__le32				rsvd0;
};

struct msgbuf_rx_bufpost {
	struct msgbuf_common_hdr	msg;
	__le16				metadata_buf_len;
	__le16				data_buf_len;
	__le32				rsvd0;
	struct msgbuf_buf_addr		metadata_buf_addr;
	struct msgbuf_buf_addr		data_buf_addr;
};

struct msgbuf_rx_ioctl_resp_or_event {
	struct msgbuf_common_hdr	msg;
	__le16				host_buf_len;
	__le16				rsvd0[3];
	struct msgbuf_buf_addr		host_buf_addr;
	__le32				rsvd1[4];
};

struct msgbuf_completion_hdr {
	__le16				status;
	__le16				flow_ring_id;
};

struct msgbuf_rx_event {
	struct msgbuf_common_hdr	msg;
	struct msgbuf_completion_hdr	compl_hdr;
	__le16				event_data_len;
	__le16				seqnum;
	__le16				rsvd0[4];
};

struct msgbuf_ioctl_resp_hdr {
	struct msgbuf_common_hdr	msg;
	struct msgbuf_completion_hdr	compl_hdr;
	__le16				resp_len;
	__le16				trans_id;
	__le32				cmd;
	__le32				rsvd0;
};

struct msgbuf_tx_status {
	struct msgbuf_common_hdr	msg;
	struct msgbuf_completion_hdr	compl_hdr;
	__le16				metadata_len;
	__le16				tx_status;
};

struct msgbuf_rx_complete {
	struct msgbuf_common_hdr	msg;
	struct msgbuf_completion_hdr	compl_hdr;
	__le16				metadata_len;
	__le16				data_len;
	__le16				data_offset;
	__le16				flags;
	__le32				rx_status_0;
	__le32				rx_status_1;
	__le32				rsvd0;
};

struct msgbuf_tx_flowring_create_req {
	struct msgbuf_common_hdr	msg;
	u8				da[ETH_ALEN];
	u8				sa[ETH_ALEN];
	u8				tid;
	u8				if_flags;
	__le16				flow_ring_id;
	u8				tc;
	u8				priority;
	__le16				int_vector;
	__le16				max_items;
	__le16				len_item;
	struct msgbuf_buf_addr		flow_ring_addr;
};

struct msgbuf_tx_flowring_delete_req {
	struct msgbuf_common_hdr	msg;
	__le16				flow_ring_id;
	__le16				reason;
	__le32				rsvd0[7];
};

struct msgbuf_flowring_create_resp {
	struct msgbuf_common_hdr	msg;
	struct msgbuf_completion_hdr	compl_hdr;
	__le32				rsvd0[3];
};

struct msgbuf_flowring_delete_resp {
	struct msgbuf_common_hdr	msg;
	struct msgbuf_completion_hdr	compl_hdr;
	__le32				rsvd0[3];
};

struct msgbuf_flowring_flush_resp {
	struct msgbuf_common_hdr	msg;
	struct msgbuf_completion_hdr	compl_hdr;
	__le32				rsvd0[3];
};

struct brcmf_msgbuf_work_item {
	struct list_head queue;
	u32 flowid;
	int ifidx;
	u8 sa[ETH_ALEN];
	u8 da[ETH_ALEN];
};
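/* Per-device msgbuf protocol state, reachable through drvr->proto->pd. */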
struct brcmf_msgbuf {
	struct brcmf_pub *drvr;

	struct brcmf_commonring **commonrings;
	struct brcmf_commonring **flowrings;
	dma_addr_t *flowring_dma_handle;

	u16 max_flowrings;
	u16 max_submissionrings;
	u16 max_completionrings;

	u16 rx_dataoffset;
	u32 max_rxbufpost;
	u16 rx_metadata_offset;
	u32 rxbufpost;

	u32 max_ioctlrespbuf;
	u32 cur_ioctlrespbuf;
	u32 max_eventbuf;
	u32 cur_eventbuf;

	void *ioctbuf;
	dma_addr_t ioctbuf_handle;
	u32 ioctbuf_phys_hi;
	u32 ioctbuf_phys_lo;
	int ioctl_resp_status;
	u32 ioctl_resp_ret_len;
	u32 ioctl_resp_pktid;

	u16 data_seq_no;
	u16 ioctl_seq_no;
	u32 reqid;
	wait_queue_head_t ioctl_resp_wait;
	bool ctl_completed;

	struct brcmf_msgbuf_pktids *tx_pktids;
	struct brcmf_msgbuf_pktids *rx_pktids;
	struct brcmf_flowring *flow;

	struct workqueue_struct *txflow_wq;
	struct work_struct txflow_work;
	unsigned long *flow_map;
	unsigned long *txstatus_done_map;

	struct work_struct flowring_work;
	spinlock_t flowring_work_lock;
	struct list_head work_queue;
};

struct brcmf_msgbuf_pktid {
	atomic_t allocated;
	u16 data_offset;
	struct sk_buff *skb;
	dma_addr_t physaddr;
};

struct brcmf_msgbuf_pktids {
	u32 array_size;
	u32 last_allocated_idx;
	enum dma_data_direction direction;
	struct brcmf_msgbuf_pktid *array;
};

static void brcmf_msgbuf_rxbuf_ioctlresp_post(struct brcmf_msgbuf *msgbuf);


static struct brcmf_msgbuf_pktids *
brcmf_msgbuf_init_pktids(u32 nr_array_entries,
			 enum dma_data_direction direction)
{
	struct brcmf_msgbuf_pktid *array;
	struct brcmf_msgbuf_pktids *pktids;

	array = kcalloc(nr_array_entries, sizeof(*array), GFP_KERNEL);
	if (!array)
		return NULL;

	pktids = kzalloc(sizeof(*pktids), GFP_KERNEL);
	if (!pktids) {
		kfree(array);
		return NULL;
	}
	pktids->array = array;
	pktids->array_size = nr_array_entries;

	return pktids;
}


static int
brcmf_msgbuf_alloc_pktid(struct device *dev,
			 struct brcmf_msgbuf_pktids *pktids,
			 struct sk_buff *skb, u16 data_offset,
			 dma_addr_t *physaddr, u32 *idx)
{
	struct brcmf_msgbuf_pktid *array;
	u32 count;

	array = pktids->array;

	*physaddr = dma_map_single(dev, skb->data + data_offset,
				   skb->len - data_offset, pktids->direction);

	if (dma_mapping_error(dev, *physaddr)) {
		brcmf_err("dma_map_single failed !!\n");
		return -ENOMEM;
	}

	*idx = pktids->last_allocated_idx;

	count = 0;
	do {
		(*idx)++;
		if (*idx == pktids->array_size)
			*idx = 0;
		if (array[*idx].allocated.counter == 0)
			if (atomic_cmpxchg(&array[*idx].allocated, 0, 1) == 0)
				break;
		count++;
	} while (count < pktids->array_size);

	if (count == pktids->array_size)
		return -ENOMEM;

	array[*idx].data_offset = data_offset;
	array[*idx].physaddr = *physaddr;
	array[*idx].skb = skb;

	pktids->last_allocated_idx = *idx;

	return 0;
}


static struct sk_buff *
brcmf_msgbuf_get_pktid(struct device *dev, struct brcmf_msgbuf_pktids *pktids,
		       u32 idx)
{
	struct brcmf_msgbuf_pktid *pktid;
	struct sk_buff *skb;

	if (idx >= pktids->array_size) {
		brcmf_err("Invalid packet id %d (max %d)\n", idx,
			  pktids->array_size);
		return NULL;
	}
	if (pktids->array[idx].allocated.counter) {
		pktid = &pktids->array[idx];
		dma_unmap_single(dev, pktid->physaddr,
				 pktid->skb->len - pktid->data_offset,
				 pktids->direction);
		skb = pktid->skb;
		pktid->allocated.counter = 0;
		return skb;
	} else {
		brcmf_err("Invalid packet id %d (not in use)\n", idx);
	}

	return NULL;
}


static void
brcmf_msgbuf_release_array(struct device *dev,
			   struct brcmf_msgbuf_pktids *pktids)
{
	struct brcmf_msgbuf_pktid *array;
	struct brcmf_msgbuf_pktid *pktid;
	u32 count;

	array = pktids->array;
	count = 0;
	do {
		if (array[count].allocated.counter) {
			pktid = &array[count];
			dma_unmap_single(dev, pktid->physaddr,
					 pktid->skb->len - pktid->data_offset,
					 pktids->direction);
			brcmu_pkt_buf_free_skb(pktid->skb);
		}
		count++;
	} while (count < pktids->array_size);

	kfree(array);
	kfree(pktids);
}


static void brcmf_msgbuf_release_pktids(struct brcmf_msgbuf *msgbuf)
{
	if (msgbuf->rx_pktids)
		brcmf_msgbuf_release_array(msgbuf->drvr->bus_if->dev,
					   msgbuf->rx_pktids);
	if (msgbuf->tx_pktids)
		brcmf_msgbuf_release_array(msgbuf->drvr->bus_if->dev,
					   msgbuf->tx_pktids);
}
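/* Build an ioctl request in the control submit ring. The payload is staged
 * in the preallocated DMA-coherent ioctbuf; only its bus address travels in
 * the ring entry.
 */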
static int brcmf_msgbuf_tx_ioctl(struct brcmf_pub *drvr, int ifidx,
				 uint cmd, void *buf, uint len)
{
	struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
	struct brcmf_commonring *commonring;
	struct msgbuf_ioctl_req_hdr *request;
	u16 buf_len;
	void *ret_ptr;
	int err;

	commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_CONTROL_SUBMIT];
	brcmf_commonring_lock(commonring);
	ret_ptr = brcmf_commonring_reserve_for_write(commonring);
	if (!ret_ptr) {
		brcmf_err("Failed to reserve space in commonring\n");
		brcmf_commonring_unlock(commonring);
		return -ENOMEM;
	}

	msgbuf->reqid++;

	request = (struct msgbuf_ioctl_req_hdr *)ret_ptr;
	request->msg.msgtype = MSGBUF_TYPE_IOCTLPTR_REQ;
	request->msg.ifidx = (u8)ifidx;
	request->msg.flags = 0;
	request->msg.request_id = cpu_to_le32(BRCMF_IOCTL_REQ_PKTID);
	request->cmd = cpu_to_le32(cmd);
	request->output_buf_len = cpu_to_le16(len);
	request->trans_id = cpu_to_le16(msgbuf->reqid);

	buf_len = min_t(u16, len, BRCMF_TX_IOCTL_MAX_MSG_SIZE);
	request->input_buf_len = cpu_to_le16(buf_len);
	request->req_buf_addr.high_addr = cpu_to_le32(msgbuf->ioctbuf_phys_hi);
	request->req_buf_addr.low_addr = cpu_to_le32(msgbuf->ioctbuf_phys_lo);
	if (buf)
		memcpy(msgbuf->ioctbuf, buf, buf_len);
	else
		memset(msgbuf->ioctbuf, 0, buf_len);

	err = brcmf_commonring_write_complete(commonring);
	brcmf_commonring_unlock(commonring);

	return err;
}


static int brcmf_msgbuf_ioctl_resp_wait(struct brcmf_msgbuf *msgbuf)
{
	return wait_event_timeout(msgbuf->ioctl_resp_wait,
				  msgbuf->ctl_completed,
				  MSGBUF_IOCTL_RESP_TIMEOUT);
}


static void brcmf_msgbuf_ioctl_resp_wake(struct brcmf_msgbuf *msgbuf)
{
	msgbuf->ctl_completed = true;
	wake_up(&msgbuf->ioctl_resp_wait);
}
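/* Send a query dcmd and sleep until the matching MSGBUF_TYPE_IOCTL_CMPLT
 * arrives or MSGBUF_IOCTL_RESP_TIMEOUT expires; the response payload is then
 * copied out of the posted response buffer identified by ioctl_resp_pktid.
 */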
static int brcmf_msgbuf_query_dcmd(struct brcmf_pub *drvr, int ifidx,
				   uint cmd, void *buf, uint len, int *fwerr)
{
	struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
	struct sk_buff *skb = NULL;
	int timeout;
	int err;

	brcmf_dbg(MSGBUF, "ifidx=%d, cmd=%d, len=%d\n", ifidx, cmd, len);
	*fwerr = 0;
	msgbuf->ctl_completed = false;
	err = brcmf_msgbuf_tx_ioctl(drvr, ifidx, cmd, buf, len);
	if (err)
		return err;

	timeout = brcmf_msgbuf_ioctl_resp_wait(msgbuf);
	if (!timeout) {
		brcmf_err("Timeout on response for query command\n");
		return -EIO;
	}

	skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
				     msgbuf->rx_pktids,
				     msgbuf->ioctl_resp_pktid);
	if (msgbuf->ioctl_resp_ret_len != 0) {
		if (!skb)
			return -EBADF;

		memcpy(buf, skb->data, (len < msgbuf->ioctl_resp_ret_len) ?
				       len : msgbuf->ioctl_resp_ret_len);
	}
	brcmu_pkt_buf_free_skb(skb);

	*fwerr = msgbuf->ioctl_resp_status;
	return 0;
}


static int brcmf_msgbuf_set_dcmd(struct brcmf_pub *drvr, int ifidx,
				 uint cmd, void *buf, uint len, int *fwerr)
{
	return brcmf_msgbuf_query_dcmd(drvr, ifidx, cmd, buf, len, fwerr);
}


static int brcmf_msgbuf_hdrpull(struct brcmf_pub *drvr, bool do_fws,
				struct sk_buff *skb, struct brcmf_if **ifp)
{
	return -ENODEV;
}

static void brcmf_msgbuf_rxreorder(struct brcmf_if *ifp, struct sk_buff *skb)
{
}

static void
brcmf_msgbuf_remove_flowring(struct brcmf_msgbuf *msgbuf, u16 flowid)
{
	u32 dma_sz;
	void *dma_buf;

	brcmf_dbg(MSGBUF, "Removing flowring %d\n", flowid);

	dma_sz = BRCMF_H2D_TXFLOWRING_MAX_ITEM * BRCMF_H2D_TXFLOWRING_ITEMSIZE;
	dma_buf = msgbuf->flowrings[flowid]->buf_addr;
	dma_free_coherent(msgbuf->drvr->bus_if->dev, dma_sz, dma_buf,
			  msgbuf->flowring_dma_handle[flowid]);

	brcmf_flowring_delete(msgbuf->flow, flowid);
}


static struct brcmf_msgbuf_work_item *
brcmf_msgbuf_dequeue_work(struct brcmf_msgbuf *msgbuf)
{
	struct brcmf_msgbuf_work_item *work = NULL;
	ulong flags;

	spin_lock_irqsave(&msgbuf->flowring_work_lock, flags);
	if (!list_empty(&msgbuf->work_queue)) {
		work = list_first_entry(&msgbuf->work_queue,
					struct brcmf_msgbuf_work_item, queue);
		list_del(&work->queue);
	}
	spin_unlock_irqrestore(&msgbuf->flowring_work_lock, flags);

	return work;
}
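/* Allocate the DMA ring for a new flowring and send the FLOW_RING_CREATE
 * request. This runs from the flowring work item, so the coherent allocation
 * may use GFP_KERNEL even though the ring was requested from atomic context.
 */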
static u32
brcmf_msgbuf_flowring_create_worker(struct brcmf_msgbuf *msgbuf,
				    struct brcmf_msgbuf_work_item *work)
{
	struct msgbuf_tx_flowring_create_req *create;
	struct brcmf_commonring *commonring;
	void *ret_ptr;
	u32 flowid;
	void *dma_buf;
	u32 dma_sz;
	u64 address;
	int err;

	flowid = work->flowid;
	dma_sz = BRCMF_H2D_TXFLOWRING_MAX_ITEM * BRCMF_H2D_TXFLOWRING_ITEMSIZE;
	dma_buf = dma_alloc_coherent(msgbuf->drvr->bus_if->dev, dma_sz,
				     &msgbuf->flowring_dma_handle[flowid],
				     GFP_KERNEL);
	if (!dma_buf) {
		brcmf_err("dma_alloc_coherent failed\n");
		brcmf_flowring_delete(msgbuf->flow, flowid);
		return BRCMF_FLOWRING_INVALID_ID;
	}

	brcmf_commonring_config(msgbuf->flowrings[flowid],
				BRCMF_H2D_TXFLOWRING_MAX_ITEM,
				BRCMF_H2D_TXFLOWRING_ITEMSIZE, dma_buf);

	commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_CONTROL_SUBMIT];
	brcmf_commonring_lock(commonring);
	ret_ptr = brcmf_commonring_reserve_for_write(commonring);
	if (!ret_ptr) {
		brcmf_err("Failed to reserve space in commonring\n");
		brcmf_commonring_unlock(commonring);
		brcmf_msgbuf_remove_flowring(msgbuf, flowid);
		return BRCMF_FLOWRING_INVALID_ID;
	}

	create = (struct msgbuf_tx_flowring_create_req *)ret_ptr;
	create->msg.msgtype = MSGBUF_TYPE_FLOW_RING_CREATE;
	create->msg.ifidx = work->ifidx;
	create->msg.request_id = 0;
	create->tid = brcmf_flowring_tid(msgbuf->flow, flowid);
	create->flow_ring_id = cpu_to_le16(flowid +
					   BRCMF_H2D_MSGRING_FLOWRING_IDSTART);
	memcpy(create->sa, work->sa, ETH_ALEN);
	memcpy(create->da, work->da, ETH_ALEN);
	address = (u64)msgbuf->flowring_dma_handle[flowid];
	create->flow_ring_addr.high_addr = cpu_to_le32(address >> 32);
	create->flow_ring_addr.low_addr = cpu_to_le32(address & 0xffffffff);
	create->max_items = cpu_to_le16(BRCMF_H2D_TXFLOWRING_MAX_ITEM);
	create->len_item = cpu_to_le16(BRCMF_H2D_TXFLOWRING_ITEMSIZE);

	brcmf_dbg(MSGBUF, "Send Flow Create Req flow ID %d for peer %pM prio %d ifindex %d\n",
		  flowid, work->da, create->tid, work->ifidx);

	err = brcmf_commonring_write_complete(commonring);
	brcmf_commonring_unlock(commonring);
	if (err) {
		brcmf_err("Failed to write commonring\n");
		brcmf_msgbuf_remove_flowring(msgbuf, flowid);
		return BRCMF_FLOWRING_INVALID_ID;
	}

	return flowid;
}


static void brcmf_msgbuf_flowring_worker(struct work_struct *work)
{
	struct brcmf_msgbuf *msgbuf;
	struct brcmf_msgbuf_work_item *create;

	msgbuf = container_of(work, struct brcmf_msgbuf, flowring_work);

	while ((create = brcmf_msgbuf_dequeue_work(msgbuf))) {
		brcmf_msgbuf_flowring_create_worker(msgbuf, create);
		kfree(create);
	}
}


static u32 brcmf_msgbuf_flowring_create(struct brcmf_msgbuf *msgbuf, int ifidx,
					struct sk_buff *skb)
{
	struct brcmf_msgbuf_work_item *create;
	struct ethhdr *eh = (struct ethhdr *)(skb->data);
	u32 flowid;
	ulong flags;

	create = kzalloc(sizeof(*create), GFP_ATOMIC);
	if (create == NULL)
		return BRCMF_FLOWRING_INVALID_ID;

	flowid = brcmf_flowring_create(msgbuf->flow, eh->h_dest,
				       skb->priority, ifidx);
	if (flowid == BRCMF_FLOWRING_INVALID_ID) {
		kfree(create);
		return flowid;
	}

	create->flowid = flowid;
	create->ifidx = ifidx;
	memcpy(create->sa, eh->h_source, ETH_ALEN);
	memcpy(create->da, eh->h_dest, ETH_ALEN);

	spin_lock_irqsave(&msgbuf->flowring_work_lock, flags);
	list_add_tail(&create->queue, &msgbuf->work_queue);
	spin_unlock_irqrestore(&msgbuf->flowring_work_lock, flags);
	schedule_work(&msgbuf->flowring_work);

	return flowid;
}
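/* Move queued frames from a flowring's backlog into its TX flowring. The
 * doorbell is rung in batches: count starts at CNT2 - CNT1, so the first
 * write_complete happens after BRCMF_MSGBUF_TX_FLUSH_CNT1 frames and then
 * every BRCMF_MSGBUF_TX_FLUSH_CNT2 frames.
 */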
static void brcmf_msgbuf_txflow(struct brcmf_msgbuf *msgbuf, u16 flowid)
{
	struct brcmf_flowring *flow = msgbuf->flow;
	struct brcmf_commonring *commonring;
	void *ret_ptr;
	u32 count;
	struct sk_buff *skb;
	dma_addr_t physaddr;
	u32 pktid;
	struct msgbuf_tx_msghdr *tx_msghdr;
	u64 address;

	commonring = msgbuf->flowrings[flowid];
	if (!brcmf_commonring_write_available(commonring))
		return;

	brcmf_commonring_lock(commonring);

	count = BRCMF_MSGBUF_TX_FLUSH_CNT2 - BRCMF_MSGBUF_TX_FLUSH_CNT1;
	while (brcmf_flowring_qlen(flow, flowid)) {
		skb = brcmf_flowring_dequeue(flow, flowid);
		if (skb == NULL) {
			brcmf_err("No SKB, but qlen %d\n",
				  brcmf_flowring_qlen(flow, flowid));
			break;
		}
		skb_orphan(skb);
		if (brcmf_msgbuf_alloc_pktid(msgbuf->drvr->bus_if->dev,
					     msgbuf->tx_pktids, skb, ETH_HLEN,
					     &physaddr, &pktid)) {
			brcmf_flowring_reinsert(flow, flowid, skb);
			brcmf_err("No PKTID available !!\n");
			break;
		}
		ret_ptr = brcmf_commonring_reserve_for_write(commonring);
		if (!ret_ptr) {
			brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
					       msgbuf->tx_pktids, pktid);
			brcmf_flowring_reinsert(flow, flowid, skb);
			break;
		}
		count++;

		tx_msghdr = (struct msgbuf_tx_msghdr *)ret_ptr;

		tx_msghdr->msg.msgtype = MSGBUF_TYPE_TX_POST;
		tx_msghdr->msg.request_id = cpu_to_le32(pktid);
		tx_msghdr->msg.ifidx = brcmf_flowring_ifidx_get(flow, flowid);
		tx_msghdr->flags = BRCMF_MSGBUF_PKT_FLAGS_FRAME_802_3;
		tx_msghdr->flags |= (skb->priority & 0x07) <<
				    BRCMF_MSGBUF_PKT_FLAGS_PRIO_SHIFT;
		tx_msghdr->seg_cnt = 1;
		memcpy(tx_msghdr->txhdr, skb->data, ETH_HLEN);
		tx_msghdr->data_len = cpu_to_le16(skb->len - ETH_HLEN);
		address = (u64)physaddr;
		tx_msghdr->data_buf_addr.high_addr = cpu_to_le32(address >> 32);
		tx_msghdr->data_buf_addr.low_addr =
			cpu_to_le32(address & 0xffffffff);
		tx_msghdr->metadata_buf_len = 0;
		tx_msghdr->metadata_buf_addr.high_addr = 0;
		tx_msghdr->metadata_buf_addr.low_addr = 0;
		atomic_inc(&commonring->outstanding_tx);
		if (count >= BRCMF_MSGBUF_TX_FLUSH_CNT2) {
			brcmf_commonring_write_complete(commonring);
			count = 0;
		}
	}
	if (count)
		brcmf_commonring_write_complete(commonring);
	brcmf_commonring_unlock(commonring);
}


static void brcmf_msgbuf_txflow_worker(struct work_struct *worker)
{
	struct brcmf_msgbuf *msgbuf;
	u32 flowid;

	msgbuf = container_of(worker, struct brcmf_msgbuf, txflow_work);
	for_each_set_bit(flowid, msgbuf->flow_map, msgbuf->max_flowrings) {
		clear_bit(flowid, msgbuf->flow_map);
		brcmf_msgbuf_txflow(msgbuf, flowid);
	}
}


static int brcmf_msgbuf_schedule_txdata(struct brcmf_msgbuf *msgbuf, u32 flowid,
					bool force)
{
	struct brcmf_commonring *commonring;

	set_bit(flowid, msgbuf->flow_map);
	commonring = msgbuf->flowrings[flowid];
	if ((force) || (atomic_read(&commonring->outstanding_tx) <
			BRCMF_MSGBUF_DELAY_TXWORKER_THRS))
		queue_work(msgbuf->txflow_wq, &msgbuf->txflow_work);

	return 0;
}


static int brcmf_msgbuf_tx_queue_data(struct brcmf_pub *drvr, int ifidx,
				      struct sk_buff *skb)
{
	struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
	struct brcmf_flowring *flow = msgbuf->flow;
	struct ethhdr *eh = (struct ethhdr *)(skb->data);
	u32 flowid;
	u32 queue_count;
	bool force;

	flowid = brcmf_flowring_lookup(flow, eh->h_dest, skb->priority, ifidx);
	if (flowid == BRCMF_FLOWRING_INVALID_ID) {
		flowid = brcmf_msgbuf_flowring_create(msgbuf, ifidx, skb);
		if (flowid == BRCMF_FLOWRING_INVALID_ID)
			return -ENOMEM;
	}
	queue_count = brcmf_flowring_enqueue(flow, flowid, skb);
	force = ((queue_count % BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS) == 0);
	brcmf_msgbuf_schedule_txdata(msgbuf, flowid, force);

	return 0;
}


static void
brcmf_msgbuf_configure_addr_mode(struct brcmf_pub *drvr, int ifidx,
				 enum proto_addr_mode addr_mode)
{
	struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;

	brcmf_flowring_configure_addr_mode(msgbuf->flow, ifidx, addr_mode);
}


static void
brcmf_msgbuf_delete_peer(struct brcmf_pub *drvr, int ifidx, u8 peer[ETH_ALEN])
{
	struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;

	brcmf_flowring_delete_peer(msgbuf->flow, ifidx, peer);
}


static void
brcmf_msgbuf_add_tdls_peer(struct brcmf_pub *drvr, int ifidx, u8 peer[ETH_ALEN])
{
	struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;

	brcmf_flowring_add_tdls_peer(msgbuf->flow, ifidx, peer);
}
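/* Handle MSGBUF_TYPE_IOCTL_CMPLT: latch status, response length and pktid
 * for the waiter blocked in brcmf_msgbuf_ioctl_resp_wait(), then re-post an
 * ioctl response buffer to replace the one just consumed.
 */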
static void
brcmf_msgbuf_process_ioctl_complete(struct brcmf_msgbuf *msgbuf, void *buf)
{
	struct msgbuf_ioctl_resp_hdr *ioctl_resp;

	ioctl_resp = (struct msgbuf_ioctl_resp_hdr *)buf;

	msgbuf->ioctl_resp_status =
		(s16)le16_to_cpu(ioctl_resp->compl_hdr.status);
	msgbuf->ioctl_resp_ret_len = le16_to_cpu(ioctl_resp->resp_len);
	msgbuf->ioctl_resp_pktid = le32_to_cpu(ioctl_resp->msg.request_id);

	brcmf_msgbuf_ioctl_resp_wake(msgbuf);

	if (msgbuf->cur_ioctlrespbuf)
		msgbuf->cur_ioctlrespbuf--;
	brcmf_msgbuf_rxbuf_ioctlresp_post(msgbuf);
}


static void
brcmf_msgbuf_process_txstatus(struct brcmf_msgbuf *msgbuf, void *buf)
{
	struct brcmf_commonring *commonring;
	struct msgbuf_tx_status *tx_status;
	u32 idx;
	struct sk_buff *skb;
	u16 flowid;

	tx_status = (struct msgbuf_tx_status *)buf;
	idx = le32_to_cpu(tx_status->msg.request_id);
	flowid = le16_to_cpu(tx_status->compl_hdr.flow_ring_id);
	flowid -= BRCMF_H2D_MSGRING_FLOWRING_IDSTART;
	skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
				     msgbuf->tx_pktids, idx);
	if (!skb)
		return;

	set_bit(flowid, msgbuf->txstatus_done_map);
	commonring = msgbuf->flowrings[flowid];
	atomic_dec(&commonring->outstanding_tx);

	brcmf_txfinalize(brcmf_get_ifp(msgbuf->drvr, tx_status->msg.ifidx),
			 skb, true);
}


static u32 brcmf_msgbuf_rxbuf_data_post(struct brcmf_msgbuf *msgbuf, u32 count)
{
	struct brcmf_commonring *commonring;
	void *ret_ptr;
	struct sk_buff *skb;
	u16 alloced;
	u32 pktlen;
	dma_addr_t physaddr;
	struct msgbuf_rx_bufpost *rx_bufpost;
	u64 address;
	u32 pktid;
	u32 i;

	commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_RXPOST_SUBMIT];
	ret_ptr = brcmf_commonring_reserve_for_write_multiple(commonring,
							      count,
							      &alloced);
	if (!ret_ptr) {
		brcmf_dbg(MSGBUF, "Failed to reserve space in commonring\n");
		return 0;
	}

	for (i = 0; i < alloced; i++) {
		rx_bufpost = (struct msgbuf_rx_bufpost *)ret_ptr;
		memset(rx_bufpost, 0, sizeof(*rx_bufpost));

		skb = brcmu_pkt_buf_get_skb(BRCMF_MSGBUF_MAX_PKT_SIZE);

		if (skb == NULL) {
			brcmf_err("Failed to alloc SKB\n");
			brcmf_commonring_write_cancel(commonring, alloced - i);
			break;
		}

		pktlen = skb->len;
		if (brcmf_msgbuf_alloc_pktid(msgbuf->drvr->bus_if->dev,
					     msgbuf->rx_pktids, skb, 0,
					     &physaddr, &pktid)) {
			dev_kfree_skb_any(skb);
			brcmf_err("No PKTID available !!\n");
			brcmf_commonring_write_cancel(commonring, alloced - i);
			break;
		}

		if (msgbuf->rx_metadata_offset) {
			address = (u64)physaddr;
			rx_bufpost->metadata_buf_len =
				cpu_to_le16(msgbuf->rx_metadata_offset);
			rx_bufpost->metadata_buf_addr.high_addr =
				cpu_to_le32(address >> 32);
			rx_bufpost->metadata_buf_addr.low_addr =
				cpu_to_le32(address & 0xffffffff);

			skb_pull(skb, msgbuf->rx_metadata_offset);
			pktlen = skb->len;
			physaddr += msgbuf->rx_metadata_offset;
		}
		rx_bufpost->msg.msgtype = MSGBUF_TYPE_RXBUF_POST;
		rx_bufpost->msg.request_id = cpu_to_le32(pktid);

		address = (u64)physaddr;
		rx_bufpost->data_buf_len = cpu_to_le16((u16)pktlen);
		rx_bufpost->data_buf_addr.high_addr =
			cpu_to_le32(address >> 32);
		rx_bufpost->data_buf_addr.low_addr =
			cpu_to_le32(address & 0xffffffff);

		ret_ptr += brcmf_commonring_len_item(commonring);
	}

	if (i)
		brcmf_commonring_write_complete(commonring);

	return i;
}
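/* Keep the device stocked with RX data buffers: refill up to max_rxbufpost
 * and stop early if a post attempt makes no progress.
 */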
static void
brcmf_msgbuf_rxbuf_data_fill(struct brcmf_msgbuf *msgbuf)
{
	u32 fillbufs;
	u32 retcount;

	fillbufs = msgbuf->max_rxbufpost - msgbuf->rxbufpost;

	while (fillbufs) {
		retcount = brcmf_msgbuf_rxbuf_data_post(msgbuf, fillbufs);
		if (!retcount)
			break;
		msgbuf->rxbufpost += retcount;
		fillbufs -= retcount;
	}
}


static void
brcmf_msgbuf_update_rxbufpost_count(struct brcmf_msgbuf *msgbuf, u16 rxcnt)
{
	msgbuf->rxbufpost -= rxcnt;
	if (msgbuf->rxbufpost <= (msgbuf->max_rxbufpost -
				  BRCMF_MSGBUF_RXBUFPOST_THRESHOLD))
		brcmf_msgbuf_rxbuf_data_fill(msgbuf);
}


static u32
brcmf_msgbuf_rxbuf_ctrl_post(struct brcmf_msgbuf *msgbuf, bool event_buf,
			     u32 count)
{
	struct brcmf_commonring *commonring;
	void *ret_ptr;
	struct sk_buff *skb;
	u16 alloced;
	u32 pktlen;
	dma_addr_t physaddr;
	struct msgbuf_rx_ioctl_resp_or_event *rx_bufpost;
	u64 address;
	u32 pktid;
	u32 i;

	commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_CONTROL_SUBMIT];
	brcmf_commonring_lock(commonring);
	ret_ptr = brcmf_commonring_reserve_for_write_multiple(commonring,
							      count,
							      &alloced);
	if (!ret_ptr) {
		brcmf_err("Failed to reserve space in commonring\n");
		brcmf_commonring_unlock(commonring);
		return 0;
	}

	for (i = 0; i < alloced; i++) {
		rx_bufpost = (struct msgbuf_rx_ioctl_resp_or_event *)ret_ptr;
		memset(rx_bufpost, 0, sizeof(*rx_bufpost));

		skb = brcmu_pkt_buf_get_skb(BRCMF_MSGBUF_MAX_PKT_SIZE);

		if (skb == NULL) {
			brcmf_err("Failed to alloc SKB\n");
			brcmf_commonring_write_cancel(commonring, alloced - i);
			break;
		}

		pktlen = skb->len;
		if (brcmf_msgbuf_alloc_pktid(msgbuf->drvr->bus_if->dev,
					     msgbuf->rx_pktids, skb, 0,
					     &physaddr, &pktid)) {
			dev_kfree_skb_any(skb);
			brcmf_err("No PKTID available !!\n");
			brcmf_commonring_write_cancel(commonring, alloced - i);
			break;
		}
		if (event_buf)
			rx_bufpost->msg.msgtype = MSGBUF_TYPE_EVENT_BUF_POST;
		else
			rx_bufpost->msg.msgtype =
				MSGBUF_TYPE_IOCTLRESP_BUF_POST;
		rx_bufpost->msg.request_id = cpu_to_le32(pktid);

		address = (u64)physaddr;
		rx_bufpost->host_buf_len = cpu_to_le16((u16)pktlen);
		rx_bufpost->host_buf_addr.high_addr =
			cpu_to_le32(address >> 32);
		rx_bufpost->host_buf_addr.low_addr =
			cpu_to_le32(address & 0xffffffff);

		ret_ptr += brcmf_commonring_len_item(commonring);
	}

	if (i)
		brcmf_commonring_write_complete(commonring);

	brcmf_commonring_unlock(commonring);

	return i;
}


static void brcmf_msgbuf_rxbuf_ioctlresp_post(struct brcmf_msgbuf *msgbuf)
{
	u32 count;

	count = msgbuf->max_ioctlrespbuf - msgbuf->cur_ioctlrespbuf;
	count = brcmf_msgbuf_rxbuf_ctrl_post(msgbuf, false, count);
	msgbuf->cur_ioctlrespbuf += count;
}


static void brcmf_msgbuf_rxbuf_event_post(struct brcmf_msgbuf *msgbuf)
{
	u32 count;

	count = msgbuf->max_eventbuf - msgbuf->cur_eventbuf;
	count = brcmf_msgbuf_rxbuf_ctrl_post(msgbuf, true, count);
	msgbuf->cur_eventbuf += count;
}
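/* Handle MSGBUF_TYPE_WL_EVENT: reclaim the posted event buffer, top the pool
 * back up, and hand the event frame to the firmware event handler.
 */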
static void brcmf_msgbuf_process_event(struct brcmf_msgbuf *msgbuf, void *buf)
{
	struct msgbuf_rx_event *event;
	u32 idx;
	u16 buflen;
	struct sk_buff *skb;
	struct brcmf_if *ifp;

	event = (struct msgbuf_rx_event *)buf;
	idx = le32_to_cpu(event->msg.request_id);
	buflen = le16_to_cpu(event->event_data_len);

	if (msgbuf->cur_eventbuf)
		msgbuf->cur_eventbuf--;
	brcmf_msgbuf_rxbuf_event_post(msgbuf);

	skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
				     msgbuf->rx_pktids, idx);
	if (!skb)
		return;

	if (msgbuf->rx_dataoffset)
		skb_pull(skb, msgbuf->rx_dataoffset);

	skb_trim(skb, buflen);

	ifp = brcmf_get_ifp(msgbuf->drvr, event->msg.ifidx);
	if (!ifp || !ifp->ndev) {
		brcmf_err("Received pkt for invalid ifidx %d\n",
			  event->msg.ifidx);
		goto exit;
	}

	skb->protocol = eth_type_trans(skb, ifp->ndev);

	brcmf_fweh_process_skb(ifp->drvr, skb);

exit:
	brcmu_pkt_buf_free_skb(skb);
}


static void
brcmf_msgbuf_process_rx_complete(struct brcmf_msgbuf *msgbuf, void *buf)
{
	struct msgbuf_rx_complete *rx_complete;
	struct sk_buff *skb;
	u16 data_offset;
	u16 buflen;
	u16 flags;
	u32 idx;
	struct brcmf_if *ifp;

	brcmf_msgbuf_update_rxbufpost_count(msgbuf, 1);

	rx_complete = (struct msgbuf_rx_complete *)buf;
	data_offset = le16_to_cpu(rx_complete->data_offset);
	buflen = le16_to_cpu(rx_complete->data_len);
	idx = le32_to_cpu(rx_complete->msg.request_id);
	flags = le16_to_cpu(rx_complete->flags);

	skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
				     msgbuf->rx_pktids, idx);
	if (!skb)
		return;

	if (data_offset)
		skb_pull(skb, data_offset);
	else if (msgbuf->rx_dataoffset)
		skb_pull(skb, msgbuf->rx_dataoffset);

	skb_trim(skb, buflen);

	if ((flags & BRCMF_MSGBUF_PKT_FLAGS_FRAME_MASK) ==
	    BRCMF_MSGBUF_PKT_FLAGS_FRAME_802_11) {
		ifp = msgbuf->drvr->mon_if;

		if (!ifp) {
			brcmf_err("Received unexpected monitor pkt\n");
			brcmu_pkt_buf_free_skb(skb);
			return;
		}

		brcmf_netif_mon_rx(ifp, skb);
		return;
	}

	ifp = brcmf_get_ifp(msgbuf->drvr, rx_complete->msg.ifidx);
	if (!ifp || !ifp->ndev) {
		brcmf_err("Received pkt for invalid ifidx %d\n",
			  rx_complete->msg.ifidx);
		brcmu_pkt_buf_free_skb(skb);
		return;
	}

	skb->protocol = eth_type_trans(skb, ifp->ndev);
	brcmf_netif_rx(ifp, skb);
}


static void
brcmf_msgbuf_process_flow_ring_create_response(struct brcmf_msgbuf *msgbuf,
					       void *buf)
{
	struct msgbuf_flowring_create_resp *flowring_create_resp;
	u16 status;
	u16 flowid;

	flowring_create_resp = (struct msgbuf_flowring_create_resp *)buf;

	flowid = le16_to_cpu(flowring_create_resp->compl_hdr.flow_ring_id);
	flowid -= BRCMF_H2D_MSGRING_FLOWRING_IDSTART;
	status = le16_to_cpu(flowring_create_resp->compl_hdr.status);

	if (status) {
		brcmf_err("Flowring creation failed, code %d\n", status);
		brcmf_msgbuf_remove_flowring(msgbuf, flowid);
		return;
	}
	brcmf_dbg(MSGBUF, "Flowring %d Create response status %d\n", flowid,
		  status);

	brcmf_flowring_open(msgbuf->flow, flowid);

	brcmf_msgbuf_schedule_txdata(msgbuf, flowid, true);
}
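/* Firmware answered a flowring delete: on success free the host-side DMA
 * ring via brcmf_msgbuf_remove_flowring(); on failure only drop the flowring
 * bookkeeping.
 */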
static void
brcmf_msgbuf_process_flow_ring_delete_response(struct brcmf_msgbuf *msgbuf,
					       void *buf)
{
	struct msgbuf_flowring_delete_resp *flowring_delete_resp;
	u16 status;
	u16 flowid;

	flowring_delete_resp = (struct msgbuf_flowring_delete_resp *)buf;

	flowid = le16_to_cpu(flowring_delete_resp->compl_hdr.flow_ring_id);
	flowid -= BRCMF_H2D_MSGRING_FLOWRING_IDSTART;
	status = le16_to_cpu(flowring_delete_resp->compl_hdr.status);

	if (status) {
		brcmf_err("Flowring deletion failed, code %d\n", status);
		brcmf_flowring_delete(msgbuf->flow, flowid);
		return;
	}
	brcmf_dbg(MSGBUF, "Flowring %d Delete response status %d\n", flowid,
		  status);

	brcmf_msgbuf_remove_flowring(msgbuf, flowid);
}


static void brcmf_msgbuf_process_msgtype(struct brcmf_msgbuf *msgbuf, void *buf)
{
	struct msgbuf_common_hdr *msg;

	msg = (struct msgbuf_common_hdr *)buf;
	switch (msg->msgtype) {
	case MSGBUF_TYPE_FLOW_RING_CREATE_CMPLT:
		brcmf_dbg(MSGBUF, "MSGBUF_TYPE_FLOW_RING_CREATE_CMPLT\n");
		brcmf_msgbuf_process_flow_ring_create_response(msgbuf, buf);
		break;
	case MSGBUF_TYPE_FLOW_RING_DELETE_CMPLT:
		brcmf_dbg(MSGBUF, "MSGBUF_TYPE_FLOW_RING_DELETE_CMPLT\n");
		brcmf_msgbuf_process_flow_ring_delete_response(msgbuf, buf);
		break;
	case MSGBUF_TYPE_IOCTLPTR_REQ_ACK:
		brcmf_dbg(MSGBUF, "MSGBUF_TYPE_IOCTLPTR_REQ_ACK\n");
		break;
	case MSGBUF_TYPE_IOCTL_CMPLT:
		brcmf_dbg(MSGBUF, "MSGBUF_TYPE_IOCTL_CMPLT\n");
		brcmf_msgbuf_process_ioctl_complete(msgbuf, buf);
		break;
	case MSGBUF_TYPE_WL_EVENT:
		brcmf_dbg(MSGBUF, "MSGBUF_TYPE_WL_EVENT\n");
		brcmf_msgbuf_process_event(msgbuf, buf);
		break;
	case MSGBUF_TYPE_TX_STATUS:
		brcmf_dbg(MSGBUF, "MSGBUF_TYPE_TX_STATUS\n");
		brcmf_msgbuf_process_txstatus(msgbuf, buf);
		break;
	case MSGBUF_TYPE_RX_CMPLT:
		brcmf_dbg(MSGBUF, "MSGBUF_TYPE_RX_CMPLT\n");
		brcmf_msgbuf_process_rx_complete(msgbuf, buf);
		break;
	default:
		brcmf_err("Unsupported msgtype %d\n", msg->msgtype);
		break;
	}
}


static void brcmf_msgbuf_process_rx(struct brcmf_msgbuf *msgbuf,
				    struct brcmf_commonring *commonring)
{
	void *buf;
	u16 count;
	u16 processed;

again:
	buf = brcmf_commonring_get_read_ptr(commonring, &count);
	if (buf == NULL)
		return;

	processed = 0;
	while (count) {
		brcmf_msgbuf_process_msgtype(msgbuf,
					     buf + msgbuf->rx_dataoffset);
		buf += brcmf_commonring_len_item(commonring);
		processed++;
		if (processed == BRCMF_MSGBUF_UPDATE_RX_PTR_THRS) {
			brcmf_commonring_read_complete(commonring, processed);
			processed = 0;
		}
		count--;
	}
	if (processed)
		brcmf_commonring_read_complete(commonring, processed);

	if (commonring->r_ptr == 0)
		goto again;
}


int brcmf_proto_msgbuf_rx_trigger(struct device *dev)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_pub *drvr = bus_if->drvr;
	struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
	struct brcmf_commonring *commonring;
	void *buf;
	u32 flowid;
	int qlen;

	buf = msgbuf->commonrings[BRCMF_D2H_MSGRING_RX_COMPLETE];
	brcmf_msgbuf_process_rx(msgbuf, buf);
	buf = msgbuf->commonrings[BRCMF_D2H_MSGRING_TX_COMPLETE];
	brcmf_msgbuf_process_rx(msgbuf, buf);
	buf = msgbuf->commonrings[BRCMF_D2H_MSGRING_CONTROL_COMPLETE];
	brcmf_msgbuf_process_rx(msgbuf, buf);

	for_each_set_bit(flowid, msgbuf->txstatus_done_map,
			 msgbuf->max_flowrings) {
		clear_bit(flowid, msgbuf->txstatus_done_map);
		commonring = msgbuf->flowrings[flowid];
		qlen = brcmf_flowring_qlen(msgbuf->flow, flowid);
		if ((qlen > BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS) ||
		    ((qlen) && (atomic_read(&commonring->outstanding_tx) <
				BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS)))
			brcmf_msgbuf_schedule_txdata(msgbuf, flowid, true);
	}

	return 0;
}
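/* Request deletion of a flowring from firmware. If the delete request cannot
 * even be queued, the host-side ring is torn down immediately without
 * firmware involvement.
 */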
void brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u16 flowid)
{
	struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
	struct msgbuf_tx_flowring_delete_req *delete;
	struct brcmf_commonring *commonring;
	void *ret_ptr;
	u8 ifidx;
	int err;

	commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_CONTROL_SUBMIT];
	brcmf_commonring_lock(commonring);
	ret_ptr = brcmf_commonring_reserve_for_write(commonring);
	if (!ret_ptr) {
		brcmf_err("FW unaware, flowring will be removed !!\n");
		brcmf_commonring_unlock(commonring);
		brcmf_msgbuf_remove_flowring(msgbuf, flowid);
		return;
	}

	delete = (struct msgbuf_tx_flowring_delete_req *)ret_ptr;

	ifidx = brcmf_flowring_ifidx_get(msgbuf->flow, flowid);

	delete->msg.msgtype = MSGBUF_TYPE_FLOW_RING_DELETE;
	delete->msg.ifidx = ifidx;
	delete->msg.request_id = 0;

	delete->flow_ring_id = cpu_to_le16(flowid +
					   BRCMF_H2D_MSGRING_FLOWRING_IDSTART);
	delete->reason = 0;

	brcmf_dbg(MSGBUF, "Send Flow Delete Req flow ID %d, ifindex %d\n",
		  flowid, ifidx);

	err = brcmf_commonring_write_complete(commonring);
	brcmf_commonring_unlock(commonring);
	if (err) {
		brcmf_err("Failed to submit RING_DELETE, flowring will be removed\n");
		brcmf_msgbuf_remove_flowring(msgbuf, flowid);
	}
}
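/* debugfs "msgbuf_stats": dump read/write pointers and depth of every common
 * ring plus per-flowring queue state (only built when DEBUG is defined).
 */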
#ifdef DEBUG
static int brcmf_msgbuf_stats_read(struct seq_file *seq, void *data)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(seq->private);
	struct brcmf_pub *drvr = bus_if->drvr;
	struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
	struct brcmf_commonring *commonring;
	u16 i;
	struct brcmf_flowring_ring *ring;
	struct brcmf_flowring_hash *hash;

	commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_CONTROL_SUBMIT];
	seq_printf(seq, "h2d_ctl_submit: rp %4u, wp %4u, depth %4u\n",
		   commonring->r_ptr, commonring->w_ptr, commonring->depth);
	commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_RXPOST_SUBMIT];
	seq_printf(seq, "h2d_rx_submit: rp %4u, wp %4u, depth %4u\n",
		   commonring->r_ptr, commonring->w_ptr, commonring->depth);
	commonring = msgbuf->commonrings[BRCMF_D2H_MSGRING_CONTROL_COMPLETE];
	seq_printf(seq, "d2h_ctl_cmplt: rp %4u, wp %4u, depth %4u\n",
		   commonring->r_ptr, commonring->w_ptr, commonring->depth);
	commonring = msgbuf->commonrings[BRCMF_D2H_MSGRING_TX_COMPLETE];
	seq_printf(seq, "d2h_tx_cmplt: rp %4u, wp %4u, depth %4u\n",
		   commonring->r_ptr, commonring->w_ptr, commonring->depth);
	commonring = msgbuf->commonrings[BRCMF_D2H_MSGRING_RX_COMPLETE];
	seq_printf(seq, "d2h_rx_cmplt: rp %4u, wp %4u, depth %4u\n",
		   commonring->r_ptr, commonring->w_ptr, commonring->depth);

	seq_printf(seq, "\nh2d_flowrings: depth %u\n",
		   BRCMF_H2D_TXFLOWRING_MAX_ITEM);
	seq_puts(seq, "Active flowrings:\n");
	hash = msgbuf->flow->hash;
	for (i = 0; i < msgbuf->flow->nrofrings; i++) {
		if (!msgbuf->flow->rings[i])
			continue;
		ring = msgbuf->flow->rings[i];
		if (ring->status != RING_OPEN)
			continue;
		commonring = msgbuf->flowrings[i];
		hash = &msgbuf->flow->hash[ring->hash_id];
		seq_printf(seq, "id %3u: rp %4u, wp %4u, qlen %4u, blocked %u\n"
				" ifidx %u, fifo %u, da %pM\n",
			   i, commonring->r_ptr, commonring->w_ptr,
			   skb_queue_len(&ring->skblist), ring->blocked,
			   hash->ifidx, hash->fifo, hash->mac);
	}

	return 0;
}
#else
static int brcmf_msgbuf_stats_read(struct seq_file *seq, void *data)
{
	return 0;
}
#endif

static void brcmf_msgbuf_debugfs_create(struct brcmf_pub *drvr)
{
	brcmf_debugfs_add_entry(drvr, "msgbuf_stats", brcmf_msgbuf_stats_read);
}
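/* Attach the msgbuf protocol: allocate pktid arrays, flow maps and the
 * DMA-coherent ioctl buffer, install the proto ops, then pre-post RX data,
 * event and ioctl response buffers before the rings are first used.
 */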
int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr)
{
	struct brcmf_bus_msgbuf *if_msgbuf;
	struct brcmf_msgbuf *msgbuf;
	u64 address;
	u32 count;

	if_msgbuf = drvr->bus_if->msgbuf;

	if (if_msgbuf->max_flowrings >= BRCMF_FLOWRING_HASHSIZE) {
		brcmf_err("driver not configured for this many flowrings %d\n",
			  if_msgbuf->max_flowrings);
		if_msgbuf->max_flowrings = BRCMF_FLOWRING_HASHSIZE - 1;
	}

	msgbuf = kzalloc(sizeof(*msgbuf), GFP_KERNEL);
	if (!msgbuf)
		goto fail;

	msgbuf->txflow_wq = create_singlethread_workqueue("msgbuf_txflow");
	if (msgbuf->txflow_wq == NULL) {
		brcmf_err("workqueue creation failed\n");
		goto fail;
	}
	INIT_WORK(&msgbuf->txflow_work, brcmf_msgbuf_txflow_worker);
	count = BITS_TO_LONGS(if_msgbuf->max_flowrings);
	count = count * sizeof(unsigned long);
	msgbuf->flow_map = kzalloc(count, GFP_KERNEL);
	if (!msgbuf->flow_map)
		goto fail;

	msgbuf->txstatus_done_map = kzalloc(count, GFP_KERNEL);
	if (!msgbuf->txstatus_done_map)
		goto fail;

	msgbuf->drvr = drvr;
	msgbuf->ioctbuf = dma_alloc_coherent(drvr->bus_if->dev,
					     BRCMF_TX_IOCTL_MAX_MSG_SIZE,
					     &msgbuf->ioctbuf_handle,
					     GFP_KERNEL);
	if (!msgbuf->ioctbuf)
		goto fail;
	address = (u64)msgbuf->ioctbuf_handle;
	msgbuf->ioctbuf_phys_hi = address >> 32;
	msgbuf->ioctbuf_phys_lo = address & 0xffffffff;

	drvr->proto->hdrpull = brcmf_msgbuf_hdrpull;
	drvr->proto->query_dcmd = brcmf_msgbuf_query_dcmd;
	drvr->proto->set_dcmd = brcmf_msgbuf_set_dcmd;
	drvr->proto->tx_queue_data = brcmf_msgbuf_tx_queue_data;
	drvr->proto->configure_addr_mode = brcmf_msgbuf_configure_addr_mode;
	drvr->proto->delete_peer = brcmf_msgbuf_delete_peer;
	drvr->proto->add_tdls_peer = brcmf_msgbuf_add_tdls_peer;
	drvr->proto->rxreorder = brcmf_msgbuf_rxreorder;
	drvr->proto->debugfs_create = brcmf_msgbuf_debugfs_create;
	drvr->proto->pd = msgbuf;

	init_waitqueue_head(&msgbuf->ioctl_resp_wait);

	msgbuf->commonrings =
		(struct brcmf_commonring **)if_msgbuf->commonrings;
	msgbuf->flowrings = (struct brcmf_commonring **)if_msgbuf->flowrings;
	msgbuf->max_flowrings = if_msgbuf->max_flowrings;
	msgbuf->flowring_dma_handle =
		kcalloc(msgbuf->max_flowrings,
			sizeof(*msgbuf->flowring_dma_handle), GFP_KERNEL);
	if (!msgbuf->flowring_dma_handle)
		goto fail;

	msgbuf->rx_dataoffset = if_msgbuf->rx_dataoffset;
	msgbuf->max_rxbufpost = if_msgbuf->max_rxbufpost;

	msgbuf->max_ioctlrespbuf = BRCMF_MSGBUF_MAX_IOCTLRESPBUF_POST;
	msgbuf->max_eventbuf = BRCMF_MSGBUF_MAX_EVENTBUF_POST;

	msgbuf->tx_pktids = brcmf_msgbuf_init_pktids(NR_TX_PKTIDS,
						     DMA_TO_DEVICE);
	if (!msgbuf->tx_pktids)
		goto fail;
	msgbuf->rx_pktids = brcmf_msgbuf_init_pktids(NR_RX_PKTIDS,
						     DMA_FROM_DEVICE);
	if (!msgbuf->rx_pktids)
		goto fail;

	msgbuf->flow = brcmf_flowring_attach(drvr->bus_if->dev,
					     if_msgbuf->max_flowrings);
	if (!msgbuf->flow)
		goto fail;


	brcmf_dbg(MSGBUF, "Feeding buffers, rx data %d, rx event %d, rx ioctl resp %d\n",
		  msgbuf->max_rxbufpost, msgbuf->max_eventbuf,
		  msgbuf->max_ioctlrespbuf);
	count = 0;
	do {
		brcmf_msgbuf_rxbuf_data_fill(msgbuf);
		if (msgbuf->max_rxbufpost != msgbuf->rxbufpost)
			msleep(10);
		else
			break;
		count++;
	} while (count < 10);
	brcmf_msgbuf_rxbuf_event_post(msgbuf);
	brcmf_msgbuf_rxbuf_ioctlresp_post(msgbuf);

	INIT_WORK(&msgbuf->flowring_work, brcmf_msgbuf_flowring_worker);
	spin_lock_init(&msgbuf->flowring_work_lock);
	INIT_LIST_HEAD(&msgbuf->work_queue);

	return 0;

fail:
	if (msgbuf) {
		kfree(msgbuf->flow_map);
		kfree(msgbuf->txstatus_done_map);
		brcmf_msgbuf_release_pktids(msgbuf);
		kfree(msgbuf->flowring_dma_handle);
		if (msgbuf->ioctbuf)
			dma_free_coherent(drvr->bus_if->dev,
					  BRCMF_TX_IOCTL_MAX_MSG_SIZE,
					  msgbuf->ioctbuf,
					  msgbuf->ioctbuf_handle);
		kfree(msgbuf);
	}
	return -ENOMEM;
}


void brcmf_proto_msgbuf_detach(struct brcmf_pub *drvr)
{
	struct brcmf_msgbuf *msgbuf;
	struct brcmf_msgbuf_work_item *work;

	brcmf_dbg(TRACE, "Enter\n");
	if (drvr->proto->pd) {
		msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
		cancel_work_sync(&msgbuf->flowring_work);
		while (!list_empty(&msgbuf->work_queue)) {
			work = list_first_entry(&msgbuf->work_queue,
						struct brcmf_msgbuf_work_item,
						queue);
			list_del(&work->queue);
			kfree(work);
		}
		kfree(msgbuf->flow_map);
		kfree(msgbuf->txstatus_done_map);
		if (msgbuf->txflow_wq)
			destroy_workqueue(msgbuf->txflow_wq);

		brcmf_flowring_detach(msgbuf->flow);
		dma_free_coherent(drvr->bus_if->dev,
				  BRCMF_TX_IOCTL_MAX_MSG_SIZE,
				  msgbuf->ioctbuf, msgbuf->ioctbuf_handle);
		brcmf_msgbuf_release_pktids(msgbuf);
		kfree(msgbuf->flowring_dma_handle);
		kfree(msgbuf);
		drvr->proto->pd = NULL;
	}
}