/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef __MT76_H
#define __MT76_H

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/leds.h>
#include <linux/usb.h>
#include <linux/average.h>
#include <net/mac80211.h>
#include "util.h"

#define MT_TX_RING_SIZE		256
#define MT_MCU_RING_SIZE	32
#define MT_RX_BUF_SIZE		2048

struct mt76_dev;
struct mt76_wcid;

struct mt76_reg_pair {
	u32 reg;
	u32 value;
};

enum mt76_bus_type {
	MT76_BUS_MMIO,
	MT76_BUS_USB,
};

struct mt76_bus_ops {
	u32 (*rr)(struct mt76_dev *dev, u32 offset);
	void (*wr)(struct mt76_dev *dev, u32 offset, u32 val);
	u32 (*rmw)(struct mt76_dev *dev, u32 offset, u32 mask, u32 val);
	void (*copy)(struct mt76_dev *dev, u32 offset, const void *data,
		     int len);
	int (*wr_rp)(struct mt76_dev *dev, u32 base,
		     const struct mt76_reg_pair *rp, int len);
	int (*rd_rp)(struct mt76_dev *dev, u32 base,
		     struct mt76_reg_pair *rp, int len);
	enum mt76_bus_type type;
};

#define mt76_is_usb(dev) ((dev)->mt76.bus->type == MT76_BUS_USB)
#define mt76_is_mmio(dev) ((dev)->mt76.bus->type == MT76_BUS_MMIO)

enum mt76_txq_id {
	MT_TXQ_VO = IEEE80211_AC_VO,
	MT_TXQ_VI = IEEE80211_AC_VI,
	MT_TXQ_BE = IEEE80211_AC_BE,
	MT_TXQ_BK = IEEE80211_AC_BK,
	MT_TXQ_PSD,
	MT_TXQ_MCU,
	MT_TXQ_BEACON,
	MT_TXQ_CAB,
	__MT_TXQ_MAX
};

enum mt76_rxq_id {
	MT_RXQ_MAIN,
	MT_RXQ_MCU,
	__MT_RXQ_MAX
};

struct mt76_queue_buf {
	dma_addr_t addr;
	int len;
};

struct mt76u_buf {
	struct mt76_dev *dev;
	struct urb *urb;
	size_t len;
	void *buf;
	bool done;
};

struct mt76_queue_entry {
	union {
		void *buf;
		struct sk_buff *skb;
	};
	union {
		struct mt76_txwi_cache *txwi;
		struct mt76u_buf ubuf;
	};
	bool schedule;
};

struct mt76_queue_regs {
	u32 desc_base;
	u32 ring_size;
	u32 cpu_idx;
	u32 dma_idx;
} __packed __aligned(4);

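/*
 * Descriptor ring state; roughly, head is the next slot the CPU fills and
 * tail the next slot to be reaped on completion, both wrapping modulo
 * ndesc. The ring sizes above are powers of two, which the
 * mt76_incr()/mt76_decr() helpers further down rely on.
 */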
struct mt76_queue {
	struct mt76_queue_regs __iomem *regs;

	spinlock_t lock;
	struct mt76_queue_entry *entry;
	struct mt76_desc *desc;

	struct list_head swq;
	int swq_queued;

	u16 first;
	u16 head;
	u16 tail;
	int ndesc;
	int queued;
	int buf_size;

	u8 buf_offset;
	u8 hw_idx;

	dma_addr_t desc_dma;
	struct sk_buff *rx_head;
	struct page_frag_cache rx_page;
	spinlock_t rx_page_lock;
};

struct mt76_mcu_ops {
	int (*mcu_send_msg)(struct mt76_dev *dev, int cmd, const void *data,
			    int len, bool wait_resp);
	int (*mcu_wr_rp)(struct mt76_dev *dev, u32 base,
			 const struct mt76_reg_pair *rp, int len);
	int (*mcu_rd_rp)(struct mt76_dev *dev, u32 base,
			 struct mt76_reg_pair *rp, int len);
};

struct mt76_queue_ops {
	int (*init)(struct mt76_dev *dev);

	int (*alloc)(struct mt76_dev *dev, struct mt76_queue *q);

	int (*add_buf)(struct mt76_dev *dev, struct mt76_queue *q,
		       struct mt76_queue_buf *buf, int nbufs, u32 info,
		       struct sk_buff *skb, void *txwi);

	int (*tx_queue_skb)(struct mt76_dev *dev, struct mt76_queue *q,
			    struct sk_buff *skb, struct mt76_wcid *wcid,
			    struct ieee80211_sta *sta);

	int (*tx_queue_skb_raw)(struct mt76_dev *dev, enum mt76_txq_id qid,
				struct sk_buff *skb, u32 tx_info);

	void *(*dequeue)(struct mt76_dev *dev, struct mt76_queue *q,
			 bool flush, int *len, u32 *info, bool *more);

	void (*rx_reset)(struct mt76_dev *dev, enum mt76_rxq_id qid);

	void (*tx_cleanup)(struct mt76_dev *dev, enum mt76_txq_id qid,
			   bool flush);

	void (*kick)(struct mt76_dev *dev, struct mt76_queue *q);
};

enum mt76_wcid_flags {
	MT_WCID_FLAG_CHECK_PS,
	MT_WCID_FLAG_PS,
};

#define MT76_N_WCIDS 128

DECLARE_EWMA(signal, 10, 8);

struct mt76_wcid {
	struct mt76_rx_tid __rcu *aggr[IEEE80211_NUM_TIDS];

	struct work_struct aggr_work;

	unsigned long flags;

	struct ewma_signal rssi;
	int inactive_count;

	u8 idx;
	u8 hw_key_idx;

	u8 sta:1;

	u8 rx_check_pn;
	u8 rx_key_pn[IEEE80211_NUM_TIDS][6];

	__le16 tx_rate;
	bool tx_rate_set;
	u8 tx_rate_nss;
	s8 max_txpwr_adj;
	bool sw_iv;

	u8 packet_id;
};

struct mt76_txq {
	struct list_head list;
	struct mt76_queue *hwq;
	struct mt76_wcid *wcid;

	struct sk_buff_head retry_q;

	u16 agg_ssn;
	bool send_bar;
	bool aggr;
};

struct mt76_txwi_cache {
	u32 txwi[8];
	dma_addr_t dma_addr;
	struct list_head list;
};

struct mt76_rx_tid {
	struct rcu_head rcu_head;

	struct mt76_dev *dev;

	spinlock_t lock;
	struct delayed_work reorder_work;

	u16 head;
	u8 size;
	u8 nframes;

	u8 started:1, stopped:1, timer_pending:1;

	struct sk_buff *reorder_buf[];
};

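/*
 * TX status tracking: a frame that needs a status report gets a packet id
 * in the range [MT_PACKET_ID_FIRST, MT_PACKET_ID_MASK] and sits on
 * dev->status_list until the matching report arrives or
 * MT_TX_STATUS_SKB_TIMEOUT expires; MT_PACKET_ID_NO_ACK and
 * MT_PACKET_ID_NO_SKB are reserved for frames that are never matched back
 * to an skb.
 */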
#define MT_TX_CB_DMA_DONE		BIT(0)
#define MT_TX_CB_TXS_DONE		BIT(1)
#define MT_TX_CB_TXS_FAILED		BIT(2)

#define MT_PACKET_ID_MASK		GENMASK(7, 0)
#define MT_PACKET_ID_NO_ACK		0
#define MT_PACKET_ID_NO_SKB		1
#define MT_PACKET_ID_FIRST		2

#define MT_TX_STATUS_SKB_TIMEOUT	HZ

struct mt76_tx_cb {
	unsigned long jiffies;
	u8 wcid;
	u8 pktid;
	u8 flags;
};

enum {
	MT76_STATE_INITIALIZED,
	MT76_STATE_RUNNING,
	MT76_STATE_MCU_RUNNING,
	MT76_SCANNING,
	MT76_RESET,
	MT76_OFFCHANNEL,
	MT76_REMOVED,
	MT76_READING_STATS,
};

struct mt76_hw_cap {
	bool has_2ghz;
	bool has_5ghz;
};

struct mt76_driver_ops {
	u16 txwi_size;

	void (*update_survey)(struct mt76_dev *dev);

	int (*tx_prepare_skb)(struct mt76_dev *dev, void *txwi_ptr,
			      struct sk_buff *skb, struct mt76_queue *q,
			      struct mt76_wcid *wcid,
			      struct ieee80211_sta *sta, u32 *tx_info);

	void (*tx_complete_skb)(struct mt76_dev *dev, struct mt76_queue *q,
				struct mt76_queue_entry *e, bool flush);

	bool (*tx_status_data)(struct mt76_dev *dev, u8 *update);

	void (*rx_skb)(struct mt76_dev *dev, enum mt76_rxq_id q,
		       struct sk_buff *skb);

	void (*rx_poll_complete)(struct mt76_dev *dev, enum mt76_rxq_id q);

	void (*sta_ps)(struct mt76_dev *dev, struct ieee80211_sta *sta,
		       bool ps);

	int (*sta_add)(struct mt76_dev *dev, struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta);

	void (*sta_assoc)(struct mt76_dev *dev, struct ieee80211_vif *vif,
			  struct ieee80211_sta *sta);

	void (*sta_remove)(struct mt76_dev *dev, struct ieee80211_vif *vif,
			   struct ieee80211_sta *sta);
};

struct mt76_channel_state {
	u64 cc_active;
	u64 cc_busy;
};

struct mt76_sband {
	struct ieee80211_supported_band sband;
	struct mt76_channel_state *chan;
};

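/*
 * Per-rate TX power limits; the per-modulation arrays (4 + 8 + 10 + 16 + 10
 * entries) deliberately add up to the 48 entries of all[], so EEPROM and
 * debugfs code can walk one flat array while callers index by modulation.
 */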
struct mt76_rate_power {
	union {
		struct {
			s8 cck[4];
			s8 ofdm[8];
			s8 stbc[10];
			s8 ht[16];
			s8 vht[10];
		};
		s8 all[48];
	};
};

/* addr req mask */
#define MT_VEND_TYPE_EEPROM	BIT(31)
#define MT_VEND_TYPE_CFG	BIT(30)
#define MT_VEND_TYPE_MASK	(MT_VEND_TYPE_EEPROM | MT_VEND_TYPE_CFG)

#define MT_VEND_ADDR(type, n)	(MT_VEND_TYPE_##type | (n))
enum mt_vendor_req {
	MT_VEND_DEV_MODE = 0x1,
	MT_VEND_WRITE = 0x2,
	MT_VEND_MULTI_WRITE = 0x6,
	MT_VEND_MULTI_READ = 0x7,
	MT_VEND_READ_EEPROM = 0x9,
	MT_VEND_WRITE_FCE = 0x42,
	MT_VEND_WRITE_CFG = 0x46,
	MT_VEND_READ_CFG = 0x47,
};

enum mt76u_in_ep {
	MT_EP_IN_PKT_RX,
	MT_EP_IN_CMD_RESP,
	__MT_EP_IN_MAX,
};

enum mt76u_out_ep {
	MT_EP_OUT_INBAND_CMD,
	MT_EP_OUT_AC_BK,
	MT_EP_OUT_AC_BE,
	MT_EP_OUT_AC_VI,
	MT_EP_OUT_AC_VO,
	MT_EP_OUT_HCCA,
	__MT_EP_OUT_MAX,
};

#define MT_SG_MAX_SIZE		8
#define MT_NUM_TX_ENTRIES	256
#define MT_NUM_RX_ENTRIES	128
#define MCU_RESP_URB_SIZE	1024
struct mt76_usb {
	struct mutex usb_ctrl_mtx;
	u8 data[32];

	struct tasklet_struct rx_tasklet;
	struct tasklet_struct tx_tasklet;
	struct delayed_work stat_work;

	u8 out_ep[__MT_EP_OUT_MAX];
	u16 out_max_packet;
	u8 in_ep[__MT_EP_IN_MAX];
	u16 in_max_packet;
	bool sg_en;

	struct mt76u_mcu {
		struct mutex mutex;
		u8 *data;
		u32 msg_seq;

		/* multiple reads */
		struct mt76_reg_pair *rp;
		int rp_len;
		u32 base;
		bool burst;
	} mcu;
};

struct mt76_mmio {
	struct mt76e_mcu {
		struct mutex mutex;

		wait_queue_head_t wait;
		struct sk_buff_head res_q;

		u32 msg_seq;
	} mcu;
	void __iomem *regs;
	spinlock_t irq_lock;
	u32 irqmask;
};

struct mt76_dev {
	struct ieee80211_hw *hw;
	struct cfg80211_chan_def chandef;
	struct ieee80211_channel *main_chan;

	spinlock_t lock;
	spinlock_t cc_lock;

	struct mutex mutex;

	const struct mt76_bus_ops *bus;
	const struct mt76_driver_ops *drv;
	const struct mt76_mcu_ops *mcu_ops;
	struct device *dev;

	struct net_device napi_dev;
	spinlock_t rx_lock;
	struct napi_struct napi[__MT_RXQ_MAX];
	struct sk_buff_head rx_skb[__MT_RXQ_MAX];

	struct list_head txwi_cache;
	struct mt76_queue q_tx[__MT_TXQ_MAX];
	struct mt76_queue q_rx[__MT_RXQ_MAX];
	const struct mt76_queue_ops *queue_ops;
	int tx_dma_idx[4];

	wait_queue_head_t tx_wait;
	struct sk_buff_head status_list;

	unsigned long wcid_mask[MT76_N_WCIDS / BITS_PER_LONG];

	struct mt76_wcid global_wcid;
	struct mt76_wcid __rcu *wcid[MT76_N_WCIDS];

	u8 macaddr[ETH_ALEN];
	u32 rev;
	unsigned long state;

	u8 antenna_mask;
	u16 chainmask;

	struct mt76_sband sband_2g;
	struct mt76_sband sband_5g;
	struct debugfs_blob_wrapper eeprom;
	struct debugfs_blob_wrapper otp;
	struct mt76_hw_cap cap;

	struct mt76_rate_power rate_power;
	int txpower_conf;
	int txpower_cur;

	u32 debugfs_reg;

	struct led_classdev led_cdev;
	char led_name[32];
	bool led_al;
	u8 led_pin;

	u8 csa_complete;

	u32 rxfilter;

	union {
		struct mt76_mmio mmio;
		struct mt76_usb usb;
	};
};

enum mt76_phy_type {
	MT_PHY_TYPE_CCK,
	MT_PHY_TYPE_OFDM,
	MT_PHY_TYPE_HT,
	MT_PHY_TYPE_HT_GF,
	MT_PHY_TYPE_VHT,
};

struct mt76_rx_status {
	struct mt76_wcid *wcid;

	unsigned long reorder_time;

	u8 iv[6];

	u8 aggr:1;
	u8 tid;
	u16 seqno;

	u16 freq;
	u32 flag;
	u8 enc_flags;
	u8 encoding:2, bw:3;
	u8 rate_idx;
	u8 nss;
	u8 band;
	s8 signal;
	u8 chains;
	s8 chain_signal[IEEE80211_MAX_CHAINS];
};

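/*
 * Two families of register accessors: the __mt76_* macros take a
 * struct mt76_dev pointer directly, while the mt76_* versions expect a
 * driver private struct that embeds it as a member named mt76, so chip
 * drivers can pass their own device struct at every call site.
 */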
#define __mt76_rr(dev, ...)	(dev)->bus->rr((dev), __VA_ARGS__)
#define __mt76_wr(dev, ...)	(dev)->bus->wr((dev), __VA_ARGS__)
#define __mt76_rmw(dev, ...)	(dev)->bus->rmw((dev), __VA_ARGS__)
#define __mt76_wr_copy(dev, ...)	(dev)->bus->copy((dev), __VA_ARGS__)

#define __mt76_set(dev, offset, val)	__mt76_rmw(dev, offset, 0, val)
#define __mt76_clear(dev, offset, val)	__mt76_rmw(dev, offset, val, 0)

#define mt76_rr(dev, ...)	(dev)->mt76.bus->rr(&((dev)->mt76), __VA_ARGS__)
#define mt76_wr(dev, ...)	(dev)->mt76.bus->wr(&((dev)->mt76), __VA_ARGS__)
#define mt76_rmw(dev, ...)	(dev)->mt76.bus->rmw(&((dev)->mt76), __VA_ARGS__)
#define mt76_wr_copy(dev, ...)	(dev)->mt76.bus->copy(&((dev)->mt76), __VA_ARGS__)
#define mt76_wr_rp(dev, ...)	(dev)->mt76.bus->wr_rp(&((dev)->mt76), __VA_ARGS__)
#define mt76_rd_rp(dev, ...)	(dev)->mt76.bus->rd_rp(&((dev)->mt76), __VA_ARGS__)

#define mt76_mcu_send_msg(dev, ...)	(dev)->mt76.mcu_ops->mcu_send_msg(&((dev)->mt76), __VA_ARGS__)

#define mt76_set(dev, offset, val)	mt76_rmw(dev, offset, 0, val)
#define mt76_clear(dev, offset, val)	mt76_rmw(dev, offset, val, 0)

#define mt76_get_field(_dev, _reg, _field)		\
	FIELD_GET(_field, mt76_rr(_dev, _reg))

#define mt76_rmw_field(_dev, _reg, _field, _val)	\
	mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val))

#define __mt76_rmw_field(_dev, _reg, _field, _val)	\
	__mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val))

#define mt76_hw(dev) (dev)->mt76.hw

bool __mt76_poll(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
		 int timeout);

#define mt76_poll(dev, ...) __mt76_poll(&((dev)->mt76), __VA_ARGS__)

bool __mt76_poll_msec(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
		      int timeout);

#define mt76_poll_msec(dev, ...) __mt76_poll_msec(&((dev)->mt76), __VA_ARGS__)

void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs);

static inline u16 mt76_chip(struct mt76_dev *dev)
{
	return dev->rev >> 16;
}

static inline u16 mt76_rev(struct mt76_dev *dev)
{
	return dev->rev & 0xffff;
}

#define mt76xx_chip(dev) mt76_chip(&((dev)->mt76))
#define mt76xx_rev(dev) mt76_rev(&((dev)->mt76))

#define mt76_init_queues(dev)		(dev)->mt76.queue_ops->init(&((dev)->mt76))
#define mt76_queue_alloc(dev, ...)	(dev)->mt76.queue_ops->alloc(&((dev)->mt76), __VA_ARGS__)
#define mt76_tx_queue_skb_raw(dev, ...)	(dev)->mt76.queue_ops->tx_queue_skb_raw(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_rx_reset(dev, ...)	(dev)->mt76.queue_ops->rx_reset(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_tx_cleanup(dev, ...)	(dev)->mt76.queue_ops->tx_cleanup(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_kick(dev, ...)	(dev)->mt76.queue_ops->kick(&((dev)->mt76), __VA_ARGS__)

static inline struct mt76_channel_state *
mt76_channel_state(struct mt76_dev *dev, struct ieee80211_channel *c)
{
	struct mt76_sband *msband;
	int idx;

	if (c->band == NL80211_BAND_2GHZ)
		msband = &dev->sband_2g;
	else
		msband = &dev->sband_5g;

	idx = c - &msband->sband.channels[0];
	return &msband->chan[idx];
}

struct mt76_dev *mt76_alloc_device(struct device *pdev, unsigned int size,
				   const struct ieee80211_ops *ops,
				   const struct mt76_driver_ops *drv_ops);
int mt76_register_device(struct mt76_dev *dev, bool vht,
			 struct ieee80211_rate *rates, int n_rates);
void mt76_unregister_device(struct mt76_dev *dev);

struct dentry *mt76_register_debugfs(struct mt76_dev *dev);
void mt76_seq_puts_array(struct seq_file *file, const char *str,
			 s8 *val, int len);

int mt76_eeprom_init(struct mt76_dev *dev, int len);
void mt76_eeprom_override(struct mt76_dev *dev);

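/*
 * The wrap-around helpers below assume size is a power of two (true for
 * every ring and packet id space in this header), e.g.
 * mt76_incr(255, 256) == 0 and mt76_decr(0, 256) == 255.
 */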
/* increment with wrap-around */
static inline int mt76_incr(int val, int size)
{
	return (val + 1) & (size - 1);
}

/* decrement with wrap-around */
static inline int mt76_decr(int val, int size)
{
	return (val - 1) & (size - 1);
}

u8 mt76_ac_to_hwq(u8 ac);

static inline struct ieee80211_txq *
mtxq_to_txq(struct mt76_txq *mtxq)
{
	void *ptr = mtxq;

	return container_of(ptr, struct ieee80211_txq, drv_priv);
}

static inline struct ieee80211_sta *
wcid_to_sta(struct mt76_wcid *wcid)
{
	void *ptr = wcid;

	if (!wcid || !wcid->sta)
		return NULL;

	return container_of(ptr, struct ieee80211_sta, drv_priv);
}

static inline struct mt76_tx_cb *mt76_tx_skb_cb(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct mt76_tx_cb) >
		     sizeof(IEEE80211_SKB_CB(skb)->status.status_driver_data));
	return ((void *)IEEE80211_SKB_CB(skb)->status.status_driver_data);
}

int mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
			  struct sk_buff *skb, struct mt76_wcid *wcid,
			  struct ieee80211_sta *sta);

void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb);
void mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
	     struct mt76_wcid *wcid, struct sk_buff *skb);
void mt76_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq);
void mt76_txq_remove(struct mt76_dev *dev, struct ieee80211_txq *txq);
void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta,
			 bool send_bar);
void mt76_txq_schedule(struct mt76_dev *dev, struct mt76_queue *hwq);
void mt76_txq_schedule_all(struct mt76_dev *dev);
void mt76_release_buffered_frames(struct ieee80211_hw *hw,
				  struct ieee80211_sta *sta,
				  u16 tids, int nframes,
				  enum ieee80211_frame_release_type reason,
				  bool more_data);
void mt76_set_channel(struct mt76_dev *dev);
int mt76_get_survey(struct ieee80211_hw *hw, int idx,
		    struct survey_info *survey);
void mt76_set_stream_caps(struct mt76_dev *dev, bool vht);

int mt76_rx_aggr_start(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid,
		       u16 ssn, u8 size);
void mt76_rx_aggr_stop(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid);

void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
			 struct ieee80211_key_conf *key);

void mt76_tx_status_lock(struct mt76_dev *dev, struct sk_buff_head *list)
			 __acquires(&dev->status_list.lock);
void mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
			   __releases(&dev->status_list.lock);

int mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid,
			   struct sk_buff *skb);
struct sk_buff *mt76_tx_status_skb_get(struct mt76_dev *dev,
				       struct mt76_wcid *wcid, int pktid,
				       struct sk_buff_head *list);
void mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb,
			     struct sk_buff_head *list);
void mt76_tx_complete_skb(struct mt76_dev *dev, struct sk_buff *skb);
void mt76_tx_status_check(struct mt76_dev *dev, struct mt76_wcid *wcid,
			  bool flush);
int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta,
		   enum ieee80211_sta_state old_state,
		   enum ieee80211_sta_state new_state);

struct ieee80211_sta *mt76_rx_convert(struct sk_buff *skb);

int mt76_get_min_avg_rssi(struct mt76_dev *dev);

int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		     int *dbm);

void mt76_csa_check(struct mt76_dev *dev);
void mt76_csa_finish(struct mt76_dev *dev);

/* internal */
void mt76_tx_free(struct mt76_dev *dev);
struct mt76_txwi_cache *mt76_get_txwi(struct mt76_dev *dev);
void mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
		      struct napi_struct *napi);
void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
			   struct napi_struct *napi);
void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames);

/* usb */
static inline bool mt76u_urb_error(struct urb *urb)
{
	return urb->status &&
	       urb->status != -ECONNRESET &&
	       urb->status != -ESHUTDOWN &&
	       urb->status != -ENOENT;
}

/* Map hardware queues to usb endpoints */
static inline u8 q2ep(u8 qid)
{
	/* TODO: take management packets to queue 5 */
	return qid + 1;
}

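/*
 * Data queues start at endpoint index 1 because MT_EP_OUT_INBAND_CMD
 * occupies slot 0. In mt76u_bulk_msg() below, direction is inferred from
 * actual_len: a non-NULL pointer reads from the command response IN
 * endpoint, NULL writes to the in-band command OUT endpoint.
 */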
static inline int
mt76u_bulk_msg(struct mt76_dev *dev, void *data, int len, int *actual_len,
	       int timeout)
{
	struct usb_interface *intf = to_usb_interface(dev->dev);
	struct usb_device *udev = interface_to_usbdev(intf);
	struct mt76_usb *usb = &dev->usb;
	unsigned int pipe;

	if (actual_len)
		pipe = usb_rcvbulkpipe(udev, usb->in_ep[MT_EP_IN_CMD_RESP]);
	else
		pipe = usb_sndbulkpipe(udev, usb->out_ep[MT_EP_OUT_INBAND_CMD]);

	return usb_bulk_msg(udev, pipe, data, len, actual_len, timeout);
}

int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
			 u8 req_type, u16 val, u16 offset,
			 void *buf, size_t len);
void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
		     const u16 offset, const u32 val);
int mt76u_init(struct mt76_dev *dev, struct usb_interface *intf);
int mt76u_submit_rx_buffers(struct mt76_dev *dev);
int mt76u_alloc_queues(struct mt76_dev *dev);
void mt76u_stop_queues(struct mt76_dev *dev);
void mt76u_stop_stat_wk(struct mt76_dev *dev);
void mt76u_queues_deinit(struct mt76_dev *dev);

struct sk_buff *
mt76_mcu_msg_alloc(const void *data, int head_len,
		   int data_len, int tail_len);
void mt76_mcu_rx_event(struct mt76_dev *dev, struct sk_buff *skb);
struct sk_buff *mt76_mcu_get_response(struct mt76_dev *dev,
				      unsigned long expires);

#endif