/* SPDX-License-Identifier: ISC */
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 */

#ifndef __MT76_H
#define __MT76_H

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/leds.h>
#include <linux/usb.h>
#include <linux/average.h>
#include <net/mac80211.h>
#include "util.h"

#define MT_TX_RING_SIZE		256
#define MT_MCU_RING_SIZE	32
#define MT_RX_BUF_SIZE		2048
#define MT_SKB_HEAD_LEN		128

struct mt76_dev;
struct mt76_phy;
struct mt76_wcid;

struct mt76_reg_pair {
	u32 reg;
	u32 value;
};

enum mt76_bus_type {
	MT76_BUS_MMIO,
	MT76_BUS_USB,
};

struct mt76_bus_ops {
	u32 (*rr)(struct mt76_dev *dev, u32 offset);
	void (*wr)(struct mt76_dev *dev, u32 offset, u32 val);
	u32 (*rmw)(struct mt76_dev *dev, u32 offset, u32 mask, u32 val);
	void (*write_copy)(struct mt76_dev *dev, u32 offset, const void *data,
			   int len);
	void (*read_copy)(struct mt76_dev *dev, u32 offset, void *data,
			  int len);
	int (*wr_rp)(struct mt76_dev *dev, u32 base,
		     const struct mt76_reg_pair *rp, int len);
	int (*rd_rp)(struct mt76_dev *dev, u32 base,
		     struct mt76_reg_pair *rp, int len);
	enum mt76_bus_type type;
};

#define mt76_is_usb(dev) ((dev)->bus->type == MT76_BUS_USB)
#define mt76_is_mmio(dev) ((dev)->bus->type == MT76_BUS_MMIO)

enum mt76_txq_id {
	MT_TXQ_VO = IEEE80211_AC_VO,
	MT_TXQ_VI = IEEE80211_AC_VI,
	MT_TXQ_BE = IEEE80211_AC_BE,
	MT_TXQ_BK = IEEE80211_AC_BK,
	MT_TXQ_PSD,
	MT_TXQ_MCU,
	MT_TXQ_BEACON,
	MT_TXQ_CAB,
	MT_TXQ_FWDL,
	__MT_TXQ_MAX
};

enum mt76_rxq_id {
	MT_RXQ_MAIN,
	MT_RXQ_MCU,
	__MT_RXQ_MAX
};

struct mt76_queue_buf {
	dma_addr_t addr;
	int len;
};

struct mt76_tx_info {
	struct mt76_queue_buf buf[32];
	struct sk_buff *skb;
	int nbuf;
	u32 info;
};

struct mt76_queue_entry {
	union {
		void *buf;
		struct sk_buff *skb;
	};
	union {
		struct mt76_txwi_cache *txwi;
		struct urb *urb;
	};
	enum mt76_txq_id qid;
	bool skip_buf0:1;
	bool schedule:1;
	bool done:1;
};

struct mt76_queue_regs {
	u32 desc_base;
	u32 ring_size;
	u32 cpu_idx;
	u32 dma_idx;
} __packed __aligned(4);

struct mt76_queue {
	struct mt76_queue_regs __iomem *regs;

	spinlock_t lock;
	struct mt76_queue_entry *entry;
	struct mt76_desc *desc;

	u16 first;
	u16 head;
	u16 tail;
	int ndesc;
	int queued;
	int buf_size;
	bool stopped;

	u8 buf_offset;
	u8 hw_idx;

	dma_addr_t desc_dma;
	struct sk_buff *rx_head;
	struct page_frag_cache rx_page;
};

struct mt76_sw_queue {
	struct mt76_queue *q;

	struct list_head swq;
	int swq_queued;
};

struct mt76_mcu_ops {
	int (*mcu_send_msg)(struct mt76_dev *dev, int cmd, const void *data,
			    int len, bool wait_resp);
	int (*mcu_wr_rp)(struct mt76_dev *dev, u32 base,
			 const struct mt76_reg_pair *rp, int len);
	int (*mcu_rd_rp)(struct mt76_dev *dev, u32 base,
			 struct mt76_reg_pair *rp, int len);
	int (*mcu_restart)(struct mt76_dev *dev);
};

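/*
 * Example (illustrative sketch, not part of the API): the bus backend
 * fills in mt76_mcu_ops and the core dispatches commands through it.
 * A command would be issued roughly like this; MCU_CMD_FOO and the
 * request layout below are made-up placeholders:
 *
 *	struct {
 *		__le32 addr;
 *		__le32 val;
 *	} __packed req = {
 *		.addr = cpu_to_le32(addr),
 *		.val = cpu_to_le32(val),
 *	};
 *
 *	return dev->mcu_ops->mcu_send_msg(dev, MCU_CMD_FOO, &req,
 *					  sizeof(req), true);
 */
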
struct mt76_queue_ops {
	int (*init)(struct mt76_dev *dev);

	int (*alloc)(struct mt76_dev *dev, struct mt76_queue *q,
		     int idx, int n_desc, int bufsize,
		     u32 ring_base);

	int (*tx_queue_skb)(struct mt76_dev *dev, enum mt76_txq_id qid,
			    struct sk_buff *skb, struct mt76_wcid *wcid,
			    struct ieee80211_sta *sta);

	int (*tx_queue_skb_raw)(struct mt76_dev *dev, enum mt76_txq_id qid,
				struct sk_buff *skb, u32 tx_info);

	void *(*dequeue)(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
			 int *len, u32 *info, bool *more);

	void (*rx_reset)(struct mt76_dev *dev, enum mt76_rxq_id qid);

	void (*tx_cleanup)(struct mt76_dev *dev, enum mt76_txq_id qid,
			   bool flush);

	void (*kick)(struct mt76_dev *dev, struct mt76_queue *q);
};

enum mt76_wcid_flags {
	MT_WCID_FLAG_CHECK_PS,
	MT_WCID_FLAG_PS,
};

#define MT76_N_WCIDS 128

/* stored in ieee80211_tx_info::hw_queue */
#define MT_TX_HW_QUEUE_EXT_PHY		BIT(3)

DECLARE_EWMA(signal, 10, 8);

#define MT_WCID_TX_INFO_RATE		GENMASK(15, 0)
#define MT_WCID_TX_INFO_NSS		GENMASK(17, 16)
#define MT_WCID_TX_INFO_TXPWR_ADJ	GENMASK(25, 18)
#define MT_WCID_TX_INFO_SET		BIT(31)

struct mt76_wcid {
	struct mt76_rx_tid __rcu *aggr[IEEE80211_NUM_TIDS];

	unsigned long flags;

	struct ewma_signal rssi;
	int inactive_count;

	u8 idx;
	u8 hw_key_idx;

	u8 sta:1;
	u8 ext_phy:1;

	u8 rx_check_pn;
	u8 rx_key_pn[IEEE80211_NUM_TIDS][6];
	u16 cipher;

	u32 tx_info;
	bool sw_iv;

	u8 packet_id;
};

struct mt76_txq {
	struct mt76_sw_queue *swq;
	struct mt76_wcid *wcid;

	struct sk_buff_head retry_q;

	u16 agg_ssn;
	bool send_bar;
	bool aggr;
};

struct mt76_txwi_cache {
	struct list_head list;
	dma_addr_t dma_addr;

	struct sk_buff *skb;
};

struct mt76_rx_tid {
	struct rcu_head rcu_head;

	struct mt76_dev *dev;

	spinlock_t lock;
	struct delayed_work reorder_work;

	u16 head;
	u8 size;
	u8 nframes;

	u8 num;

	u8 started:1, stopped:1, timer_pending:1;

	struct sk_buff *reorder_buf[];
};

#define MT_TX_CB_DMA_DONE		BIT(0)
#define MT_TX_CB_TXS_DONE		BIT(1)
#define MT_TX_CB_TXS_FAILED		BIT(2)

#define MT_PACKET_ID_MASK		GENMASK(6, 0)
#define MT_PACKET_ID_NO_ACK		0
#define MT_PACKET_ID_NO_SKB		1
#define MT_PACKET_ID_FIRST		2
#define MT_PACKET_ID_HAS_RATE		BIT(7)

#define MT_TX_STATUS_SKB_TIMEOUT	HZ

struct mt76_tx_cb {
	unsigned long jiffies;
	u8 wcid;
	u8 pktid;
	u8 flags;
};

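/*
 * Example (illustrative sketch): mt76_tx_skb_cb(), declared further
 * down, maps the mac80211 status driver data to struct mt76_tx_cb.
 * Packet IDs below MT_PACKET_ID_FIRST are reserved markers (no ACK /
 * no status skb), so only IDs >= MT_PACKET_ID_FIRST without
 * MT_PACKET_ID_HAS_RATE refer to an skb parked on the status list:
 *
 *	struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
 *	bool has_skb = !(cb->pktid & MT_PACKET_ID_HAS_RATE) &&
 *		       cb->pktid >= MT_PACKET_ID_FIRST;
 *
 * This mirrors the logic of mt76_is_skb_pktid() below.
 */
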
enum {
	MT76_STATE_INITIALIZED,
	MT76_STATE_RUNNING,
	MT76_STATE_MCU_RUNNING,
	MT76_SCANNING,
	MT76_RESET,
	MT76_REMOVED,
	MT76_READING_STATS,
};

struct mt76_hw_cap {
	bool has_2ghz;
	bool has_5ghz;
};

#define MT_DRV_TXWI_NO_FREE		BIT(0)
#define MT_DRV_TX_ALIGNED4_SKBS		BIT(1)
#define MT_DRV_SW_RX_AIRTIME		BIT(2)

struct mt76_driver_ops {
	u32 drv_flags;
	u32 survey_flags;
	u16 txwi_size;

	void (*update_survey)(struct mt76_dev *dev);

	int (*tx_prepare_skb)(struct mt76_dev *dev, void *txwi_ptr,
			      enum mt76_txq_id qid, struct mt76_wcid *wcid,
			      struct ieee80211_sta *sta,
			      struct mt76_tx_info *tx_info);

	void (*tx_complete_skb)(struct mt76_dev *dev, enum mt76_txq_id qid,
				struct mt76_queue_entry *e);

	bool (*tx_status_data)(struct mt76_dev *dev, u8 *update);

	void (*rx_skb)(struct mt76_dev *dev, enum mt76_rxq_id q,
		       struct sk_buff *skb);

	void (*rx_poll_complete)(struct mt76_dev *dev, enum mt76_rxq_id q);

	void (*sta_ps)(struct mt76_dev *dev, struct ieee80211_sta *sta,
		       bool ps);

	int (*sta_add)(struct mt76_dev *dev, struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta);

	void (*sta_assoc)(struct mt76_dev *dev, struct ieee80211_vif *vif,
			  struct ieee80211_sta *sta);

	void (*sta_remove)(struct mt76_dev *dev, struct ieee80211_vif *vif,
			   struct ieee80211_sta *sta);
};

struct mt76_channel_state {
	u64 cc_active;
	u64 cc_busy;
	u64 cc_rx;
	u64 cc_bss_rx;
	u64 cc_tx;

	s8 noise;
};

struct mt76_sband {
	struct ieee80211_supported_band sband;
	struct mt76_channel_state *chan;
};

struct mt76_rate_power {
	union {
		struct {
			s8 cck[4];
			s8 ofdm[8];
			s8 stbc[10];
			s8 ht[16];
			s8 vht[10];
		};
		s8 all[48];
	};
};

/* addr req mask */
#define MT_VEND_TYPE_EEPROM	BIT(31)
#define MT_VEND_TYPE_CFG	BIT(30)
#define MT_VEND_TYPE_MASK	(MT_VEND_TYPE_EEPROM | MT_VEND_TYPE_CFG)

#define MT_VEND_ADDR(type, n)	(MT_VEND_TYPE_##type | (n))
enum mt_vendor_req {
	MT_VEND_DEV_MODE =	0x1,
	MT_VEND_WRITE =		0x2,
	MT_VEND_MULTI_WRITE =	0x6,
	MT_VEND_MULTI_READ =	0x7,
	MT_VEND_READ_EEPROM =	0x9,
	MT_VEND_WRITE_FCE =	0x42,
	MT_VEND_WRITE_CFG =	0x46,
	MT_VEND_READ_CFG =	0x47,
};

enum mt76u_in_ep {
	MT_EP_IN_PKT_RX,
	MT_EP_IN_CMD_RESP,
	__MT_EP_IN_MAX,
};

enum mt76u_out_ep {
	MT_EP_OUT_INBAND_CMD,
	MT_EP_OUT_AC_BE,
	MT_EP_OUT_AC_BK,
	MT_EP_OUT_AC_VI,
	MT_EP_OUT_AC_VO,
	MT_EP_OUT_HCCA,
	__MT_EP_OUT_MAX,
};

#define MT_TX_SG_MAX_SIZE	8
#define MT_RX_SG_MAX_SIZE	1
#define MT_NUM_TX_ENTRIES	256
#define MT_NUM_RX_ENTRIES	128
#define MCU_RESP_URB_SIZE	1024
struct mt76_usb {
	struct mutex usb_ctrl_mtx;
	__le32 reg_val;
	u8 *data;
	u16 data_len;

	struct tasklet_struct rx_tasklet;
	struct workqueue_struct *stat_wq;
	struct work_struct stat_work;

	u8 out_ep[__MT_EP_OUT_MAX];
	u8 in_ep[__MT_EP_IN_MAX];
	bool sg_en;

	struct mt76u_mcu {
		struct mutex mutex;
		u8 *data;
		u32 msg_seq;

		/* multiple reads */
		struct mt76_reg_pair *rp;
		int rp_len;
		u32 base;
		bool burst;
	} mcu;
};

struct mt76_mmio {
	struct mt76e_mcu {
		struct mutex mutex;

		wait_queue_head_t wait;
		struct sk_buff_head res_q;

		u32 msg_seq;
	} mcu;
	void __iomem *regs;
	spinlock_t irq_lock;
	u32 irqmask;
};

struct mt76_rx_status {
	union {
		struct mt76_wcid *wcid;
		u8 wcid_idx;
	};

	unsigned long reorder_time;

	u32 ampdu_ref;

	u8 iv[6];

	u8 ext_phy:1;
	u8 aggr:1;
	u8 tid;
	u16 seqno;

	u16 freq;
	u32 flag;
	u8 enc_flags;
	u8 encoding:2, bw:3;
	u8 rate_idx;
	u8 nss;
	u8 band;
	s8 signal;
	u8 chains;
	s8 chain_signal[IEEE80211_MAX_CHAINS];
};

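/*
 * Example (illustrative sketch, assuming the usual mt76 RX convention
 * that this struct lives at the head of skb->cb): a driver's RX decode
 * path fills in the status before handing the frame to mt76_rx():
 *
 *	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
 *
 *	status->freq = dev->phy.chandef.chan->center_freq;
 *	status->band = dev->phy.chandef.chan->band;
 *	status->signal = rssi;	// decoded from the RX descriptor
 *
 *	mt76_rx(dev, MT_RXQ_MAIN, skb);
 */
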
struct mt76_phy {
	struct ieee80211_hw *hw;
	struct mt76_dev *dev;
	void *priv;

	unsigned long state;

	struct cfg80211_chan_def chandef;
	struct ieee80211_channel *main_chan;

	struct mt76_channel_state *chan_state;
	ktime_t survey_time;

	struct mt76_sband sband_2g;
	struct mt76_sband sband_5g;

	int txpower_cur;
	u8 antenna_mask;
};

struct mt76_dev {
	struct mt76_phy phy; /* must be first */

	struct mt76_phy *phy2;

	struct ieee80211_hw *hw;

	spinlock_t lock;
	spinlock_t cc_lock;

	u32 cur_cc_bss_rx;

	struct mt76_rx_status rx_ampdu_status;
	u32 rx_ampdu_len;
	u32 rx_ampdu_ref;

	struct mutex mutex;

	const struct mt76_bus_ops *bus;
	const struct mt76_driver_ops *drv;
	const struct mt76_mcu_ops *mcu_ops;
	struct device *dev;

	struct net_device napi_dev;
	spinlock_t rx_lock;
	struct napi_struct napi[__MT_RXQ_MAX];
	struct sk_buff_head rx_skb[__MT_RXQ_MAX];

	struct list_head txwi_cache;
	struct mt76_sw_queue q_tx[2 * __MT_TXQ_MAX];
	struct mt76_queue q_rx[__MT_RXQ_MAX];
	const struct mt76_queue_ops *queue_ops;
	int tx_dma_idx[4];

	struct tasklet_struct tx_tasklet;
	struct napi_struct tx_napi;
	struct delayed_work mac_work;

	wait_queue_head_t tx_wait;
	struct sk_buff_head status_list;

	unsigned long wcid_mask[MT76_N_WCIDS / BITS_PER_LONG];
	unsigned long wcid_phy_mask[MT76_N_WCIDS / BITS_PER_LONG];

	struct mt76_wcid global_wcid;
	struct mt76_wcid __rcu *wcid[MT76_N_WCIDS];

	u8 macaddr[ETH_ALEN];
	u32 rev;

	u32 aggr_stats[32];

	struct tasklet_struct pre_tbtt_tasklet;
	int beacon_int;
	u8 beacon_mask;

	struct debugfs_blob_wrapper eeprom;
	struct debugfs_blob_wrapper otp;
	struct mt76_hw_cap cap;

	struct mt76_rate_power rate_power;

	enum nl80211_dfs_regions region;

	u32 debugfs_reg;

	struct led_classdev led_cdev;
	char led_name[32];
	bool led_al;
	u8 led_pin;

	u8 csa_complete;

	u32 rxfilter;

	union {
		struct mt76_mmio mmio;
		struct mt76_usb usb;
	};
};

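/*
 * Example (illustrative sketch): drivers embed struct mt76_dev at the
 * start of their private device struct; the mt76_rr()/mt76_wr() style
 * wrappers below rely on that embedded member being named mt76. The
 * struct name here is a made-up placeholder:
 *
 *	struct foo_dev {
 *		struct mt76_dev mt76;	// must be first
 *		u32 foo_private_state;
 *	};
 *
 *	u32 val = mt76_rr(fdev, 0x1000);
 *	// expands to fdev->mt76.bus->rr(&fdev->mt76, 0x1000)
 */
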
enum mt76_phy_type {
	MT_PHY_TYPE_CCK,
	MT_PHY_TYPE_OFDM,
	MT_PHY_TYPE_HT,
	MT_PHY_TYPE_HT_GF,
	MT_PHY_TYPE_VHT,
};

#define __mt76_rr(dev, ...)	(dev)->bus->rr((dev), __VA_ARGS__)
#define __mt76_wr(dev, ...)	(dev)->bus->wr((dev), __VA_ARGS__)
#define __mt76_rmw(dev, ...)	(dev)->bus->rmw((dev), __VA_ARGS__)
#define __mt76_wr_copy(dev, ...)	(dev)->bus->write_copy((dev), __VA_ARGS__)
#define __mt76_rr_copy(dev, ...)	(dev)->bus->read_copy((dev), __VA_ARGS__)

#define __mt76_set(dev, offset, val)	__mt76_rmw(dev, offset, 0, val)
#define __mt76_clear(dev, offset, val)	__mt76_rmw(dev, offset, val, 0)

#define mt76_rr(dev, ...)	(dev)->mt76.bus->rr(&((dev)->mt76), __VA_ARGS__)
#define mt76_wr(dev, ...)	(dev)->mt76.bus->wr(&((dev)->mt76), __VA_ARGS__)
#define mt76_rmw(dev, ...)	(dev)->mt76.bus->rmw(&((dev)->mt76), __VA_ARGS__)
#define mt76_wr_copy(dev, ...)	(dev)->mt76.bus->write_copy(&((dev)->mt76), __VA_ARGS__)
#define mt76_rr_copy(dev, ...)	(dev)->mt76.bus->read_copy(&((dev)->mt76), __VA_ARGS__)
#define mt76_wr_rp(dev, ...)	(dev)->mt76.bus->wr_rp(&((dev)->mt76), __VA_ARGS__)
#define mt76_rd_rp(dev, ...)	(dev)->mt76.bus->rd_rp(&((dev)->mt76), __VA_ARGS__)

#define mt76_mcu_send_msg(dev, ...)	(dev)->mt76.mcu_ops->mcu_send_msg(&((dev)->mt76), __VA_ARGS__)
#define __mt76_mcu_send_msg(dev, ...)	(dev)->mcu_ops->mcu_send_msg((dev), __VA_ARGS__)
#define mt76_mcu_restart(dev, ...)	(dev)->mt76.mcu_ops->mcu_restart(&((dev)->mt76))
#define __mt76_mcu_restart(dev, ...)	(dev)->mcu_ops->mcu_restart((dev))

#define mt76_set(dev, offset, val)	mt76_rmw(dev, offset, 0, val)
#define mt76_clear(dev, offset, val)	mt76_rmw(dev, offset, val, 0)

#define mt76_get_field(_dev, _reg, _field)		\
	FIELD_GET(_field, mt76_rr(_dev, _reg))

#define mt76_rmw_field(_dev, _reg, _field, _val)	\
	mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val))

#define __mt76_rmw_field(_dev, _reg, _field, _val)	\
	__mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val))

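/*
 * Example (illustrative sketch): the field helpers combine a GENMASK
 * field definition with a read-modify-write. The register and field
 * names below are made-up placeholders:
 *
 *	#define MT_FOO_CFG		0x1234
 *	#define MT_FOO_CFG_GAIN		GENMASK(7, 4)
 *
 *	mt76_rmw_field(dev, MT_FOO_CFG, MT_FOO_CFG_GAIN, 3);
 *	// clears bits 7:4, then writes FIELD_PREP(MT_FOO_CFG_GAIN, 3)
 *
 *	u8 gain = mt76_get_field(dev, MT_FOO_CFG, MT_FOO_CFG_GAIN);
 */
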
#define mt76_hw(dev) (dev)->mphy.hw

static inline struct ieee80211_hw *
mt76_wcid_hw(struct mt76_dev *dev, u8 wcid)
{
	if (wcid < MT76_N_WCIDS &&
	    mt76_wcid_mask_test(dev->wcid_phy_mask, wcid))
		return dev->phy2->hw;

	return dev->phy.hw;
}

bool __mt76_poll(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
		 int timeout);

#define mt76_poll(dev, ...) __mt76_poll(&((dev)->mt76), __VA_ARGS__)

bool __mt76_poll_msec(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
		      int timeout);

#define mt76_poll_msec(dev, ...) __mt76_poll_msec(&((dev)->mt76), __VA_ARGS__)

void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs);
void mt76_pci_disable_aspm(struct pci_dev *pdev);

static inline u16 mt76_chip(struct mt76_dev *dev)
{
	return dev->rev >> 16;
}

static inline u16 mt76_rev(struct mt76_dev *dev)
{
	return dev->rev & 0xffff;
}

#define mt76xx_chip(dev) mt76_chip(&((dev)->mt76))
#define mt76xx_rev(dev) mt76_rev(&((dev)->mt76))

#define mt76_init_queues(dev)		(dev)->mt76.queue_ops->init(&((dev)->mt76))
#define mt76_queue_alloc(dev, ...)	(dev)->mt76.queue_ops->alloc(&((dev)->mt76), __VA_ARGS__)
#define mt76_tx_queue_skb_raw(dev, ...)	(dev)->mt76.queue_ops->tx_queue_skb_raw(&((dev)->mt76), __VA_ARGS__)
#define mt76_tx_queue_skb(dev, ...)	(dev)->mt76.queue_ops->tx_queue_skb(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_rx_reset(dev, ...)	(dev)->mt76.queue_ops->rx_reset(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_tx_cleanup(dev, ...)	(dev)->mt76.queue_ops->tx_cleanup(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_kick(dev, ...)	(dev)->mt76.queue_ops->kick(&((dev)->mt76), __VA_ARGS__)

struct mt76_dev *mt76_alloc_device(struct device *pdev, unsigned int size,
				   const struct ieee80211_ops *ops,
				   const struct mt76_driver_ops *drv_ops);
int mt76_register_device(struct mt76_dev *dev, bool vht,
			 struct ieee80211_rate *rates, int n_rates);
void mt76_unregister_device(struct mt76_dev *dev);
void mt76_free_device(struct mt76_dev *dev);
void mt76_unregister_phy(struct mt76_phy *phy);

struct mt76_phy *mt76_alloc_phy(struct mt76_dev *dev, unsigned int size,
				const struct ieee80211_ops *ops);
int mt76_register_phy(struct mt76_phy *phy);

struct dentry *mt76_register_debugfs(struct mt76_dev *dev);
int mt76_queues_read(struct seq_file *s, void *data);
void mt76_seq_puts_array(struct seq_file *file, const char *str,
			 s8 *val, int len);

int mt76_eeprom_init(struct mt76_dev *dev, int len);
void mt76_eeprom_override(struct mt76_dev *dev);

static inline struct mt76_phy *
mt76_dev_phy(struct mt76_dev *dev, bool phy_ext)
{
	if (phy_ext && dev->phy2)
		return dev->phy2;
	return &dev->phy;
}

static inline struct ieee80211_hw *
mt76_phy_hw(struct mt76_dev *dev, bool phy_ext)
{
	return mt76_dev_phy(dev, phy_ext)->hw;
}

static inline u8 *
mt76_get_txwi_ptr(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
	return (u8 *)t - dev->drv->txwi_size;
}

/* increment with wrap-around */
static inline int mt76_incr(int val, int size)
{
	return (val + 1) & (size - 1);
}

/* decrement with wrap-around */
static inline int mt76_decr(int val, int size)
{
	return (val - 1) & (size - 1);
}

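/*
 * Note: the mask-based wrap above assumes size is a power of two
 * (e.g. MT_TX_RING_SIZE = 256), so mt76_incr(255, 256) == 0 and
 * mt76_decr(0, 256) == 255. It is not a general modulo.
 */
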
u8 mt76_ac_to_hwq(u8 ac);

static inline struct ieee80211_txq *
mtxq_to_txq(struct mt76_txq *mtxq)
{
	void *ptr = mtxq;

	return container_of(ptr, struct ieee80211_txq, drv_priv);
}

static inline struct ieee80211_sta *
wcid_to_sta(struct mt76_wcid *wcid)
{
	void *ptr = wcid;

	if (!wcid || !wcid->sta)
		return NULL;

	return container_of(ptr, struct ieee80211_sta, drv_priv);
}

static inline struct mt76_tx_cb *mt76_tx_skb_cb(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct mt76_tx_cb) >
		     sizeof(IEEE80211_SKB_CB(skb)->status.status_driver_data));
	return ((void *)IEEE80211_SKB_CB(skb)->status.status_driver_data);
}

static inline void mt76_insert_hdr_pad(struct sk_buff *skb)
{
	int len = ieee80211_get_hdrlen_from_skb(skb);

	if (len % 4 == 0)
		return;

	skb_push(skb, 2);
	memmove(skb->data, skb->data + 2, len);

	skb->data[len] = 0;
	skb->data[len + 1] = 0;
}

static inline bool mt76_is_skb_pktid(u8 pktid)
{
	if (pktid & MT_PACKET_ID_HAS_RATE)
		return false;

	return pktid >= MT_PACKET_ID_FIRST;
}

void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb);
void mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta,
	     struct mt76_wcid *wcid, struct sk_buff *skb);
void mt76_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq);
void mt76_txq_remove(struct mt76_dev *dev, struct ieee80211_txq *txq);
void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta,
			 bool send_bar);
void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid);
void mt76_txq_schedule_all(struct mt76_phy *phy);
void mt76_tx_tasklet(unsigned long data);
void mt76_release_buffered_frames(struct ieee80211_hw *hw,
				  struct ieee80211_sta *sta,
				  u16 tids, int nframes,
				  enum ieee80211_frame_release_type reason,
				  bool more_data);
bool mt76_has_tx_pending(struct mt76_phy *phy);
void mt76_set_channel(struct mt76_phy *phy);
void mt76_update_survey(struct mt76_dev *dev);
int mt76_get_survey(struct ieee80211_hw *hw, int idx,
		    struct survey_info *survey);
void mt76_set_stream_caps(struct mt76_dev *dev, bool vht);

int mt76_rx_aggr_start(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid,
		       u16 ssn, u8 size);
void mt76_rx_aggr_stop(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid);

void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
			 struct ieee80211_key_conf *key);

void mt76_tx_status_lock(struct mt76_dev *dev, struct sk_buff_head *list)
		   __acquires(&dev->status_list.lock);
void mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
		   __releases(&dev->status_list.lock);

int mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid,
			   struct sk_buff *skb);
struct sk_buff *mt76_tx_status_skb_get(struct mt76_dev *dev,
				       struct mt76_wcid *wcid, int pktid,
				       struct sk_buff_head *list);
void mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb,
			     struct sk_buff_head *list);
void mt76_tx_complete_skb(struct mt76_dev *dev, struct sk_buff *skb);
void mt76_tx_status_check(struct mt76_dev *dev, struct mt76_wcid *wcid,
			  bool flush);

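/*
 * Example (illustrative sketch): a driver's TX-status handler matches a
 * hardware status report back to the skb parked on the status list,
 * with pktid taken from the status report:
 *
 *	struct sk_buff_head list;
 *	struct sk_buff *skb;
 *
 *	mt76_tx_status_lock(dev, &list);
 *	skb = mt76_tx_status_skb_get(dev, wcid, pktid, &list);
 *	if (skb)
 *		mt76_tx_status_skb_done(dev, skb, &list);
 *	mt76_tx_status_unlock(dev, &list);	// reports completed skbs
 */
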
int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta,
		   enum ieee80211_sta_state old_state,
		   enum ieee80211_sta_state new_state);
void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta);

int mt76_get_min_avg_rssi(struct mt76_dev *dev, bool ext_phy);

int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		     int *dbm);

void mt76_csa_check(struct mt76_dev *dev);
void mt76_csa_finish(struct mt76_dev *dev);

int mt76_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant);
int mt76_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set);
void mt76_insert_ccmp_hdr(struct sk_buff *skb, u8 key_id);
int mt76_get_rate(struct mt76_dev *dev,
		  struct ieee80211_supported_band *sband,
		  int idx, bool cck);
void mt76_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		  const u8 *mac);
void mt76_sw_scan_complete(struct ieee80211_hw *hw,
			   struct ieee80211_vif *vif);

/* internal */
static inline struct ieee80211_hw *
mt76_tx_status_get_hw(struct mt76_dev *dev, struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hw *hw = dev->phy.hw;

	if ((info->hw_queue & MT_TX_HW_QUEUE_EXT_PHY) && dev->phy2)
		hw = dev->phy2->hw;

	info->hw_queue &= ~MT_TX_HW_QUEUE_EXT_PHY;

	return hw;
}

void mt76_tx_free(struct mt76_dev *dev);
struct mt76_txwi_cache *mt76_get_txwi(struct mt76_dev *dev);
void mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
		      struct napi_struct *napi);
void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
			   struct napi_struct *napi);
void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames);

/* usb */
static inline bool mt76u_urb_error(struct urb *urb)
{
	return urb->status &&
	       urb->status != -ECONNRESET &&
	       urb->status != -ESHUTDOWN &&
	       urb->status != -ENOENT;
}

/* Map hardware queues to usb endpoints */
static inline u8 q2ep(u8 qid)
{
	/* TODO: take management packets to queue 5 */
	return qid + 1;
}

static inline int
mt76u_bulk_msg(struct mt76_dev *dev, void *data, int len, int *actual_len,
	       int timeout)
{
	struct usb_interface *uintf = to_usb_interface(dev->dev);
	struct usb_device *udev = interface_to_usbdev(uintf);
	struct mt76_usb *usb = &dev->usb;
	unsigned int pipe;

	if (actual_len)
		pipe = usb_rcvbulkpipe(udev, usb->in_ep[MT_EP_IN_CMD_RESP]);
	else
		pipe = usb_sndbulkpipe(udev, usb->out_ep[MT_EP_OUT_INBAND_CMD]);

	return usb_bulk_msg(udev, pipe, data, len, actual_len, timeout);
}

int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
			 u8 req_type, u16 val, u16 offset,
			 void *buf, size_t len);
void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
		     const u16 offset, const u32 val);
int mt76u_init(struct mt76_dev *dev, struct usb_interface *intf);
void mt76u_deinit(struct mt76_dev *dev);
int mt76u_alloc_queues(struct mt76_dev *dev);
void mt76u_stop_tx(struct mt76_dev *dev);
void mt76u_stop_rx(struct mt76_dev *dev);
int mt76u_resume_rx(struct mt76_dev *dev);
void mt76u_queues_deinit(struct mt76_dev *dev);

struct sk_buff *
mt76_mcu_msg_alloc(const void *data, int head_len,
		   int data_len, int tail_len);
void mt76_mcu_rx_event(struct mt76_dev *dev, struct sk_buff *skb);
struct sk_buff *mt76_mcu_get_response(struct mt76_dev *dev,
				      unsigned long expires);

void mt76_set_irq_mask(struct mt76_dev *dev, u32 addr, u32 clear, u32 set);

#endif