/* SPDX-License-Identifier: ISC */
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 */

#ifndef __MT76_H
#define __MT76_H

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/leds.h>
#include <linux/usb.h>
#include <linux/average.h>
#include <net/mac80211.h>
#include "util.h"

#define MT_TX_RING_SIZE		256
#define MT_MCU_RING_SIZE	32
#define MT_RX_BUF_SIZE		2048
#define MT_SKB_HEAD_LEN		128

struct mt76_dev;
struct mt76_phy;
struct mt76_wcid;

struct mt76_reg_pair {
	u32 reg;
	u32 value;
};

enum mt76_bus_type {
	MT76_BUS_MMIO,
	MT76_BUS_USB,
};

struct mt76_bus_ops {
	u32 (*rr)(struct mt76_dev *dev, u32 offset);
	void (*wr)(struct mt76_dev *dev, u32 offset, u32 val);
	u32 (*rmw)(struct mt76_dev *dev, u32 offset, u32 mask, u32 val);
	void (*write_copy)(struct mt76_dev *dev, u32 offset, const void *data,
			   int len);
	void (*read_copy)(struct mt76_dev *dev, u32 offset, void *data,
			  int len);
	int (*wr_rp)(struct mt76_dev *dev, u32 base,
		     const struct mt76_reg_pair *rp, int len);
	int (*rd_rp)(struct mt76_dev *dev, u32 base,
		     struct mt76_reg_pair *rp, int len);
	enum mt76_bus_type type;
};

#define mt76_is_usb(dev) ((dev)->bus->type == MT76_BUS_USB)
#define mt76_is_mmio(dev) ((dev)->bus->type == MT76_BUS_MMIO)

enum mt76_txq_id {
	MT_TXQ_VO = IEEE80211_AC_VO,
	MT_TXQ_VI = IEEE80211_AC_VI,
	MT_TXQ_BE = IEEE80211_AC_BE,
	MT_TXQ_BK = IEEE80211_AC_BK,
	MT_TXQ_PSD,
	MT_TXQ_MCU,
	MT_TXQ_BEACON,
	MT_TXQ_CAB,
	MT_TXQ_FWDL,
	__MT_TXQ_MAX
};

enum mt76_rxq_id {
	MT_RXQ_MAIN,
	MT_RXQ_MCU,
	__MT_RXQ_MAX
};

struct mt76_queue_buf {
	dma_addr_t addr;
	int len;
};

struct mt76_tx_info {
	struct mt76_queue_buf buf[32];
	struct sk_buff *skb;
	int nbuf;
	u32 info;
};

struct mt76_queue_entry {
	union {
		void *buf;
		struct sk_buff *skb;
	};
	union {
		struct mt76_txwi_cache *txwi;
		struct urb *urb;
	};
	enum mt76_txq_id qid;
	bool skip_buf0:1;
	bool schedule:1;
	bool done:1;
};

struct mt76_queue_regs {
	u32 desc_base;
	u32 ring_size;
	u32 cpu_idx;
	u32 dma_idx;
} __packed __aligned(4);

struct mt76_queue {
	struct mt76_queue_regs __iomem *regs;

	spinlock_t lock;
	struct mt76_queue_entry *entry;
	struct mt76_desc *desc;

	u16 first;
	u16 head;
	u16 tail;
	int ndesc;
	int queued;
	int buf_size;
	bool stopped;

	u8 buf_offset;
	u8 hw_idx;

	dma_addr_t desc_dma;
	struct sk_buff *rx_head;
	struct page_frag_cache rx_page;
};

struct mt76_sw_queue {
	struct mt76_queue *q;

	struct list_head swq;
	int swq_queued;
};

struct mt76_mcu_ops {
	int (*mcu_send_msg)(struct mt76_dev *dev, int cmd, const void *data,
			    int len, bool wait_resp);
	int (*mcu_skb_send_msg)(struct mt76_dev *dev, struct sk_buff *skb,
				int cmd, bool wait_resp);
	int (*mcu_wr_rp)(struct mt76_dev *dev, u32 base,
			 const struct mt76_reg_pair *rp, int len);
	int (*mcu_rd_rp)(struct mt76_dev *dev, u32 base,
			 struct mt76_reg_pair *rp, int len);
	int (*mcu_restart)(struct mt76_dev *dev);
};
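/*
 * Illustrative sketch, not part of the driver API: a read-modify-write
 * can be composed from the ->rr/->wr bus ops above. rmw clears the bits
 * in @mask and then sets the bits in @val, which is what the
 * __mt76_set()/__mt76_clear() helpers further down rely on. Bus
 * backends normally provide a native ->rmw instead.
 */
static inline u32
mt76_bus_rmw_sketch(const struct mt76_bus_ops *bus, struct mt76_dev *dev,
		    u32 offset, u32 mask, u32 val)
{
	u32 cur = bus->rr(dev, offset);

	cur = (cur & ~mask) | val;
	bus->wr(dev, offset, cur);

	return cur;
}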
struct mt76_queue_ops {
	int (*init)(struct mt76_dev *dev);

	int (*alloc)(struct mt76_dev *dev, struct mt76_queue *q,
		     int idx, int n_desc, int bufsize,
		     u32 ring_base);

	int (*tx_queue_skb)(struct mt76_dev *dev, enum mt76_txq_id qid,
			    struct sk_buff *skb, struct mt76_wcid *wcid,
			    struct ieee80211_sta *sta);

	int (*tx_queue_skb_raw)(struct mt76_dev *dev, enum mt76_txq_id qid,
				struct sk_buff *skb, u32 tx_info);

	void *(*dequeue)(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
			 int *len, u32 *info, bool *more);

	void (*rx_reset)(struct mt76_dev *dev, enum mt76_rxq_id qid);

	void (*tx_cleanup)(struct mt76_dev *dev, enum mt76_txq_id qid,
			   bool flush);

	void (*kick)(struct mt76_dev *dev, struct mt76_queue *q);
};

enum mt76_wcid_flags {
	MT_WCID_FLAG_CHECK_PS,
	MT_WCID_FLAG_PS,
};

#define MT76_N_WCIDS 128

/* stored in ieee80211_tx_info::hw_queue */
#define MT_TX_HW_QUEUE_EXT_PHY		BIT(3)

DECLARE_EWMA(signal, 10, 8);

#define MT_WCID_TX_INFO_RATE		GENMASK(15, 0)
#define MT_WCID_TX_INFO_NSS		GENMASK(17, 16)
#define MT_WCID_TX_INFO_TXPWR_ADJ	GENMASK(25, 18)
#define MT_WCID_TX_INFO_SET		BIT(31)

struct mt76_wcid {
	struct mt76_rx_tid __rcu *aggr[IEEE80211_NUM_TIDS];

	unsigned long flags;

	struct ewma_signal rssi;
	int inactive_count;

	u8 idx;
	u8 hw_key_idx;

	u8 sta:1;
	u8 ext_phy:1;

	u8 rx_check_pn;
	u8 rx_key_pn[IEEE80211_NUM_TIDS][6];
	u16 cipher;

	u32 tx_info;
	bool sw_iv;

	u8 packet_id;
};

struct mt76_txq {
	struct mt76_sw_queue *swq;
	struct mt76_wcid *wcid;

	struct sk_buff_head retry_q;

	u16 agg_ssn;
	bool send_bar;
	bool aggr;
};

struct mt76_txwi_cache {
	struct list_head list;
	dma_addr_t dma_addr;

	struct sk_buff *skb;
};

struct mt76_rx_tid {
	struct rcu_head rcu_head;

	struct mt76_dev *dev;

	spinlock_t lock;
	struct delayed_work reorder_work;

	u16 head;
	u8 size;
	u8 nframes;

	u8 num;

	u8 started:1, stopped:1, timer_pending:1;

	struct sk_buff *reorder_buf[];
};

#define MT_TX_CB_DMA_DONE		BIT(0)
#define MT_TX_CB_TXS_DONE		BIT(1)
#define MT_TX_CB_TXS_FAILED		BIT(2)

#define MT_PACKET_ID_MASK		GENMASK(6, 0)
#define MT_PACKET_ID_NO_ACK		0
#define MT_PACKET_ID_NO_SKB		1
#define MT_PACKET_ID_FIRST		2
#define MT_PACKET_ID_HAS_RATE		BIT(7)

#define MT_TX_STATUS_SKB_TIMEOUT	HZ

struct mt76_tx_cb {
	unsigned long jiffies;
	u8 wcid;
	u8 pktid;
	u8 flags;
};
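/*
 * Illustrative sketch, an assumption rather than an existing helper:
 * how a rate-carrying packet id could be composed from the
 * MT_PACKET_ID_* definitions above. Ids below MT_PACKET_ID_FIRST are
 * reserved for the no-ack/no-skb cases, and BIT(7) marks ids whose
 * low bits encode a rate index instead of a status cookie.
 */
static inline u8 mt76_pktid_from_rate_sketch(u8 rate_idx)
{
	return MT_PACKET_ID_HAS_RATE | (rate_idx & MT_PACKET_ID_MASK);
}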
enum {
	MT76_STATE_INITIALIZED,
	MT76_STATE_RUNNING,
	MT76_STATE_MCU_RUNNING,
	MT76_SCANNING,
	MT76_RESET,
	MT76_MCU_RESET,
	MT76_REMOVED,
	MT76_READING_STATS,
};

struct mt76_hw_cap {
	bool has_2ghz;
	bool has_5ghz;
};

#define MT_DRV_TXWI_NO_FREE		BIT(0)
#define MT_DRV_TX_ALIGNED4_SKBS		BIT(1)
#define MT_DRV_SW_RX_AIRTIME		BIT(2)
#define MT_DRV_RX_DMA_HDR		BIT(3)

struct mt76_driver_ops {
	u32 drv_flags;
	u32 survey_flags;
	u16 txwi_size;

	void (*update_survey)(struct mt76_dev *dev);

	int (*tx_prepare_skb)(struct mt76_dev *dev, void *txwi_ptr,
			      enum mt76_txq_id qid, struct mt76_wcid *wcid,
			      struct ieee80211_sta *sta,
			      struct mt76_tx_info *tx_info);

	void (*tx_complete_skb)(struct mt76_dev *dev, enum mt76_txq_id qid,
				struct mt76_queue_entry *e);

	bool (*tx_status_data)(struct mt76_dev *dev, u8 *update);

	void (*rx_skb)(struct mt76_dev *dev, enum mt76_rxq_id q,
		       struct sk_buff *skb);

	void (*rx_poll_complete)(struct mt76_dev *dev, enum mt76_rxq_id q);

	void (*sta_ps)(struct mt76_dev *dev, struct ieee80211_sta *sta,
		       bool ps);

	int (*sta_add)(struct mt76_dev *dev, struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta);

	void (*sta_assoc)(struct mt76_dev *dev, struct ieee80211_vif *vif,
			  struct ieee80211_sta *sta);

	void (*sta_remove)(struct mt76_dev *dev, struct ieee80211_vif *vif,
			   struct ieee80211_sta *sta);
};

struct mt76_channel_state {
	u64 cc_active;
	u64 cc_busy;
	u64 cc_rx;
	u64 cc_bss_rx;
	u64 cc_tx;

	s8 noise;
};

struct mt76_sband {
	struct ieee80211_supported_band sband;
	struct mt76_channel_state *chan;
};

struct mt76_rate_power {
	union {
		struct {
			s8 cck[4];
			s8 ofdm[8];
			s8 stbc[10];
			s8 ht[16];
			s8 vht[10];
		};
		s8 all[48];
	};
};

/* addr req mask */
#define MT_VEND_TYPE_EEPROM	BIT(31)
#define MT_VEND_TYPE_CFG	BIT(30)
#define MT_VEND_TYPE_MASK	(MT_VEND_TYPE_EEPROM | MT_VEND_TYPE_CFG)

#define MT_VEND_ADDR(type, n)	(MT_VEND_TYPE_##type | (n))
enum mt_vendor_req {
	MT_VEND_DEV_MODE =	0x1,
	MT_VEND_WRITE =		0x2,
	MT_VEND_POWER_ON =	0x4,
	MT_VEND_MULTI_WRITE =	0x6,
	MT_VEND_MULTI_READ =	0x7,
	MT_VEND_READ_EEPROM =	0x9,
	MT_VEND_WRITE_FCE =	0x42,
	MT_VEND_WRITE_CFG =	0x46,
	MT_VEND_READ_CFG =	0x47,
	MT_VEND_READ_EXT =	0x63,
	MT_VEND_WRITE_EXT =	0x66,
};

enum mt76u_in_ep {
	MT_EP_IN_PKT_RX,
	MT_EP_IN_CMD_RESP,
	__MT_EP_IN_MAX,
};

enum mt76u_out_ep {
	MT_EP_OUT_INBAND_CMD,
	MT_EP_OUT_AC_BE,
	MT_EP_OUT_AC_BK,
	MT_EP_OUT_AC_VI,
	MT_EP_OUT_AC_VO,
	MT_EP_OUT_HCCA,
	__MT_EP_OUT_MAX,
};

struct mt76_mcu {
	struct mutex mutex;
	u32 msg_seq;

	struct sk_buff_head res_q;
	wait_queue_head_t wait;
};

#define MT_TX_SG_MAX_SIZE	8
#define MT_RX_SG_MAX_SIZE	4
#define MT_NUM_TX_ENTRIES	256
#define MT_NUM_RX_ENTRIES	128
#define MCU_RESP_URB_SIZE	1024
struct mt76_usb {
	struct mutex usb_ctrl_mtx;
	u8 *data;
	u16 data_len;

	struct tasklet_struct rx_tasklet;
	struct workqueue_struct *wq;
	struct work_struct stat_work;

	u8 out_ep[__MT_EP_OUT_MAX];
	u8 in_ep[__MT_EP_IN_MAX];
	bool sg_en;

	struct mt76u_mcu {
		u8 *data;
		/* multiple reads */
		struct mt76_reg_pair *rp;
		int rp_len;
		u32 base;
		bool burst;
	} mcu;
};

struct mt76_mmio {
	void __iomem *regs;
	spinlock_t irq_lock;
	u32 irqmask;
};

struct mt76_rx_status {
	union {
		struct mt76_wcid *wcid;
		u8 wcid_idx;
	};

	unsigned long reorder_time;

	u32 ampdu_ref;

	u8 iv[6];

	u8 ext_phy:1;
	u8 aggr:1;
	u8 tid;
	u16 seqno;

	u16 freq;
	u32 flag;
	u8 enc_flags;
	u8 encoding:2, bw:3;
	u8 rate_idx;
	u8 nss;
	u8 band;
	s8 signal;
	u8 chains;
	s8 chain_signal[IEEE80211_MAX_CHAINS];
};

struct mt76_phy {
	struct ieee80211_hw *hw;
	struct mt76_dev *dev;
	void *priv;

	unsigned long state;

	struct cfg80211_chan_def chandef;
	struct ieee80211_channel *main_chan;

	struct mt76_channel_state *chan_state;
	ktime_t survey_time;

	struct mt76_sband sband_2g;
	struct mt76_sband sband_5g;

	int txpower_cur;
	u8 antenna_mask;
};
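/*
 * Illustrative sketch, an assumption rather than an existing helper:
 * survey reporting derives channel load from the cc_* counters in
 * struct mt76_channel_state, e.g. treating the channel as loaded when
 * busy time exceeds half of the active time. The threshold here is
 * arbitrary and only for illustration.
 */
static inline bool
mt76_channel_loaded_sketch(const struct mt76_channel_state *state)
{
	return state->cc_busy * 2 > state->cc_active;
}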
struct mt76_dev {
	struct mt76_phy phy; /* must be first */

	struct mt76_phy *phy2;

	struct ieee80211_hw *hw;

	spinlock_t lock;
	spinlock_t cc_lock;

	u32 cur_cc_bss_rx;

	struct mt76_rx_status rx_ampdu_status;
	u32 rx_ampdu_len;
	u32 rx_ampdu_ref;

	struct mutex mutex;

	const struct mt76_bus_ops *bus;
	const struct mt76_driver_ops *drv;
	const struct mt76_mcu_ops *mcu_ops;
	struct device *dev;

	struct mt76_mcu mcu;

	struct net_device napi_dev;
	spinlock_t rx_lock;
	struct napi_struct napi[__MT_RXQ_MAX];
	struct sk_buff_head rx_skb[__MT_RXQ_MAX];

	struct list_head txwi_cache;
	struct mt76_sw_queue q_tx[2 * __MT_TXQ_MAX];
	struct mt76_queue q_rx[__MT_RXQ_MAX];
	const struct mt76_queue_ops *queue_ops;
	int tx_dma_idx[4];

	struct tasklet_struct tx_tasklet;
	struct napi_struct tx_napi;
	struct delayed_work mac_work;

	wait_queue_head_t tx_wait;
	struct sk_buff_head status_list;

	unsigned long wcid_mask[MT76_N_WCIDS / BITS_PER_LONG];
	unsigned long wcid_phy_mask[MT76_N_WCIDS / BITS_PER_LONG];

	struct mt76_wcid global_wcid;
	struct mt76_wcid __rcu *wcid[MT76_N_WCIDS];

	u8 macaddr[ETH_ALEN];
	u32 rev;

	u32 aggr_stats[32];

	struct tasklet_struct pre_tbtt_tasklet;
	int beacon_int;
	u8 beacon_mask;

	struct debugfs_blob_wrapper eeprom;
	struct debugfs_blob_wrapper otp;
	struct mt76_hw_cap cap;

	struct mt76_rate_power rate_power;

	enum nl80211_dfs_regions region;

	u32 debugfs_reg;

	struct led_classdev led_cdev;
	char led_name[32];
	bool led_al;
	u8 led_pin;

	u8 csa_complete;

	u32 rxfilter;

	union {
		struct mt76_mmio mmio;
		struct mt76_usb usb;
	};
};

enum mt76_phy_type {
	MT_PHY_TYPE_CCK,
	MT_PHY_TYPE_OFDM,
	MT_PHY_TYPE_HT,
	MT_PHY_TYPE_HT_GF,
	MT_PHY_TYPE_VHT,
};
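/*
 * Illustrative sketch, an assumption about driver layout: chip drivers
 * embed the core state at the start of their private structure, which
 * is what lets the mt76_rr()/mt76_wr() style wrappers below reach the
 * core through (dev)->mt76, and mt76_hw() reach the primary phy
 * through (dev)->mphy. The union works because struct mt76_phy is the
 * first member of struct mt76_dev. Names here are hypothetical.
 */
struct mt76_example_dev {
	union {
		struct mt76_dev mt76;	/* must be first */
		struct mt76_phy mphy;	/* aliases the primary phy */
	};
	u32 chip_state;			/* hypothetical chip-specific field */
};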
#define __mt76_rr(dev, ...)	(dev)->bus->rr((dev), __VA_ARGS__)
#define __mt76_wr(dev, ...)	(dev)->bus->wr((dev), __VA_ARGS__)
#define __mt76_rmw(dev, ...)	(dev)->bus->rmw((dev), __VA_ARGS__)
#define __mt76_wr_copy(dev, ...)	(dev)->bus->write_copy((dev), __VA_ARGS__)
#define __mt76_rr_copy(dev, ...)	(dev)->bus->read_copy((dev), __VA_ARGS__)

#define __mt76_set(dev, offset, val)	__mt76_rmw(dev, offset, 0, val)
#define __mt76_clear(dev, offset, val)	__mt76_rmw(dev, offset, val, 0)

#define mt76_rr(dev, ...)	(dev)->mt76.bus->rr(&((dev)->mt76), __VA_ARGS__)
#define mt76_wr(dev, ...)	(dev)->mt76.bus->wr(&((dev)->mt76), __VA_ARGS__)
#define mt76_rmw(dev, ...)	(dev)->mt76.bus->rmw(&((dev)->mt76), __VA_ARGS__)
#define mt76_wr_copy(dev, ...)	(dev)->mt76.bus->write_copy(&((dev)->mt76), __VA_ARGS__)
#define mt76_rr_copy(dev, ...)	(dev)->mt76.bus->read_copy(&((dev)->mt76), __VA_ARGS__)
#define mt76_wr_rp(dev, ...)	(dev)->mt76.bus->wr_rp(&((dev)->mt76), __VA_ARGS__)
#define mt76_rd_rp(dev, ...)	(dev)->mt76.bus->rd_rp(&((dev)->mt76), __VA_ARGS__)

#define mt76_mcu_send_msg(dev, ...)	(dev)->mt76.mcu_ops->mcu_send_msg(&((dev)->mt76), __VA_ARGS__)

#define __mt76_mcu_send_msg(dev, ...)	(dev)->mcu_ops->mcu_send_msg((dev), __VA_ARGS__)
#define __mt76_mcu_skb_send_msg(dev, ...)	(dev)->mcu_ops->mcu_skb_send_msg((dev), __VA_ARGS__)
#define mt76_mcu_restart(dev, ...)	(dev)->mt76.mcu_ops->mcu_restart(&((dev)->mt76))
#define __mt76_mcu_restart(dev, ...)	(dev)->mcu_ops->mcu_restart((dev))

#define mt76_set(dev, offset, val)	mt76_rmw(dev, offset, 0, val)
#define mt76_clear(dev, offset, val)	mt76_rmw(dev, offset, val, 0)

#define mt76_get_field(_dev, _reg, _field)		\
	FIELD_GET(_field, mt76_rr(_dev, _reg))

#define mt76_rmw_field(_dev, _reg, _field, _val)	\
	mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val))

#define __mt76_rmw_field(_dev, _reg, _field, _val)	\
	__mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val))

#define mt76_hw(dev) (dev)->mphy.hw

static inline struct ieee80211_hw *
mt76_wcid_hw(struct mt76_dev *dev, u8 wcid)
{
	if (wcid < MT76_N_WCIDS &&
	    mt76_wcid_mask_test(dev->wcid_phy_mask, wcid))
		return dev->phy2->hw;

	return dev->phy.hw;
}

bool __mt76_poll(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
		 int timeout);

#define mt76_poll(dev, ...) __mt76_poll(&((dev)->mt76), __VA_ARGS__)

bool __mt76_poll_msec(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
		      int timeout);

#define mt76_poll_msec(dev, ...) __mt76_poll_msec(&((dev)->mt76), __VA_ARGS__)

void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs);
void mt76_pci_disable_aspm(struct pci_dev *pdev);

static inline u16 mt76_chip(struct mt76_dev *dev)
{
	return dev->rev >> 16;
}

static inline u16 mt76_rev(struct mt76_dev *dev)
{
	return dev->rev & 0xffff;
}

#define mt76xx_chip(dev) mt76_chip(&((dev)->mt76))
#define mt76xx_rev(dev) mt76_rev(&((dev)->mt76))
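/*
 * Illustrative usage sketch (the register offset is hypothetical):
 * clearing a field and writing a new value with the accessors above.
 * The two-step read/write form and the single rmw call are equivalent.
 */
static inline void __mt76_rmw_example(struct mt76_dev *dev)
{
	u32 val;

	val = __mt76_rr(dev, 0x1000);		/* hypothetical offset */
	val &= ~GENMASK(7, 4);
	val |= 0x3 << 4;
	__mt76_wr(dev, 0x1000, val);

	/* same effect: clear GENMASK(7, 4), then set bits 0x3 << 4 */
	__mt76_rmw(dev, 0x1000, GENMASK(7, 4), 0x3 << 4);
}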
#define mt76_init_queues(dev)		(dev)->mt76.queue_ops->init(&((dev)->mt76))
#define mt76_queue_alloc(dev, ...)	(dev)->mt76.queue_ops->alloc(&((dev)->mt76), __VA_ARGS__)
#define mt76_tx_queue_skb_raw(dev, ...)	(dev)->mt76.queue_ops->tx_queue_skb_raw(&((dev)->mt76), __VA_ARGS__)
#define mt76_tx_queue_skb(dev, ...)	(dev)->mt76.queue_ops->tx_queue_skb(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_rx_reset(dev, ...)	(dev)->mt76.queue_ops->rx_reset(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_tx_cleanup(dev, ...)	(dev)->mt76.queue_ops->tx_cleanup(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_kick(dev, ...)	(dev)->mt76.queue_ops->kick(&((dev)->mt76), __VA_ARGS__)

struct mt76_dev *mt76_alloc_device(struct device *pdev, unsigned int size,
				   const struct ieee80211_ops *ops,
				   const struct mt76_driver_ops *drv_ops);
int mt76_register_device(struct mt76_dev *dev, bool vht,
			 struct ieee80211_rate *rates, int n_rates);
void mt76_unregister_device(struct mt76_dev *dev);
void mt76_free_device(struct mt76_dev *dev);
void mt76_unregister_phy(struct mt76_phy *phy);

struct mt76_phy *mt76_alloc_phy(struct mt76_dev *dev, unsigned int size,
				const struct ieee80211_ops *ops);
int mt76_register_phy(struct mt76_phy *phy);

struct dentry *mt76_register_debugfs(struct mt76_dev *dev);
int mt76_queues_read(struct seq_file *s, void *data);
void mt76_seq_puts_array(struct seq_file *file, const char *str,
			 s8 *val, int len);

int mt76_eeprom_init(struct mt76_dev *dev, int len);
void mt76_eeprom_override(struct mt76_dev *dev);

static inline struct mt76_phy *
mt76_dev_phy(struct mt76_dev *dev, bool phy_ext)
{
	if (phy_ext && dev->phy2)
		return dev->phy2;
	return &dev->phy;
}

static inline struct ieee80211_hw *
mt76_phy_hw(struct mt76_dev *dev, bool phy_ext)
{
	return mt76_dev_phy(dev, phy_ext)->hw;
}

static inline u8 *
mt76_get_txwi_ptr(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
	return (u8 *)t - dev->drv->txwi_size;
}

/* increment with wrap-around */
static inline int mt76_incr(int val, int size)
{
	return (val + 1) & (size - 1);
}

/* decrement with wrap-around */
static inline int mt76_decr(int val, int size)
{
	return (val - 1) & (size - 1);
}

u8 mt76_ac_to_hwq(u8 ac);

static inline struct ieee80211_txq *
mtxq_to_txq(struct mt76_txq *mtxq)
{
	void *ptr = mtxq;

	return container_of(ptr, struct ieee80211_txq, drv_priv);
}

static inline struct ieee80211_sta *
wcid_to_sta(struct mt76_wcid *wcid)
{
	void *ptr = wcid;

	if (!wcid || !wcid->sta)
		return NULL;

	return container_of(ptr, struct ieee80211_sta, drv_priv);
}

static inline struct mt76_tx_cb *mt76_tx_skb_cb(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct mt76_tx_cb) >
		     sizeof(IEEE80211_SKB_CB(skb)->status.status_driver_data));
	return ((void *)IEEE80211_SKB_CB(skb)->status.status_driver_data);
}

/* align the 802.11 header on a 4-byte boundary by moving it forward
 * two bytes and zero-filling the resulting gap before the payload
 */
static inline void mt76_insert_hdr_pad(struct sk_buff *skb)
{
	int len = ieee80211_get_hdrlen_from_skb(skb);

	if (len % 4 == 0)
		return;

	skb_push(skb, 2);
	memmove(skb->data, skb->data + 2, len);

	skb->data[len] = 0;
	skb->data[len + 1] = 0;
}

static inline bool mt76_is_skb_pktid(u8 pktid)
{
	if (pktid & MT_PACKET_ID_HAS_RATE)
		return false;

	return pktid >= MT_PACKET_ID_FIRST;
}

static inline u8 mt76_tx_power_nss_delta(u8 nss)
{
	static const u8 nss_delta[4] = { 0, 6, 9, 12 };

	return nss_delta[nss - 1];
}
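/*
 * Illustrative sketch, not an existing helper: mt76_incr()/mt76_decr()
 * assume the ring size is a power of two. Walking the occupied part of
 * a queue from the oldest entry to the newest then looks like this,
 * here counting entries whose completion flag is set.
 */
static inline int mt76_queue_count_done_sketch(struct mt76_queue *q)
{
	int idx = q->tail;
	int n = 0;
	int i;

	for (i = 0; i < q->queued; i++) {
		if (q->entry[idx].done)
			n++;
		idx = mt76_incr(idx, q->ndesc);
	}

	return n;
}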
void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb);
void mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta,
	     struct mt76_wcid *wcid, struct sk_buff *skb);
void mt76_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq);
void mt76_txq_remove(struct mt76_dev *dev, struct ieee80211_txq *txq);
void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta,
			 bool send_bar);
void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid);
void mt76_txq_schedule_all(struct mt76_phy *phy);
void mt76_tx_tasklet(unsigned long data);
void mt76_release_buffered_frames(struct ieee80211_hw *hw,
				  struct ieee80211_sta *sta,
				  u16 tids, int nframes,
				  enum ieee80211_frame_release_type reason,
				  bool more_data);
bool mt76_has_tx_pending(struct mt76_phy *phy);
void mt76_set_channel(struct mt76_phy *phy);
void mt76_update_survey(struct mt76_dev *dev);
int mt76_get_survey(struct ieee80211_hw *hw, int idx,
		    struct survey_info *survey);
void mt76_set_stream_caps(struct mt76_dev *dev, bool vht);

int mt76_rx_aggr_start(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid,
		       u16 ssn, u8 size);
void mt76_rx_aggr_stop(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid);

void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
			 struct ieee80211_key_conf *key);

void mt76_tx_status_lock(struct mt76_dev *dev, struct sk_buff_head *list)
			 __acquires(&dev->status_list.lock);
void mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
			   __releases(&dev->status_list.lock);

int mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid,
			   struct sk_buff *skb);
struct sk_buff *mt76_tx_status_skb_get(struct mt76_dev *dev,
				       struct mt76_wcid *wcid, int pktid,
				       struct sk_buff_head *list);
void mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb,
			     struct sk_buff_head *list);
void mt76_tx_complete_skb(struct mt76_dev *dev, struct sk_buff *skb);
void mt76_tx_status_check(struct mt76_dev *dev, struct mt76_wcid *wcid,
			  bool flush);
int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta,
		   enum ieee80211_sta_state old_state,
		   enum ieee80211_sta_state new_state);
void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta);
void mt76_sta_pre_rcu_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta);

int mt76_get_min_avg_rssi(struct mt76_dev *dev, bool ext_phy);

int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		     int *dbm);

void mt76_csa_check(struct mt76_dev *dev);
void mt76_csa_finish(struct mt76_dev *dev);

int mt76_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant);
int mt76_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set);
void mt76_insert_ccmp_hdr(struct sk_buff *skb, u8 key_id);
int mt76_get_rate(struct mt76_dev *dev,
		  struct ieee80211_supported_band *sband,
		  int idx, bool cck);
void mt76_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		  const u8 *mac);
void mt76_sw_scan_complete(struct ieee80211_hw *hw,
			   struct ieee80211_vif *vif);

/* internal */
static inline struct ieee80211_hw *
mt76_tx_status_get_hw(struct mt76_dev *dev, struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hw *hw = dev->phy.hw;

	if ((info->hw_queue & MT_TX_HW_QUEUE_EXT_PHY) && dev->phy2)
		hw = dev->phy2->hw;

	info->hw_queue &= ~MT_TX_HW_QUEUE_EXT_PHY;

	return hw;
}
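/*
 * Illustrative sketch, an assumption about typical usage: consuming a
 * TX status event with the status-list helpers declared above. The
 * lock/unlock pair brackets the lookup, and any frames moved onto the
 * local list are completed when the lock is dropped.
 */
static inline void
mt76_tx_status_consume_sketch(struct mt76_dev *dev, struct mt76_wcid *wcid,
			      int pktid)
{
	struct sk_buff_head list;
	struct sk_buff *skb;

	mt76_tx_status_lock(dev, &list);
	skb = mt76_tx_status_skb_get(dev, wcid, pktid, &list);
	if (skb)
		mt76_tx_status_skb_done(dev, skb, &list);
	mt76_tx_status_unlock(dev, &list);
}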
void mt76_tx_free(struct mt76_dev *dev);
struct mt76_txwi_cache *mt76_get_txwi(struct mt76_dev *dev);
void mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
		      struct napi_struct *napi);
void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
			   struct napi_struct *napi);
void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames);

/* usb */
static inline bool mt76u_urb_error(struct urb *urb)
{
	/* ignore the status codes caused by normal URB unlinking */
	return urb->status &&
	       urb->status != -ECONNRESET &&
	       urb->status != -ESHUTDOWN &&
	       urb->status != -ENOENT;
}

/* Map hardware queues to usb endpoints */
static inline u8 q2ep(u8 qid)
{
	/* TODO: take management packets to queue 5 */
	return qid + 1;
}

static inline int
mt76u_bulk_msg(struct mt76_dev *dev, void *data, int len, int *actual_len,
	       int timeout, int ep)
{
	struct usb_interface *uintf = to_usb_interface(dev->dev);
	struct usb_device *udev = interface_to_usbdev(uintf);
	struct mt76_usb *usb = &dev->usb;
	unsigned int pipe;

	/* a non-NULL actual_len selects the IN (read) direction */
	if (actual_len)
		pipe = usb_rcvbulkpipe(udev, usb->in_ep[ep]);
	else
		pipe = usb_sndbulkpipe(udev, usb->out_ep[ep]);

	return usb_bulk_msg(udev, pipe, data, len, actual_len, timeout);
}

int mt76u_skb_dma_info(struct sk_buff *skb, u32 info);
int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
			 u8 req_type, u16 val, u16 offset,
			 void *buf, size_t len);
void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
		     const u16 offset, const u32 val);
void mt76u_deinit(struct mt76_dev *dev);
int mt76u_init(struct mt76_dev *dev, struct usb_interface *intf,
	       bool ext);
int mt76u_alloc_mcu_queue(struct mt76_dev *dev);
int mt76u_alloc_queues(struct mt76_dev *dev);
void mt76u_stop_tx(struct mt76_dev *dev);
void mt76u_stop_rx(struct mt76_dev *dev);
int mt76u_resume_rx(struct mt76_dev *dev);
void mt76u_queues_deinit(struct mt76_dev *dev);

struct sk_buff *
mt76_mcu_msg_alloc(const void *data, int head_len,
		   int data_len, int tail_len);
void mt76_mcu_rx_event(struct mt76_dev *dev, struct sk_buff *skb);
struct sk_buff *mt76_mcu_get_response(struct mt76_dev *dev,
				      unsigned long expires);

void mt76_set_irq_mask(struct mt76_dev *dev, u32 addr, u32 clear, u32 set);

#endif