/* SPDX-License-Identifier: ISC */
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 */

#ifndef __MT76_H
#define __MT76_H

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/leds.h>
#include <linux/usb.h>
#include <linux/average.h>
#include <net/mac80211.h>
#include "util.h"
#include "testmode.h"

#define MT_TX_RING_SIZE		256
#define MT_MCU_RING_SIZE	32
#define MT_RX_BUF_SIZE		2048
#define MT_SKB_HEAD_LEN		128

struct mt76_dev;
struct mt76_phy;
struct mt76_wcid;

struct mt76_reg_pair {
	u32 reg;
	u32 value;
};

enum mt76_bus_type {
	MT76_BUS_MMIO,
	MT76_BUS_USB,
};

struct mt76_bus_ops {
	u32 (*rr)(struct mt76_dev *dev, u32 offset);
	void (*wr)(struct mt76_dev *dev, u32 offset, u32 val);
	u32 (*rmw)(struct mt76_dev *dev, u32 offset, u32 mask, u32 val);
	void (*write_copy)(struct mt76_dev *dev, u32 offset, const void *data,
			   int len);
	void (*read_copy)(struct mt76_dev *dev, u32 offset, void *data,
			  int len);
	int (*wr_rp)(struct mt76_dev *dev, u32 base,
		     const struct mt76_reg_pair *rp, int len);
	int (*rd_rp)(struct mt76_dev *dev, u32 base,
		     struct mt76_reg_pair *rp, int len);
	enum mt76_bus_type type;
};

#define mt76_is_usb(dev) ((dev)->bus->type == MT76_BUS_USB)
#define mt76_is_mmio(dev) ((dev)->bus->type == MT76_BUS_MMIO)

enum mt76_txq_id {
	MT_TXQ_VO = IEEE80211_AC_VO,
	MT_TXQ_VI = IEEE80211_AC_VI,
	MT_TXQ_BE = IEEE80211_AC_BE,
	MT_TXQ_BK = IEEE80211_AC_BK,
	MT_TXQ_PSD,
	MT_TXQ_MCU,
	MT_TXQ_MCU_WA,
	MT_TXQ_BEACON,
	MT_TXQ_CAB,
	MT_TXQ_FWDL,
	__MT_TXQ_MAX
};

enum mt76_rxq_id {
	MT_RXQ_MAIN,
	MT_RXQ_MCU,
	MT_RXQ_MCU_WA,
	__MT_RXQ_MAX
};

struct mt76_queue_buf {
	dma_addr_t addr;
	int len;
};

struct mt76_tx_info {
	struct mt76_queue_buf buf[32];
	struct sk_buff *skb;
	int nbuf;
	u32 info;
};

struct mt76_queue_entry {
	union {
		void *buf;
		struct sk_buff *skb;
	};
	union {
		struct mt76_txwi_cache *txwi;
		struct urb *urb;
	};
	enum mt76_txq_id qid;
	bool skip_buf0:1;
	bool schedule:1;
	bool done:1;
};

struct mt76_queue_regs {
	u32 desc_base;
	u32 ring_size;
	u32 cpu_idx;
	u32 dma_idx;
} __packed __aligned(4);

struct mt76_queue {
	struct mt76_queue_regs __iomem *regs;

	spinlock_t lock;
	struct mt76_queue_entry *entry;
	struct mt76_desc *desc;

	u16 first;
	u16 head;
	u16 tail;
	int ndesc;
	int queued;
	int buf_size;
	bool stopped;

	u8 buf_offset;
	u8 hw_idx;

	dma_addr_t desc_dma;
	struct sk_buff *rx_head;
	struct page_frag_cache rx_page;
};

struct mt76_sw_queue {
	struct mt76_queue *q;

	struct list_head swq;
	int swq_queued;
};

struct mt76_mcu_ops {
	u32 headroom;
	u32 tailroom;

	int (*mcu_send_msg)(struct mt76_dev *dev, int cmd, const void *data,
			    int len, bool wait_resp);
	int (*mcu_skb_send_msg)(struct mt76_dev *dev, struct sk_buff *skb,
				int cmd, bool wait_resp);
	int (*mcu_wr_rp)(struct mt76_dev *dev, u32 base,
			 const struct mt76_reg_pair *rp, int len);
	int (*mcu_rd_rp)(struct mt76_dev *dev, u32 base,
			 struct mt76_reg_pair *rp, int len);
	int (*mcu_restart)(struct mt76_dev *dev);
};
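/*
 * Bus-agnostic queue operations; the DMA and USB backends each provide
 * their own implementation. Drivers normally reach these through the
 * mt76_queue_*() and mt76_tx_queue_skb*() wrapper macros defined further
 * down rather than calling the function pointers directly.
 */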
struct mt76_queue_ops {
	int (*init)(struct mt76_dev *dev);

	int (*alloc)(struct mt76_dev *dev, struct mt76_queue *q,
		     int idx, int n_desc, int bufsize,
		     u32 ring_base);

	int (*tx_queue_skb)(struct mt76_dev *dev, enum mt76_txq_id qid,
			    struct sk_buff *skb, struct mt76_wcid *wcid,
			    struct ieee80211_sta *sta);

	int (*tx_queue_skb_raw)(struct mt76_dev *dev, enum mt76_txq_id qid,
				struct sk_buff *skb, u32 tx_info);

	void *(*dequeue)(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
			 int *len, u32 *info, bool *more);

	void (*rx_reset)(struct mt76_dev *dev, enum mt76_rxq_id qid);

	void (*tx_cleanup)(struct mt76_dev *dev, enum mt76_txq_id qid,
			   bool flush);

	void (*kick)(struct mt76_dev *dev, struct mt76_queue *q);
};

enum mt76_wcid_flags {
	MT_WCID_FLAG_CHECK_PS,
	MT_WCID_FLAG_PS,
};

#define MT76_N_WCIDS 288

/* stored in ieee80211_tx_info::hw_queue */
#define MT_TX_HW_QUEUE_EXT_PHY		BIT(3)

DECLARE_EWMA(signal, 10, 8);

#define MT_WCID_TX_INFO_RATE		GENMASK(15, 0)
#define MT_WCID_TX_INFO_NSS		GENMASK(17, 16)
#define MT_WCID_TX_INFO_TXPWR_ADJ	GENMASK(25, 18)
#define MT_WCID_TX_INFO_SET		BIT(31)

struct mt76_wcid {
	struct mt76_rx_tid __rcu *aggr[IEEE80211_NUM_TIDS];

	unsigned long flags;

	struct ewma_signal rssi;
	int inactive_count;

	u16 idx;
	u8 hw_key_idx;

	u8 sta:1;
	u8 ext_phy:1;

	u8 rx_check_pn;
	u8 rx_key_pn[IEEE80211_NUM_TIDS][6];
	u16 cipher;

	u32 tx_info;
	bool sw_iv;

	u8 packet_id;
};

struct mt76_txq {
	struct mt76_sw_queue *swq;
	struct mt76_wcid *wcid;

	struct sk_buff_head retry_q;

	u16 agg_ssn;
	bool send_bar;
	bool aggr;
};

struct mt76_txwi_cache {
	struct list_head list;
	dma_addr_t dma_addr;

	struct sk_buff *skb;
};

struct mt76_rx_tid {
	struct rcu_head rcu_head;

	struct mt76_dev *dev;

	spinlock_t lock;
	struct delayed_work reorder_work;

	u16 head;
	u16 size;
	u16 nframes;

	u8 num;

	u8 started:1, stopped:1, timer_pending:1;

	struct sk_buff *reorder_buf[];
};

#define MT_TX_CB_DMA_DONE		BIT(0)
#define MT_TX_CB_TXS_DONE		BIT(1)
#define MT_TX_CB_TXS_FAILED		BIT(2)

#define MT_PACKET_ID_MASK		GENMASK(6, 0)
#define MT_PACKET_ID_NO_ACK		0
#define MT_PACKET_ID_NO_SKB		1
#define MT_PACKET_ID_FIRST		2
#define MT_PACKET_ID_HAS_RATE		BIT(7)

#define MT_TX_STATUS_SKB_TIMEOUT	HZ

struct mt76_tx_cb {
	unsigned long jiffies;
	u16 wcid;
	u8 pktid;
	u8 flags;
};

enum {
	MT76_STATE_INITIALIZED,
	MT76_STATE_RUNNING,
	MT76_STATE_MCU_RUNNING,
	MT76_SCANNING,
	MT76_HW_SCANNING,
	MT76_HW_SCHED_SCANNING,
	MT76_RESTART,
	MT76_RESET,
	MT76_MCU_RESET,
	MT76_REMOVED,
	MT76_READING_STATS,
	MT76_STATE_POWER_OFF,
	MT76_STATE_SUSPEND,
	MT76_STATE_ROC,
};

struct mt76_hw_cap {
	bool has_2ghz;
	bool has_5ghz;
};

#define MT_DRV_TXWI_NO_FREE		BIT(0)
#define MT_DRV_TX_ALIGNED4_SKBS		BIT(1)
#define MT_DRV_SW_RX_AIRTIME		BIT(2)
#define MT_DRV_RX_DMA_HDR		BIT(3)
#define MT_DRV_HW_MGMT_TXQ		BIT(4)
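/*
 * Per-chip driver hooks. The core calls tx_prepare_skb()/tx_complete_skb()
 * around the queue ops above, feeds received frames through rx_skb(), and
 * forwards mac80211 station events to the sta_* callbacks.
 */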
struct mt76_driver_ops {
	u32 drv_flags;
	u32 survey_flags;
	u16 txwi_size;

	void (*update_survey)(struct mt76_dev *dev);

	int (*tx_prepare_skb)(struct mt76_dev *dev, void *txwi_ptr,
			      enum mt76_txq_id qid, struct mt76_wcid *wcid,
			      struct ieee80211_sta *sta,
			      struct mt76_tx_info *tx_info);

	void (*tx_complete_skb)(struct mt76_dev *dev, enum mt76_txq_id qid,
				struct mt76_queue_entry *e);

	bool (*tx_status_data)(struct mt76_dev *dev, u8 *update);

	void (*rx_skb)(struct mt76_dev *dev, enum mt76_rxq_id q,
		       struct sk_buff *skb);

	void (*rx_poll_complete)(struct mt76_dev *dev, enum mt76_rxq_id q);

	void (*sta_ps)(struct mt76_dev *dev, struct ieee80211_sta *sta,
		       bool ps);

	int (*sta_add)(struct mt76_dev *dev, struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta);

	void (*sta_assoc)(struct mt76_dev *dev, struct ieee80211_vif *vif,
			  struct ieee80211_sta *sta);

	void (*sta_remove)(struct mt76_dev *dev, struct ieee80211_vif *vif,
			   struct ieee80211_sta *sta);
};

struct mt76_channel_state {
	u64 cc_active;
	u64 cc_busy;
	u64 cc_rx;
	u64 cc_bss_rx;
	u64 cc_tx;

	s8 noise;
};

struct mt76_sband {
	struct ieee80211_supported_band sband;
	struct mt76_channel_state *chan;
};

struct mt76_rate_power {
	union {
		struct {
			s8 cck[4];
			s8 ofdm[8];
			s8 stbc[10];
			s8 ht[16];
			s8 vht[10];
		};
		s8 all[48];
	};
};

/* addr req mask */
#define MT_VEND_TYPE_EEPROM	BIT(31)
#define MT_VEND_TYPE_CFG	BIT(30)
#define MT_VEND_TYPE_MASK	(MT_VEND_TYPE_EEPROM | MT_VEND_TYPE_CFG)

#define MT_VEND_ADDR(type, n)	(MT_VEND_TYPE_##type | (n))
enum mt_vendor_req {
	MT_VEND_DEV_MODE =	0x1,
	MT_VEND_WRITE =		0x2,
	MT_VEND_POWER_ON =	0x4,
	MT_VEND_MULTI_WRITE =	0x6,
	MT_VEND_MULTI_READ =	0x7,
	MT_VEND_READ_EEPROM =	0x9,
	MT_VEND_WRITE_FCE =	0x42,
	MT_VEND_WRITE_CFG =	0x46,
	MT_VEND_READ_CFG =	0x47,
	MT_VEND_READ_EXT =	0x63,
	MT_VEND_WRITE_EXT =	0x66,
	MT_VEND_FEATURE_SET =	0x91,
};

enum mt76u_in_ep {
	MT_EP_IN_PKT_RX,
	MT_EP_IN_CMD_RESP,
	__MT_EP_IN_MAX,
};

enum mt76u_out_ep {
	MT_EP_OUT_INBAND_CMD,
	MT_EP_OUT_AC_BE,
	MT_EP_OUT_AC_BK,
	MT_EP_OUT_AC_VI,
	MT_EP_OUT_AC_VO,
	MT_EP_OUT_HCCA,
	__MT_EP_OUT_MAX,
};

struct mt76_mcu {
	struct mutex mutex;
	u32 msg_seq;

	struct sk_buff_head res_q;
	wait_queue_head_t wait;
};

#define MT_TX_SG_MAX_SIZE	8
#define MT_RX_SG_MAX_SIZE	4
#define MT_NUM_TX_ENTRIES	256
#define MT_NUM_RX_ENTRIES	128
#define MCU_RESP_URB_SIZE	1024
struct mt76_usb {
	struct mutex usb_ctrl_mtx;
	u8 *data;
	u16 data_len;

	struct tasklet_struct rx_tasklet;
	struct workqueue_struct *wq;
	struct work_struct stat_work;

	u8 out_ep[__MT_EP_OUT_MAX];
	u8 in_ep[__MT_EP_IN_MAX];
	bool sg_en;

	struct mt76u_mcu {
		u8 *data;
		/* multiple reads */
		struct mt76_reg_pair *rp;
		int rp_len;
		u32 base;
		bool burst;
	} mcu;
};

struct mt76_mmio {
	void __iomem *regs;
	spinlock_t irq_lock;
	u32 irqmask;
};

struct mt76_rx_status {
	union {
		struct mt76_wcid *wcid;
		u16 wcid_idx;
	};

	unsigned long reorder_time;

	u32 ampdu_ref;

	u8 iv[6];

	u8 ext_phy:1;
	u8 aggr:1;
	u8 tid;
	u16 seqno;

	u16 freq;
	u32 flag;
	u8 enc_flags;
	u8 encoding:2, bw:3, he_ru:3;
	u8 he_gi:2, he_dcm:1;
	u8 rate_idx;
	u8 nss;
	u8 band;
	s8 signal;
	u8 chains;
	s8 chain_signal[IEEE80211_MAX_CHAINS];
};
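/*
 * Hooks for the nl80211 testmode interface (only compiled in with
 * CONFIG_NL80211_TESTMODE); dispatched from mt76_testmode_cmd() and
 * mt76_testmode_dump() declared below.
 */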
struct mt76_testmode_ops {
	int (*set_state)(struct mt76_dev *dev, enum mt76_testmode_state state);
	int (*set_params)(struct mt76_dev *dev, struct nlattr **tb,
			  enum mt76_testmode_state new_state);
	int (*dump_stats)(struct mt76_dev *dev, struct sk_buff *msg);
};

struct mt76_testmode_data {
	enum mt76_testmode_state state;

	u32 param_set[DIV_ROUND_UP(NUM_MT76_TM_ATTRS, 32)];
	struct sk_buff *tx_skb;

	u32 tx_count;
	u16 tx_msdu_len;

	u8 tx_rate_mode;
	u8 tx_rate_idx;
	u8 tx_rate_nss;
	u8 tx_rate_sgi;
	u8 tx_rate_ldpc;

	u8 tx_antenna_mask;

	u32 freq_offset;

	u8 tx_power[4];
	u8 tx_power_control;

	const char *mtd_name;
	u32 mtd_offset;

	u32 tx_pending;
	u32 tx_queued;
	u32 tx_done;
	struct {
		u64 packets[__MT_RXQ_MAX];
		u64 fcs_error[__MT_RXQ_MAX];
	} rx_stats;
};

struct mt76_phy {
	struct ieee80211_hw *hw;
	struct mt76_dev *dev;
	void *priv;

	unsigned long state;

	struct cfg80211_chan_def chandef;
	struct ieee80211_channel *main_chan;

	struct mt76_channel_state *chan_state;
	ktime_t survey_time;

	struct mt76_sband sband_2g;
	struct mt76_sband sband_5g;

	u32 vif_mask;

	int txpower_cur;
	u8 antenna_mask;
};

struct mt76_dev {
	struct mt76_phy phy; /* must be first */

	struct mt76_phy *phy2;

	struct ieee80211_hw *hw;

	spinlock_t lock;
	spinlock_t cc_lock;

	u32 cur_cc_bss_rx;

	struct mt76_rx_status rx_ampdu_status;
	u32 rx_ampdu_len;
	u32 rx_ampdu_ref;

	struct mutex mutex;

	const struct mt76_bus_ops *bus;
	const struct mt76_driver_ops *drv;
	const struct mt76_mcu_ops *mcu_ops;
	struct device *dev;

	struct mt76_mcu mcu;

	struct net_device napi_dev;
	spinlock_t rx_lock;
	struct napi_struct napi[__MT_RXQ_MAX];
	struct sk_buff_head rx_skb[__MT_RXQ_MAX];

	struct list_head txwi_cache;
	struct mt76_sw_queue q_tx[2 * __MT_TXQ_MAX];
	struct mt76_queue q_rx[__MT_RXQ_MAX];
	const struct mt76_queue_ops *queue_ops;
	int tx_dma_idx[4];

	struct tasklet_struct tx_tasklet;
	struct napi_struct tx_napi;
	struct delayed_work mac_work;

	wait_queue_head_t tx_wait;
	struct sk_buff_head status_list;

	u32 wcid_mask[DIV_ROUND_UP(MT76_N_WCIDS, 32)];
	u32 wcid_phy_mask[DIV_ROUND_UP(MT76_N_WCIDS, 32)];

	struct mt76_wcid global_wcid;
	struct mt76_wcid __rcu *wcid[MT76_N_WCIDS];

	u8 macaddr[ETH_ALEN];
	u32 rev;

	u32 aggr_stats[32];

	struct tasklet_struct pre_tbtt_tasklet;
	int beacon_int;
	u8 beacon_mask;

	struct debugfs_blob_wrapper eeprom;
	struct debugfs_blob_wrapper otp;
	struct mt76_hw_cap cap;

	struct mt76_rate_power rate_power;

	enum nl80211_dfs_regions region;

	u32 debugfs_reg;

	struct led_classdev led_cdev;
	char led_name[32];
	bool led_al;
	u8 led_pin;

	u8 csa_complete;

	u32 rxfilter;

#ifdef CONFIG_NL80211_TESTMODE
	const struct mt76_testmode_ops *test_ops;
	struct mt76_testmode_data test;
#endif

	union {
		struct mt76_mmio mmio;
		struct mt76_usb usb;
	};
};

enum mt76_phy_type {
	MT_PHY_TYPE_CCK,
	MT_PHY_TYPE_OFDM,
	MT_PHY_TYPE_HT,
	MT_PHY_TYPE_HT_GF,
	MT_PHY_TYPE_VHT,
	MT_PHY_TYPE_HE_SU = 8,
	MT_PHY_TYPE_HE_EXT_SU,
	MT_PHY_TYPE_HE_TB,
	MT_PHY_TYPE_HE_MU,
};
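/*
 * Register access goes through the bus ops: the __mt76_* variants take a
 * struct mt76_dev directly, while the unprefixed ones expect a driver
 * private struct that embeds it as "mt76". A minimal usage sketch
 * (MT_REG_FOO and MT_REG_FOO_FIELD are made-up names for illustration):
 *
 *	u32 val = __mt76_rr(dev, MT_REG_FOO);
 *	__mt76_rmw(dev, MT_REG_FOO, MT_REG_FOO_FIELD,
 *		   FIELD_PREP(MT_REG_FOO_FIELD, 5));
 *	__mt76_set(dev, MT_REG_FOO, BIT(0));
 */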
#define __mt76_rr(dev, ...)	(dev)->bus->rr((dev), __VA_ARGS__)
#define __mt76_wr(dev, ...)	(dev)->bus->wr((dev), __VA_ARGS__)
#define __mt76_rmw(dev, ...)	(dev)->bus->rmw((dev), __VA_ARGS__)
#define __mt76_wr_copy(dev, ...)	(dev)->bus->write_copy((dev), __VA_ARGS__)
#define __mt76_rr_copy(dev, ...)	(dev)->bus->read_copy((dev), __VA_ARGS__)

#define __mt76_set(dev, offset, val)	__mt76_rmw(dev, offset, 0, val)
#define __mt76_clear(dev, offset, val)	__mt76_rmw(dev, offset, val, 0)

#define mt76_rr(dev, ...)	(dev)->mt76.bus->rr(&((dev)->mt76), __VA_ARGS__)
#define mt76_wr(dev, ...)	(dev)->mt76.bus->wr(&((dev)->mt76), __VA_ARGS__)
#define mt76_rmw(dev, ...)	(dev)->mt76.bus->rmw(&((dev)->mt76), __VA_ARGS__)
#define mt76_wr_copy(dev, ...)	(dev)->mt76.bus->write_copy(&((dev)->mt76), __VA_ARGS__)
#define mt76_rr_copy(dev, ...)	(dev)->mt76.bus->read_copy(&((dev)->mt76), __VA_ARGS__)
#define mt76_wr_rp(dev, ...)	(dev)->mt76.bus->wr_rp(&((dev)->mt76), __VA_ARGS__)
#define mt76_rd_rp(dev, ...)	(dev)->mt76.bus->rd_rp(&((dev)->mt76), __VA_ARGS__)

#define mt76_mcu_send_msg(dev, ...)	(dev)->mt76.mcu_ops->mcu_send_msg(&((dev)->mt76), __VA_ARGS__)

#define __mt76_mcu_send_msg(dev, ...)	(dev)->mcu_ops->mcu_send_msg((dev), __VA_ARGS__)
#define __mt76_mcu_skb_send_msg(dev, ...)	(dev)->mcu_ops->mcu_skb_send_msg((dev), __VA_ARGS__)
#define mt76_mcu_restart(dev, ...)	(dev)->mt76.mcu_ops->mcu_restart(&((dev)->mt76))
#define __mt76_mcu_restart(dev, ...)	(dev)->mcu_ops->mcu_restart((dev))

#define mt76_set(dev, offset, val)	mt76_rmw(dev, offset, 0, val)
#define mt76_clear(dev, offset, val)	mt76_rmw(dev, offset, val, 0)

#define mt76_get_field(_dev, _reg, _field)		\
	FIELD_GET(_field, mt76_rr(_dev, _reg))

#define mt76_rmw_field(_dev, _reg, _field, _val)	\
	mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val))

#define __mt76_rmw_field(_dev, _reg, _field, _val)	\
	__mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val))

#define mt76_hw(dev) (dev)->mphy.hw

static inline struct ieee80211_hw *
mt76_wcid_hw(struct mt76_dev *dev, u16 wcid)
{
	if (wcid < MT76_N_WCIDS &&
	    mt76_wcid_mask_test(dev->wcid_phy_mask, wcid))
		return dev->phy2->hw;

	return dev->phy.hw;
}

bool __mt76_poll(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
		 int timeout);

#define mt76_poll(dev, ...) __mt76_poll(&((dev)->mt76), __VA_ARGS__)

bool __mt76_poll_msec(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
		      int timeout);

#define mt76_poll_msec(dev, ...) __mt76_poll_msec(&((dev)->mt76), __VA_ARGS__)

void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs);
void mt76_pci_disable_aspm(struct pci_dev *pdev);

static inline u16 mt76_chip(struct mt76_dev *dev)
{
	return dev->rev >> 16;
}

static inline u16 mt76_rev(struct mt76_dev *dev)
{
	return dev->rev & 0xffff;
}

#define mt76xx_chip(dev) mt76_chip(&((dev)->mt76))
#define mt76xx_rev(dev) mt76_rev(&((dev)->mt76))
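/*
 * dev->rev packs the chip id into the upper 16 bits and the hardware
 * revision into the lower 16, so e.g. a (hypothetical) rev value of
 * 0x76120010 yields mt76_chip() == 0x7612 and mt76_rev() == 0x0010.
 */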
#define mt76_init_queues(dev)		(dev)->mt76.queue_ops->init(&((dev)->mt76))
#define mt76_queue_alloc(dev, ...)	(dev)->mt76.queue_ops->alloc(&((dev)->mt76), __VA_ARGS__)
#define mt76_tx_queue_skb_raw(dev, ...)	(dev)->mt76.queue_ops->tx_queue_skb_raw(&((dev)->mt76), __VA_ARGS__)
#define mt76_tx_queue_skb(dev, ...)	(dev)->mt76.queue_ops->tx_queue_skb(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_rx_reset(dev, ...)	(dev)->mt76.queue_ops->rx_reset(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_tx_cleanup(dev, ...)	(dev)->mt76.queue_ops->tx_cleanup(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_kick(dev, ...)	(dev)->mt76.queue_ops->kick(&((dev)->mt76), __VA_ARGS__)

#define mt76_for_each_q_rx(dev, i)	\
	for (i = 0; i < ARRAY_SIZE((dev)->q_rx) && \
		    (dev)->q_rx[i].ndesc; i++)

struct mt76_dev *mt76_alloc_device(struct device *pdev, unsigned int size,
				   const struct ieee80211_ops *ops,
				   const struct mt76_driver_ops *drv_ops);
int mt76_register_device(struct mt76_dev *dev, bool vht,
			 struct ieee80211_rate *rates, int n_rates);
void mt76_unregister_device(struct mt76_dev *dev);
void mt76_free_device(struct mt76_dev *dev);
void mt76_unregister_phy(struct mt76_phy *phy);

struct mt76_phy *mt76_alloc_phy(struct mt76_dev *dev, unsigned int size,
				const struct ieee80211_ops *ops);
int mt76_register_phy(struct mt76_phy *phy);

struct dentry *mt76_register_debugfs(struct mt76_dev *dev);
int mt76_queues_read(struct seq_file *s, void *data);
void mt76_seq_puts_array(struct seq_file *file, const char *str,
			 s8 *val, int len);

int mt76_eeprom_init(struct mt76_dev *dev, int len);
void mt76_eeprom_override(struct mt76_dev *dev);

static inline struct mt76_phy *
mt76_dev_phy(struct mt76_dev *dev, bool phy_ext)
{
	if (phy_ext && dev->phy2)
		return dev->phy2;

	return &dev->phy;
}

static inline struct ieee80211_hw *
mt76_phy_hw(struct mt76_dev *dev, bool phy_ext)
{
	return mt76_dev_phy(dev, phy_ext)->hw;
}

static inline u8 *
mt76_get_txwi_ptr(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
	return (u8 *)t - dev->drv->txwi_size;
}

/* increment with wrap-around */
static inline int mt76_incr(int val, int size)
{
	return (val + 1) & (size - 1);
}

/* decrement with wrap-around */
static inline int mt76_decr(int val, int size)
{
	return (val - 1) & (size - 1);
}

u8 mt76_ac_to_hwq(u8 ac);

static inline struct ieee80211_txq *
mtxq_to_txq(struct mt76_txq *mtxq)
{
	void *ptr = mtxq;

	return container_of(ptr, struct ieee80211_txq, drv_priv);
}

static inline struct ieee80211_sta *
wcid_to_sta(struct mt76_wcid *wcid)
{
	void *ptr = wcid;

	if (!wcid || !wcid->sta)
		return NULL;

	return container_of(ptr, struct ieee80211_sta, drv_priv);
}

static inline struct mt76_tx_cb *mt76_tx_skb_cb(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct mt76_tx_cb) >
		     sizeof(IEEE80211_SKB_CB(skb)->status.status_driver_data));
	return ((void *)IEEE80211_SKB_CB(skb)->status.status_driver_data);
}

static inline void *mt76_skb_get_hdr(struct sk_buff *skb)
{
	struct mt76_rx_status mstat;
	u8 *data = skb->data;

	/* Alignment concerns */
	BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he) % 4);
	BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he_mu) % 4);

	mstat = *((struct mt76_rx_status *)skb->cb);

	if (mstat.flag & RX_FLAG_RADIOTAP_HE)
		data += sizeof(struct ieee80211_radiotap_he);
	if (mstat.flag & RX_FLAG_RADIOTAP_HE_MU)
		data += sizeof(struct ieee80211_radiotap_he_mu);

	return data;
}

static inline void mt76_insert_hdr_pad(struct sk_buff *skb)
{
	int len = ieee80211_get_hdrlen_from_skb(skb);

	if (len % 4 == 0)
		return;

	skb_push(skb, 2);
	memmove(skb->data, skb->data + 2, len);

	skb->data[len] = 0;
	skb->data[len + 1] = 0;
}
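/*
 * Packet ids MT_PACKET_ID_NO_ACK and MT_PACKET_ID_NO_SKB are reserved, so
 * only ids from MT_PACKET_ID_FIRST upwards (without the rate flag set)
 * refer to an skb parked on the status list.
 */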
static inline bool mt76_is_skb_pktid(u8 pktid)
{
	if (pktid & MT_PACKET_ID_HAS_RATE)
		return false;

	return pktid >= MT_PACKET_ID_FIRST;
}

static inline u8 mt76_tx_power_nss_delta(u8 nss)
{
	static const u8 nss_delta[4] = { 0, 6, 9, 12 };

	return nss_delta[nss - 1];
}

static inline bool mt76_testmode_enabled(struct mt76_dev *dev)
{
#ifdef CONFIG_NL80211_TESTMODE
	return dev->test.state != MT76_TM_STATE_OFF;
#else
	return false;
#endif
}

void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb);
void mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta,
	     struct mt76_wcid *wcid, struct sk_buff *skb);
void mt76_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq);
void mt76_txq_remove(struct mt76_dev *dev, struct ieee80211_txq *txq);
void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta,
			 bool send_bar);
void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid);
void mt76_txq_schedule_all(struct mt76_phy *phy);
void mt76_tx_tasklet(unsigned long data);
void mt76_release_buffered_frames(struct ieee80211_hw *hw,
				  struct ieee80211_sta *sta,
				  u16 tids, int nframes,
				  enum ieee80211_frame_release_type reason,
				  bool more_data);
bool mt76_has_tx_pending(struct mt76_phy *phy);
void mt76_set_channel(struct mt76_phy *phy);
void mt76_update_survey(struct mt76_dev *dev);
int mt76_get_survey(struct ieee80211_hw *hw, int idx,
		    struct survey_info *survey);
void mt76_set_stream_caps(struct mt76_phy *phy, bool vht);

int mt76_rx_aggr_start(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid,
		       u16 ssn, u16 size);
void mt76_rx_aggr_stop(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid);

void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
			 struct ieee80211_key_conf *key);

void mt76_tx_status_lock(struct mt76_dev *dev, struct sk_buff_head *list)
		   __acquires(&dev->status_list.lock);
void mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
		   __releases(&dev->status_list.lock);

int mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid,
			   struct sk_buff *skb);
struct sk_buff *mt76_tx_status_skb_get(struct mt76_dev *dev,
				       struct mt76_wcid *wcid, int pktid,
				       struct sk_buff_head *list);
void mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb,
			     struct sk_buff_head *list);
void mt76_tx_complete_skb(struct mt76_dev *dev, struct sk_buff *skb);
void mt76_tx_status_check(struct mt76_dev *dev, struct mt76_wcid *wcid,
			  bool flush);
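/*
 * Tx status tracking: a driver adds a frame via mt76_tx_status_skb_add()
 * when queueing it, then matches the hardware status report back to the
 * skb by packet id. A rough sketch of the intended flow (dev, wcid and
 * pktid come from the driver's status parser, not from this header):
 *
 *	struct sk_buff_head list;
 *	struct sk_buff *skb;
 *
 *	mt76_tx_status_lock(dev, &list);
 *	skb = mt76_tx_status_skb_get(dev, wcid, pktid, &list);
 *	if (skb)
 *		mt76_tx_status_skb_done(dev, skb, &list);
 *	mt76_tx_status_unlock(dev, &list);
 */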
int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta,
		   enum ieee80211_sta_state old_state,
		   enum ieee80211_sta_state new_state);
void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta);
void mt76_sta_pre_rcu_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta);

int mt76_get_min_avg_rssi(struct mt76_dev *dev, bool ext_phy);

int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		     int *dbm);

void mt76_csa_check(struct mt76_dev *dev);
void mt76_csa_finish(struct mt76_dev *dev);

int mt76_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant);
int mt76_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set);
void mt76_insert_ccmp_hdr(struct sk_buff *skb, u8 key_id);
int mt76_get_rate(struct mt76_dev *dev,
		  struct ieee80211_supported_band *sband,
		  int idx, bool cck);
void mt76_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		  const u8 *mac);
void mt76_sw_scan_complete(struct ieee80211_hw *hw,
			   struct ieee80211_vif *vif);
int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		      void *data, int len);
int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
		       struct netlink_callback *cb, void *data, int len);
int mt76_testmode_set_state(struct mt76_dev *dev, enum mt76_testmode_state state);

static inline void mt76_testmode_reset(struct mt76_dev *dev, bool disable)
{
#ifdef CONFIG_NL80211_TESTMODE
	enum mt76_testmode_state state = MT76_TM_STATE_IDLE;

	if (disable || dev->test.state == MT76_TM_STATE_OFF)
		state = MT76_TM_STATE_OFF;

	mt76_testmode_set_state(dev, state);
#endif
}

/* internal */
static inline struct ieee80211_hw *
mt76_tx_status_get_hw(struct mt76_dev *dev, struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hw *hw = dev->phy.hw;

	if ((info->hw_queue & MT_TX_HW_QUEUE_EXT_PHY) && dev->phy2)
		hw = dev->phy2->hw;

	info->hw_queue &= ~MT_TX_HW_QUEUE_EXT_PHY;

	return hw;
}

void mt76_tx_free(struct mt76_dev *dev);
struct mt76_txwi_cache *mt76_get_txwi(struct mt76_dev *dev);
void mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
		      struct napi_struct *napi);
void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
			   struct napi_struct *napi);
void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames);
void mt76_testmode_tx_pending(struct mt76_dev *dev);

/* usb */
static inline bool mt76u_urb_error(struct urb *urb)
{
	return urb->status &&
	       urb->status != -ECONNRESET &&
	       urb->status != -ESHUTDOWN &&
	       urb->status != -ENOENT;
}

/* Map hardware queues to usb endpoints */
static inline u8 q2ep(u8 qid)
{
	/* TODO: take management packets to queue 5 */
	return qid + 1;
}

static inline int
mt76u_bulk_msg(struct mt76_dev *dev, void *data, int len, int *actual_len,
	       int timeout, int ep)
{
	struct usb_interface *uintf = to_usb_interface(dev->dev);
	struct usb_device *udev = interface_to_usbdev(uintf);
	struct mt76_usb *usb = &dev->usb;
	unsigned int pipe;

	if (actual_len)
		pipe = usb_rcvbulkpipe(udev, usb->in_ep[ep]);
	else
		pipe = usb_sndbulkpipe(udev, usb->out_ep[ep]);

	return usb_bulk_msg(udev, pipe, data, len, actual_len, timeout);
}
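/*
 * Note: mt76u_bulk_msg() infers the transfer direction from actual_len:
 * a non-NULL pointer selects the bulk IN pipe (read), NULL selects the
 * bulk OUT pipe (write).
 */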
int mt76u_skb_dma_info(struct sk_buff *skb, u32 info);
int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
			 u8 req_type, u16 val, u16 offset,
			 void *buf, size_t len);
void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
		     const u16 offset, const u32 val);
void mt76u_deinit(struct mt76_dev *dev);
int mt76u_init(struct mt76_dev *dev, struct usb_interface *intf,
	       bool ext);
int mt76u_alloc_mcu_queue(struct mt76_dev *dev);
int mt76u_alloc_queues(struct mt76_dev *dev);
void mt76u_stop_tx(struct mt76_dev *dev);
void mt76u_stop_rx(struct mt76_dev *dev);
int mt76u_resume_rx(struct mt76_dev *dev);
void mt76u_queues_deinit(struct mt76_dev *dev);

struct sk_buff *
mt76_mcu_msg_alloc(struct mt76_dev *dev, const void *data,
		   int data_len);
void mt76_mcu_rx_event(struct mt76_dev *dev, struct sk_buff *skb);
struct sk_buff *mt76_mcu_get_response(struct mt76_dev *dev,
				      unsigned long expires);

void mt76_set_irq_mask(struct mt76_dev *dev, u32 addr, u32 clear, u32 set);

#endif