Lines matching refs: dev (identifier search over mt76.h in the Linux mt76 wireless driver; only lines containing the identifier are shown, so many prototypes appear truncated)
65 u32 (*rr)(struct mt76_dev *dev, u32 offset);
66 void (*wr)(struct mt76_dev *dev, u32 offset, u32 val);
67 u32 (*rmw)(struct mt76_dev *dev, u32 offset, u32 mask, u32 val);
68 void (*write_copy)(struct mt76_dev *dev, u32 offset, const void *data,
70 void (*read_copy)(struct mt76_dev *dev, u32 offset, void *data,
72 int (*wr_rp)(struct mt76_dev *dev, u32 base,
74 int (*rd_rp)(struct mt76_dev *dev, u32 base,
79 #define mt76_is_usb(dev) ((dev)->bus->type == MT76_BUS_USB) argument
80 #define mt76_is_mmio(dev) ((dev)->bus->type == MT76_BUS_MMIO) argument
81 #define mt76_is_sdio(dev) ((dev)->bus->type == MT76_BUS_SDIO) argument
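The callbacks above make up the bus abstraction (struct mt76_bus_ops); the mt76_is_*() macros test the table's type field. A minimal sketch of how an MMIO backend might fill it, assuming the usual readl()/writel() window behind dev->mmio.regs (the my_* names are illustrative, not the driver's actual implementations):

static u32 my_rr(struct mt76_dev *dev, u32 offset)
{
        /* dev->mmio.regs assumed mapped, e.g. by mt76_mmio_init() below */
        return readl(dev->mmio.regs + offset);
}

static void my_wr(struct mt76_dev *dev, u32 offset, u32 val)
{
        writel(val, dev->mmio.regs + offset);
}

static u32 my_rmw(struct mt76_dev *dev, u32 offset, u32 mask, u32 val)
{
        u32 cur = (my_rr(dev, offset) & ~mask) | val;

        my_wr(dev, offset, cur);
        return cur;
}

static const struct mt76_bus_ops my_bus_ops = {
        .rr   = my_rr,
        .wr   = my_wr,
        .rmw  = my_rmw,
        .type = MT76_BUS_MMIO,  /* makes mt76_is_mmio(dev) true */
};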
213 int (*mcu_send_msg)(struct mt76_dev *dev, int cmd, const void *data,
215 int (*mcu_skb_send_msg)(struct mt76_dev *dev, struct sk_buff *skb,
217 int (*mcu_parse_response)(struct mt76_dev *dev, int cmd,
219 u32 (*mcu_rr)(struct mt76_dev *dev, u32 offset);
220 void (*mcu_wr)(struct mt76_dev *dev, u32 offset, u32 val);
221 int (*mcu_wr_rp)(struct mt76_dev *dev, u32 base,
223 int (*mcu_rd_rp)(struct mt76_dev *dev, u32 base,
225 int (*mcu_restart)(struct mt76_dev *dev);
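These MCU hooks are reached through dev->mcu_ops, as the mt76_mcu_* wrappers further down show. A hedged dispatch sketch; the trailing bool wait_resp argument is assumed from the truncated mcu_send_msg prototype and from the mt76_mcu_send_msg() helper listed later:

static int my_send_cmd(struct mt76_dev *dev, int cmd,
                       const void *data, int len)
{
        if (!dev->mcu_ops->mcu_send_msg)
                return -EOPNOTSUPP;

        /* final argument: wait for the MCU response (assumed bool
         * wait_resp; the continuation line is elided in this listing)
         */
        return dev->mcu_ops->mcu_send_msg(dev, cmd, data, len, true);
}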
229 int (*init)(struct mt76_dev *dev,
232 int (*alloc)(struct mt76_dev *dev, struct mt76_queue *q,
236 int (*tx_queue_skb)(struct mt76_dev *dev, struct mt76_queue *q,
240 int (*tx_queue_skb_raw)(struct mt76_dev *dev, struct mt76_queue *q,
243 void *(*dequeue)(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
246 void (*rx_reset)(struct mt76_dev *dev, enum mt76_rxq_id qid);
248 void (*tx_cleanup)(struct mt76_dev *dev, struct mt76_queue *q,
251 void (*rx_cleanup)(struct mt76_dev *dev, struct mt76_queue *q);
253 void (*kick)(struct mt76_dev *dev, struct mt76_queue *q);
255 void (*reset_q)(struct mt76_dev *dev, struct mt76_queue *q);
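The queue layer is a similar vtable, typically installed by the DMA, USB, or SDIO backend and reached via dev->queue_ops; the mt76_queue_* macros listed later wrap exactly this indirection. A sketch using the two callbacks whose prototypes appear in full above:

static void my_rx_ring_teardown(struct mt76_dev *dev, struct mt76_queue *q)
{
        dev->queue_ops->rx_cleanup(dev, q);     /* free pending RX buffers */
        dev->queue_ops->reset_q(dev, q);        /* re-arm the descriptor ring */
}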
369 struct mt76_dev *dev; member
452 int (*tx_prepare_skb)(struct mt76_dev *dev, void *txwi_ptr,
457 void (*tx_complete_skb)(struct mt76_dev *dev,
460 bool (*tx_status_data)(struct mt76_dev *dev, u8 *update);
462 bool (*rx_check)(struct mt76_dev *dev, void *data, int len);
464 void (*rx_skb)(struct mt76_dev *dev, enum mt76_rxq_id q,
467 void (*rx_poll_complete)(struct mt76_dev *dev, enum mt76_rxq_id q);
469 void (*sta_ps)(struct mt76_dev *dev, struct ieee80211_sta *sta,
472 int (*sta_add)(struct mt76_dev *dev, struct ieee80211_vif *vif,
475 void (*sta_assoc)(struct mt76_dev *dev, struct ieee80211_vif *vif,
478 void (*sta_remove)(struct mt76_dev *dev, struct ieee80211_vif *vif,
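The tx/rx/sta hooks above sit in the per-chip driver ops table (struct mt76_driver_ops in the mt76 tree, reached through dev->drv as mt76_get_txwi_ptr() below shows). An illustrative fragment; the mt7xxx_* handlers are placeholders, each matching the corresponding prototype above:

static const struct mt76_driver_ops mt7xxx_drv_ops = {
        .tx_prepare_skb   = mt7xxx_tx_prepare_skb,
        .tx_complete_skb  = mt7xxx_tx_complete_skb,
        .tx_status_data   = mt7xxx_tx_status_data,
        .rx_skb           = mt7xxx_queue_rx_skb,
        .rx_poll_complete = mt7xxx_rx_poll_complete,
        .sta_add          = mt7xxx_sta_add,
        .sta_remove       = mt7xxx_sta_remove,
};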
596 int (*parse_irq)(struct mt76_dev *dev, struct mt76s_intr *intr);
719 struct mt76_dev *dev; member
793 struct device *dev; member
1000 #define __mt76_rr(dev, ...) (dev)->bus->rr((dev), __VA_ARGS__) argument
1001 #define __mt76_wr(dev, ...) (dev)->bus->wr((dev), __VA_ARGS__) argument
1002 #define __mt76_rmw(dev, ...) (dev)->bus->rmw((dev), __VA_ARGS__) argument
1003 #define __mt76_wr_copy(dev, ...) (dev)->bus->write_copy((dev), __VA_ARGS__) argument
1004 #define __mt76_rr_copy(dev, ...) (dev)->bus->read_copy((dev), __VA_ARGS__) argument
1006 #define __mt76_set(dev, offset, val) __mt76_rmw(dev, offset, 0, val) argument
1007 #define __mt76_clear(dev, offset, val) __mt76_rmw(dev, offset, val, 0) argument
1009 #define mt76_rr(dev, ...) (dev)->mt76.bus->rr(&((dev)->mt76), __VA_ARGS__) argument
1010 #define mt76_wr(dev, ...) (dev)->mt76.bus->wr(&((dev)->mt76), __VA_ARGS__) argument
1011 #define mt76_rmw(dev, ...) (dev)->mt76.bus->rmw(&((dev)->mt76), __VA_ARGS__) argument
1012 #define mt76_wr_copy(dev, ...) (dev)->mt76.bus->write_copy(&((dev)->mt76), __VA_ARGS__) argument
1013 #define mt76_rr_copy(dev, ...) (dev)->mt76.bus->read_copy(&((dev)->mt76), __VA_ARGS__) argument
1014 #define mt76_wr_rp(dev, ...) (dev)->mt76.bus->wr_rp(&((dev)->mt76), __VA_ARGS__) argument
1015 #define mt76_rd_rp(dev, ...) (dev)->mt76.bus->rd_rp(&((dev)->mt76), __VA_ARGS__) argument
1018 #define mt76_mcu_restart(dev, ...) (dev)->mt76.mcu_ops->mcu_restart(&((dev)->mt76)) argument
1020 #define mt76_set(dev, offset, val) mt76_rmw(dev, offset, 0, val) argument
1021 #define mt76_clear(dev, offset, val) mt76_rmw(dev, offset, val, 0) argument
1024 FIELD_GET(_field, mt76_rr(dev, _reg))
1032 #define mt76_hw(dev) (dev)->mphy.hw argument
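Usage sketch for the accessors above: the double-underscore forms take a struct mt76_dev * directly, while the plain forms expect a chip driver's dev that embeds the core as ->mt76. The bare FIELD_GET() line above is the continuation of a field-read macro (mt76_get_field() in the mt76 tree). MY_REG, its field, and struct mt7xxx_dev are made up for illustration:

#define MY_REG       0x1234                 /* illustrative register */
#define MY_REG_MODE  GENMASK(7, 4)          /* illustrative field */

static u32 my_config(struct mt7xxx_dev *dev)
{
        mt76_set(dev, MY_REG, BIT(0));      /* read-modify-write: set bit 0 */
        mt76_clear(dev, MY_REG, BIT(1));    /* read-modify-write: clear bit 1 */
        mt76_rmw(dev, MY_REG, MY_REG_MODE, FIELD_PREP(MY_REG_MODE, 3));

        /* read the field back; the macro body is the FIELD_GET line above */
        return mt76_get_field(dev, MY_REG, MY_REG_MODE);
}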
1034 bool __mt76_poll(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
1037 #define mt76_poll(dev, ...) __mt76_poll(&((dev)->mt76), __VA_ARGS__) argument
1039 bool ____mt76_poll_msec(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
1042 #define mt76_poll_msec(dev, ...) ____mt76_poll_msec(&((dev)->mt76), __VA_ARGS__, 10) argument
1043 #define mt76_poll_msec_tick(dev, ...) ____mt76_poll_msec(&((dev)->mt76), __VA_ARGS__) argument
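The poll helpers re-read a register until (value & mask) == val or the timeout runs out; mt76_poll_msec() pins the tick to 10, while the _tick variant leaves it to the caller. A sketch, assuming (per the mt76 tree) the last mt76_poll() argument is a microsecond-scale timeout; MY_STATUS_REG is illustrative:

#define MY_STATUS_REG 0x0200    /* illustrative register */

static int my_wait_ready(struct mt7xxx_dev *dev)
{
        /* wait for the (hypothetical) ready bit to latch */
        if (!mt76_poll(dev, MY_STATUS_REG, BIT(0), BIT(0), 1000))
                return -ETIMEDOUT;
        return 0;
}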
1045 void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs);
1048 static inline u16 mt76_chip(struct mt76_dev *dev) in mt76_chip() argument
1050 return dev->rev >> 16; in mt76_chip()
1053 static inline u16 mt76_rev(struct mt76_dev *dev) in mt76_rev() argument
1055 return dev->rev & 0xffff; in mt76_rev()
1058 #define mt76xx_chip(dev) mt76_chip(&((dev)->mt76)) argument
1059 #define mt76xx_rev(dev) mt76_rev(&((dev)->mt76)) argument
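dev->rev packs the chip ID into the upper 16 bits and the hardware revision into the lower 16, per mt76_chip()/mt76_rev() above; the mt76xx_* forms unwrap a chip driver's embedded ->mt76. Example, with illustrative chip/revision values and a hypothetical quirk helper:

static void my_apply_quirks(struct mt7xxx_dev *dev)
{
        if (mt76xx_chip(dev) == 0x7663 && mt76xx_rev(dev) >= 0x0010)
                my_enable_quirk(dev);   /* hypothetical per-revision workaround */
}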
1061 #define mt76_init_queues(dev, ...) (dev)->mt76.queue_ops->init(&((dev)->mt76), __VA_ARGS__) argument
1062 #define mt76_queue_alloc(dev, ...) (dev)->mt76.queue_ops->alloc(&((dev)->mt76), __VA_ARGS__) argument
1063 #define mt76_tx_queue_skb_raw(dev, ...) (dev)->mt76.queue_ops->tx_queue_skb_raw(&((dev)->mt76), __V… argument
1064 #define mt76_tx_queue_skb(dev, ...) (dev)->mt76.queue_ops->tx_queue_skb(&((dev)->mt76), __VA_ARGS__) argument
1065 #define mt76_queue_rx_reset(dev, ...) (dev)->mt76.queue_ops->rx_reset(&((dev)->mt76), __VA_ARGS__) argument
1066 #define mt76_queue_tx_cleanup(dev, ...) (dev)->mt76.queue_ops->tx_cleanup(&((dev)->mt76), __VA_ARGS… argument
1067 #define mt76_queue_rx_cleanup(dev, ...) (dev)->mt76.queue_ops->rx_cleanup(&((dev)->mt76), __VA_ARGS… argument
1068 #define mt76_queue_kick(dev, ...) (dev)->mt76.queue_ops->kick(&((dev)->mt76), __VA_ARGS__) argument
1069 #define mt76_queue_reset(dev, ...) (dev)->mt76.queue_ops->reset_q(&((dev)->mt76), __VA_ARGS__) argument
1071 #define mt76_for_each_q_rx(dev, i) \ argument
1072 for (i = 0; i < ARRAY_SIZE((dev)->q_rx); i++) \
1073 if ((dev)->q_rx[i].ndesc)
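mt76_for_each_q_rx() visits only the RX queues whose descriptor ring was allocated (ndesc != 0). Note the asymmetry: the iterator takes the core struct mt76_dev, while the mt76_queue_* macros take the wrapping chip dev. A combined sketch; the bool flush argument to tx_cleanup is assumed from its truncated prototype:

static void my_reset_rx(struct mt7xxx_dev *dev)
{
        int i;

        mt76_for_each_q_rx(&dev->mt76, i)
                mt76_queue_rx_reset(dev, i);
}

static void my_txq_poll(struct mt7xxx_dev *dev, struct mt76_queue *q)
{
        mt76_queue_tx_cleanup(dev, q, false);   /* reclaim completed entries */
        mt76_queue_kick(dev, q);                /* ring the hardware doorbell */
}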
1078 int mt76_register_device(struct mt76_dev *dev, bool vht,
1080 void mt76_unregister_device(struct mt76_dev *dev);
1081 void mt76_free_device(struct mt76_dev *dev);
1084 struct mt76_phy *mt76_alloc_phy(struct mt76_dev *dev, unsigned int size,
1092 static inline struct dentry *mt76_register_debugfs(struct mt76_dev *dev) in mt76_register_debugfs() argument
1094 return mt76_register_debugfs_fops(&dev->phy, NULL); in mt76_register_debugfs()
1101 int mt76_eeprom_init(struct mt76_dev *dev, int len);
1103 int mt76_get_of_eeprom(struct mt76_dev *dev, void *data, int offset, int len);
1106 mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc,
1115 q = mt76_init_queue(phy->dev, qid, idx, n_desc, ring_base, flags); in mt76_init_tx_queue()
1124 static inline int mt76_init_mcu_queue(struct mt76_dev *dev, int qid, int idx, in mt76_init_mcu_queue() argument
1129 q = mt76_init_queue(dev, qid, idx, n_desc, ring_base, 0); in mt76_init_mcu_queue()
1133 dev->q_mcu[qid] = q; in mt76_init_mcu_queue()
1139 mt76_dev_phy(struct mt76_dev *dev, u8 phy_idx) in mt76_dev_phy() argument
1141 if ((phy_idx == MT_BAND1 && dev->phys[phy_idx]) || in mt76_dev_phy()
1142 (phy_idx == MT_BAND2 && dev->phys[phy_idx])) in mt76_dev_phy()
1143 return dev->phys[phy_idx]; in mt76_dev_phy()
1145 return &dev->phy; in mt76_dev_phy()
1149 mt76_phy_hw(struct mt76_dev *dev, u8 phy_idx) in mt76_phy_hw() argument
1151 return mt76_dev_phy(dev, phy_idx)->hw; in mt76_phy_hw()
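mt76_dev_phy() hands back the per-band phy when it is registered and otherwise falls back to the primary &dev->phy, so mt76_phy_hw() is safe for any valid index. For instance:

static struct ieee80211_hw *my_band1_hw(struct mt76_dev *mdev)
{
        /* primary phy's hw if band 1 was never allocated */
        return mt76_phy_hw(mdev, MT_BAND1);
}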
1155 mt76_get_txwi_ptr(struct mt76_dev *dev, struct mt76_txwi_cache *t) in mt76_get_txwi_ptr() argument
1157 return (u8 *)t - dev->drv->txwi_size; in mt76_get_txwi_ptr()
1258 static inline bool mt76_is_testmode_skb(struct mt76_dev *dev, in mt76_is_testmode_skb() argument
1265 for (i = 0; i < ARRAY_SIZE(dev->phys); i++) { in mt76_is_testmode_skb()
1266 struct mt76_phy *phy = dev->phys[i]; in mt76_is_testmode_skb()
1269 *hw = dev->phys[i]->hw; in mt76_is_testmode_skb()
1279 void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb);
1280 void mt76_tx(struct mt76_phy *dev, struct ieee80211_sta *sta,
1288 void mt76_tx_worker_run(struct mt76_dev *dev);
1304 int mt76_rx_aggr_start(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid,
1306 void mt76_rx_aggr_stop(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid);
1308 void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
1311 void mt76_tx_status_lock(struct mt76_dev *dev, struct sk_buff_head *list)
1312 __acquires(&dev->status_lock);
1313 void mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
1314 __releases(&dev->status_lock);
1316 int mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid,
1318 struct sk_buff *mt76_tx_status_skb_get(struct mt76_dev *dev,
1321 void mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb,
1323 void __mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid, struct sk_buff *skb,
1326 mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid, struct sk_buff *skb) in mt76_tx_complete_skb() argument
1328 __mt76_tx_complete_skb(dev, wcid, skb, NULL); in mt76_tx_complete_skb()
1331 void mt76_tx_status_check(struct mt76_dev *dev, bool flush);
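The status-list API is lock-bracketed: mt76_tx_status_lock() opens a collection window, mt76_tx_status_skb_get() matches a completed frame, and mt76_tx_status_skb_done() reports it. A completion-handler sketch; this listing truncates the prototypes, so the pktid parameter and argument order here follow the kernel's mt76 tree rather than the lines above:

static void my_mac_tx_status(struct mt76_dev *mdev,
                             struct mt76_wcid *wcid, int pktid)
{
        struct sk_buff_head list;
        struct sk_buff *skb;

        mt76_tx_status_lock(mdev, &list);
        skb = mt76_tx_status_skb_get(mdev, wcid, pktid, &list);
        if (skb)
                mt76_tx_status_skb_done(mdev, skb, &list);
        mt76_tx_status_unlock(mdev, &list);
}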
1336 void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
1341 int mt76_get_min_avg_rssi(struct mt76_dev *dev, bool ext_phy);
1351 void mt76_csa_check(struct mt76_dev *dev);
1352 void mt76_csa_finish(struct mt76_dev *dev);
1357 int mt76_get_rate(struct mt76_dev *dev,
1387 mt76_tx_status_get_hw(struct mt76_dev *dev, struct sk_buff *skb) in mt76_tx_status_get_hw() argument
1391 struct ieee80211_hw *hw = mt76_phy_hw(dev, phy_idx); in mt76_tx_status_get_hw()
1398 void mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
1399 void mt76_put_rxwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
1400 struct mt76_txwi_cache *mt76_get_rxwi(struct mt76_dev *dev);
1401 void mt76_free_pending_rxwi(struct mt76_dev *dev);
1402 void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
1404 void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
1408 void mt76_queue_tx_complete(struct mt76_dev *dev, struct mt76_queue *q,
1428 mt76u_bulk_msg(struct mt76_dev *dev, void *data, int len, int *actual_len, in mt76u_bulk_msg() argument
1431 struct usb_interface *uintf = to_usb_interface(dev->dev); in mt76u_bulk_msg()
1433 struct mt76_usb *usb = &dev->usb; in mt76u_bulk_msg()
1444 void mt76_ethtool_page_pool_stats(struct mt76_dev *dev, u64 *data, int *index);
1448 int __mt76u_vendor_request(struct mt76_dev *dev, u8 req, u8 req_type,
1450 int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
1453 void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
1455 void mt76u_read_copy(struct mt76_dev *dev, u32 offset,
1457 u32 ___mt76u_rr(struct mt76_dev *dev, u8 req, u8 req_type, u32 addr);
1458 void ___mt76u_wr(struct mt76_dev *dev, u8 req, u8 req_type,
1460 int __mt76u_init(struct mt76_dev *dev, struct usb_interface *intf,
1462 int mt76u_init(struct mt76_dev *dev, struct usb_interface *intf);
1463 int mt76u_alloc_mcu_queue(struct mt76_dev *dev);
1464 int mt76u_alloc_queues(struct mt76_dev *dev);
1465 void mt76u_stop_tx(struct mt76_dev *dev);
1466 void mt76u_stop_rx(struct mt76_dev *dev);
1467 int mt76u_resume_rx(struct mt76_dev *dev);
1468 void mt76u_queues_deinit(struct mt76_dev *dev);
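A typical USB probe tail built from the helpers above (all four prototypes appear in full); error unwinding abbreviated:

static int my_usb_probe_queues(struct mt76_dev *mdev,
                               struct usb_interface *intf)
{
        int err;

        err = mt76u_init(mdev, intf);
        if (err)
                return err;

        err = mt76u_alloc_mcu_queue(mdev);
        if (err)
                goto err_deinit;

        err = mt76u_alloc_queues(mdev);
        if (err)
                goto err_deinit;

        return 0;

err_deinit:
        mt76u_queues_deinit(mdev);
        return err;
}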
1470 int mt76s_init(struct mt76_dev *dev, struct sdio_func *func,
1472 int mt76s_alloc_rx_queue(struct mt76_dev *dev, enum mt76_rxq_id qid);
1473 int mt76s_alloc_tx(struct mt76_dev *dev);
1474 void mt76s_deinit(struct mt76_dev *dev);
1477 bool mt76s_txqs_empty(struct mt76_dev *dev);
1478 int mt76s_hw_init(struct mt76_dev *dev, struct sdio_func *func,
1480 u32 mt76s_rr(struct mt76_dev *dev, u32 offset);
1481 void mt76s_wr(struct mt76_dev *dev, u32 offset, u32 val);
1482 u32 mt76s_rmw(struct mt76_dev *dev, u32 offset, u32 mask, u32 val);
1483 u32 mt76s_read_pcr(struct mt76_dev *dev);
1484 void mt76s_write_copy(struct mt76_dev *dev, u32 offset,
1486 void mt76s_read_copy(struct mt76_dev *dev, u32 offset,
1488 int mt76s_wr_rp(struct mt76_dev *dev, u32 base,
1491 int mt76s_rd_rp(struct mt76_dev *dev, u32 base,
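The mt76s_* accessors have the same shapes as the struct mt76_bus_ops callbacks at the top of this listing, so the SDIO layer can publish them as its bus backend. A sketch of such a table (the in-tree one is set up inside the sdio code):

static const struct mt76_bus_ops mt76s_ops = {
        .rr         = mt76s_rr,
        .wr         = mt76s_wr,
        .rmw        = mt76s_rmw,
        .write_copy = mt76s_write_copy,
        .read_copy  = mt76s_read_copy,
        .wr_rp      = mt76s_wr_rp,
        .rd_rp      = mt76s_rd_rp,
        .type       = MT76_BUS_SDIO,    /* makes mt76_is_sdio(dev) true */
};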
1495 __mt76_mcu_msg_alloc(struct mt76_dev *dev, const void *data,
1498 mt76_mcu_msg_alloc(struct mt76_dev *dev, const void *data, in mt76_mcu_msg_alloc() argument
1501 return __mt76_mcu_msg_alloc(dev, data, data_len, data_len, GFP_KERNEL); in mt76_mcu_msg_alloc()
1504 void mt76_mcu_rx_event(struct mt76_dev *dev, struct sk_buff *skb);
1505 struct sk_buff *mt76_mcu_get_response(struct mt76_dev *dev,
1507 int mt76_mcu_send_and_get_msg(struct mt76_dev *dev, int cmd, const void *data,
1509 int mt76_mcu_skb_send_and_get_msg(struct mt76_dev *dev, struct sk_buff *skb,
1511 int __mt76_mcu_send_firmware(struct mt76_dev *dev, int cmd, const void *data,
1514 mt76_mcu_send_firmware(struct mt76_dev *dev, int cmd, const void *data, in mt76_mcu_send_firmware() argument
1517 int max_len = 4096 - dev->mcu_ops->headroom; in mt76_mcu_send_firmware()
1519 return __mt76_mcu_send_firmware(dev, cmd, data, len, max_len); in mt76_mcu_send_firmware()
1523 mt76_mcu_send_msg(struct mt76_dev *dev, int cmd, const void *data, int len, in mt76_mcu_send_msg() argument
1526 return mt76_mcu_send_and_get_msg(dev, cmd, data, len, wait_resp, NULL); in mt76_mcu_send_msg()
1530 mt76_mcu_skb_send_msg(struct mt76_dev *dev, struct sk_buff *skb, int cmd, in mt76_mcu_skb_send_msg() argument
1533 return mt76_mcu_skb_send_and_get_msg(dev, skb, cmd, wait_resp, NULL); in mt76_mcu_skb_send_msg()
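mt76_mcu_send_msg() and mt76_mcu_skb_send_msg() just drop the response pointer, and mt76_mcu_send_firmware() chunks a blob into messages of at most 4096 bytes minus the MCU headroom. A fire-and-forget command sketch; MCU_CMD_FOO and the request layout are placeholders:

#define MCU_CMD_FOO 0x01        /* illustrative command ID */

static int my_set_feature(struct mt7xxx_dev *dev)
{
        struct {
                u8 enable;
        } __packed req = { .enable = 1 };

        /* wait_resp = false: do not block on the MCU reply */
        return mt76_mcu_send_msg(&dev->mt76, MCU_CMD_FOO, &req, sizeof(req),
                                 false);
}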
1536 void mt76_set_irq_mask(struct mt76_dev *dev, u32 addr, u32 clear, u32 set);
1550 mt76_token_release(struct mt76_dev *dev, int token, bool *wake);
1551 int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi);
1552 void __mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked);
1553 struct mt76_txwi_cache *mt76_rx_token_release(struct mt76_dev *dev, int token);
1554 int mt76_rx_token_consume(struct mt76_dev *dev, void *ptr,
1556 int mt76_create_page_pool(struct mt76_dev *dev, struct mt76_queue *q);
1576 static inline void mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked) in mt76_set_tx_blocked() argument
1578 spin_lock_bh(&dev->token_lock); in mt76_set_tx_blocked()
1579 __mt76_set_tx_blocked(dev, blocked); in mt76_set_tx_blocked()
1580 spin_unlock_bh(&dev->token_lock); in mt76_set_tx_blocked()
1584 mt76_token_get(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi) in mt76_token_get() argument
1588 spin_lock_bh(&dev->token_lock); in mt76_token_get()
1589 token = idr_alloc(&dev->token, *ptxwi, 0, dev->token_size, GFP_ATOMIC); in mt76_token_get()
1590 spin_unlock_bh(&dev->token_lock); in mt76_token_get()
1596 mt76_token_put(struct mt76_dev *dev, int token) in mt76_token_put() argument
1600 spin_lock_bh(&dev->token_lock); in mt76_token_put()
1601 txwi = idr_remove(&dev->token, token); in mt76_token_put()
1602 spin_unlock_bh(&dev->token_lock); in mt76_token_put()
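The token idr maps a small integer DMA token to its txwi cache entry under token_lock; the TX fill path allocates one per frame and the hardware echoes it in the free event. A reclaim sketch (return types follow the kernel tree, since this listing omits the declarator lines):

static void my_tx_free_one(struct mt76_dev *mdev, int token)
{
        struct mt76_txwi_cache *txwi;

        txwi = mt76_token_put(mdev, token);     /* idr_remove under token_lock */
        if (txwi)
                mt76_put_txwi(mdev, txwi);      /* recycle the cache entry */
}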
1608 void mt76_wcid_cleanup(struct mt76_dev *dev, struct mt76_wcid *wcid);