/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef __MT76_H
#define __MT76_H

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/leds.h>
#include <net/mac80211.h>
#include "util.h"

#define MT_TX_RING_SIZE		256
#define MT_MCU_RING_SIZE	32
#define MT_RX_BUF_SIZE		2048

struct mt76_dev;

struct mt76_bus_ops {
	u32 (*rr)(struct mt76_dev *dev, u32 offset);
	void (*wr)(struct mt76_dev *dev, u32 offset, u32 val);
	u32 (*rmw)(struct mt76_dev *dev, u32 offset, u32 mask, u32 val);
	void (*copy)(struct mt76_dev *dev, u32 offset, const void *data,
		     int len);
};

enum mt76_txq_id {
	MT_TXQ_VO = IEEE80211_AC_VO,
	MT_TXQ_VI = IEEE80211_AC_VI,
	MT_TXQ_BE = IEEE80211_AC_BE,
	MT_TXQ_BK = IEEE80211_AC_BK,
	MT_TXQ_PSD,
	MT_TXQ_MCU,
	MT_TXQ_BEACON,
	MT_TXQ_CAB,
	__MT_TXQ_MAX
};

enum mt76_rxq_id {
	MT_RXQ_MAIN,
	MT_RXQ_MCU,
	__MT_RXQ_MAX
};

struct mt76_queue_buf {
	dma_addr_t addr;
	int len;
};

struct mt76_queue_entry {
	union {
		void *buf;
		struct sk_buff *skb;
	};
	struct mt76_txwi_cache *txwi;
	bool schedule;
};

struct mt76_queue_regs {
	u32 desc_base;
	u32 ring_size;
	u32 cpu_idx;
	u32 dma_idx;
} __packed __aligned(4);

struct mt76_queue {
	struct mt76_queue_regs __iomem *regs;

	spinlock_t lock;
	struct mt76_queue_entry *entry;
	struct mt76_desc *desc;

	struct list_head swq;
	int swq_queued;

	u16 head;
	u16 tail;
	int ndesc;
	int queued;
	int buf_size;

	u8 buf_offset;
	u8 hw_idx;

	dma_addr_t desc_dma;
	struct sk_buff *rx_head;
};

struct mt76_queue_ops {
	int (*init)(struct mt76_dev *dev);

	int (*alloc)(struct mt76_dev *dev, struct mt76_queue *q);

	int (*add_buf)(struct mt76_dev *dev, struct mt76_queue *q,
		       struct mt76_queue_buf *buf, int nbufs, u32 info,
		       struct sk_buff *skb, void *txwi);

	void *(*dequeue)(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
			 int *len, u32 *info, bool *more);

	void (*rx_reset)(struct mt76_dev *dev, enum mt76_rxq_id qid);

	void (*tx_cleanup)(struct mt76_dev *dev, enum mt76_txq_id qid,
			   bool flush);

	void (*kick)(struct mt76_dev *dev, struct mt76_queue *q);
};

struct mt76_wcid {
	u8 idx;
	u8 hw_key_idx;

	__le16 tx_rate;
	bool tx_rate_set;
	u8 tx_rate_nss;
	s8 max_txpwr_adj;
};

struct mt76_txq {
	struct list_head list;
	struct mt76_queue *hwq;
	struct mt76_wcid *wcid;

	struct sk_buff_head retry_q;

	u16 agg_ssn;
	bool send_bar;
	bool aggr;
};

struct mt76_txwi_cache {
	u32 txwi[8];
	dma_addr_t dma_addr;
	struct list_head list;
};
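
/*
 * Illustrative sketch, not part of this header: a minimal MMIO-backed
 * mt76_bus_ops implementation, assuming the bus driver has ioremap'd the
 * register window into dev->regs. Kept under #if 0 because struct mt76_dev
 * is only fully defined further down in this file; the real helpers live
 * in mmio.c, and the sketch_ names below are hypothetical.
 */
#if 0
static u32 sketch_rr(struct mt76_dev *dev, u32 offset)
{
	return readl(dev->regs + offset);
}

static void sketch_wr(struct mt76_dev *dev, u32 offset, u32 val)
{
	writel(val, dev->regs + offset);
}

static u32 sketch_rmw(struct mt76_dev *dev, u32 offset, u32 mask, u32 val)
{
	/* clear the bits in mask, then OR in the new value */
	val |= sketch_rr(dev, offset) & ~mask;
	sketch_wr(dev, offset, val);
	return val;
}

static const struct mt76_bus_ops sketch_mmio_ops = {
	.rr = sketch_rr,
	.wr = sketch_wr,
	.rmw = sketch_rmw,
};
#endif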

enum {
	MT76_STATE_INITIALIZED,
	MT76_STATE_RUNNING,
	MT76_SCANNING,
	MT76_RESET,
};

struct mt76_hw_cap {
	bool has_2ghz;
	bool has_5ghz;
};

struct mt76_driver_ops {
	u16 txwi_size;

	void (*update_survey)(struct mt76_dev *dev);

	int (*tx_prepare_skb)(struct mt76_dev *dev, void *txwi_ptr,
			      struct sk_buff *skb, struct mt76_queue *q,
			      struct mt76_wcid *wcid,
			      struct ieee80211_sta *sta, u32 *tx_info);

	void (*tx_complete_skb)(struct mt76_dev *dev, struct mt76_queue *q,
				struct mt76_queue_entry *e, bool flush);

	void (*rx_skb)(struct mt76_dev *dev, enum mt76_rxq_id q,
		       struct sk_buff *skb);

	void (*rx_poll_complete)(struct mt76_dev *dev, enum mt76_rxq_id q);
};

struct mt76_channel_state {
	u64 cc_active;
	u64 cc_busy;
};

struct mt76_sband {
	struct ieee80211_supported_band sband;
	struct mt76_channel_state *chan;
};

struct mt76_dev {
	struct ieee80211_hw *hw;
	struct cfg80211_chan_def chandef;
	struct ieee80211_channel *main_chan;

	spinlock_t lock;
	spinlock_t cc_lock;
	const struct mt76_bus_ops *bus;
	const struct mt76_driver_ops *drv;
	void __iomem *regs;
	struct device *dev;

	struct net_device napi_dev;
	struct napi_struct napi[__MT_RXQ_MAX];
	struct sk_buff_head rx_skb[__MT_RXQ_MAX];

	struct list_head txwi_cache;
	struct mt76_queue q_tx[__MT_TXQ_MAX];
	struct mt76_queue q_rx[__MT_RXQ_MAX];
	const struct mt76_queue_ops *queue_ops;

	u8 macaddr[ETH_ALEN];
	u32 rev;
	unsigned long state;

	struct mt76_sband sband_2g;
	struct mt76_sband sband_5g;
	struct debugfs_blob_wrapper eeprom;
	struct debugfs_blob_wrapper otp;
	struct mt76_hw_cap cap;

	u32 debugfs_reg;

	struct led_classdev led_cdev;
	char led_name[32];
	bool led_al;
	u8 led_pin;
};

enum mt76_phy_type {
	MT_PHY_TYPE_CCK,
	MT_PHY_TYPE_OFDM,
	MT_PHY_TYPE_HT,
	MT_PHY_TYPE_HT_GF,
	MT_PHY_TYPE_VHT,
};

struct mt76_rate_power {
	union {
		struct {
			s8 cck[4];
			s8 ofdm[8];
			s8 ht[16];
			s8 vht[10];
		};
		s8 all[38];
	};
};

#define mt76_rr(dev, ...)	(dev)->mt76.bus->rr(&((dev)->mt76), __VA_ARGS__)
#define mt76_wr(dev, ...)	(dev)->mt76.bus->wr(&((dev)->mt76), __VA_ARGS__)
#define mt76_rmw(dev, ...)	(dev)->mt76.bus->rmw(&((dev)->mt76), __VA_ARGS__)
#define mt76_wr_copy(dev, ...)	(dev)->mt76.bus->copy(&((dev)->mt76), __VA_ARGS__)

#define mt76_set(dev, offset, val)	mt76_rmw(dev, offset, 0, val)
#define mt76_clear(dev, offset, val)	mt76_rmw(dev, offset, val, 0)

#define mt76_get_field(_dev, _reg, _field)		\
	FIELD_GET(_field, mt76_rr(_dev, _reg))

#define mt76_rmw_field(_dev, _reg, _field, _val)	\
	mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val))

#define mt76_hw(dev) (dev)->mt76.hw

bool __mt76_poll(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
		 int timeout);

#define mt76_poll(dev, ...) __mt76_poll(&((dev)->mt76), __VA_ARGS__)

bool __mt76_poll_msec(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
		      int timeout);

#define mt76_poll_msec(dev, ...) __mt76_poll_msec(&((dev)->mt76), __VA_ARGS__)
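
/*
 * Illustrative sketch, not part of this header: how a chip driver uses the
 * register accessors above. They assume the wrapping driver structure
 * embeds "struct mt76_dev mt76", which is what the (dev)->mt76 expansion
 * resolves to. All names prefixed sketch_ or SKETCH_ are hypothetical.
 */
#if 0
struct sketch_dev {
	struct mt76_dev mt76;
};

#define SKETCH_REG_CTRL		0x0100
#define SKETCH_CTRL_ENABLE	BIT(0)
#define SKETCH_CTRL_MODE	GENMASK(3, 1)

static int sketch_enable(struct sketch_dev *dev)
{
	/* program the mode field, then set the enable bit */
	mt76_rmw_field(dev, SKETCH_REG_CTRL, SKETCH_CTRL_MODE, 2);
	mt76_set(dev, SKETCH_REG_CTRL, SKETCH_CTRL_ENABLE);

	/* busy-poll until the enable bit reads back as set */
	if (!mt76_poll(dev, SKETCH_REG_CTRL, SKETCH_CTRL_ENABLE,
		       SKETCH_CTRL_ENABLE, 100))
		return -ETIMEDOUT;

	return 0;
}
#endif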

void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs);

static inline u16 mt76_chip(struct mt76_dev *dev)
{
	return dev->rev >> 16;
}

static inline u16 mt76_rev(struct mt76_dev *dev)
{
	return dev->rev & 0xffff;
}

#define mt76xx_chip(dev) mt76_chip(&((dev)->mt76))
#define mt76xx_rev(dev) mt76_rev(&((dev)->mt76))

#define mt76_init_queues(dev)		(dev)->mt76.queue_ops->init(&((dev)->mt76))
#define mt76_queue_alloc(dev, ...)	(dev)->mt76.queue_ops->alloc(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_add_buf(dev, ...)	(dev)->mt76.queue_ops->add_buf(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_rx_reset(dev, ...)	(dev)->mt76.queue_ops->rx_reset(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_tx_cleanup(dev, ...)	(dev)->mt76.queue_ops->tx_cleanup(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_kick(dev, ...)	(dev)->mt76.queue_ops->kick(&((dev)->mt76), __VA_ARGS__)

static inline struct mt76_channel_state *
mt76_channel_state(struct mt76_dev *dev, struct ieee80211_channel *c)
{
	struct mt76_sband *msband;
	int idx;

	if (c->band == NL80211_BAND_2GHZ)
		msband = &dev->sband_2g;
	else
		msband = &dev->sband_5g;

	idx = c - &msband->sband.channels[0];
	return &msband->chan[idx];
}

int mt76_register_device(struct mt76_dev *dev, bool vht,
			 struct ieee80211_rate *rates, int n_rates);
void mt76_unregister_device(struct mt76_dev *dev);

struct dentry *mt76_register_debugfs(struct mt76_dev *dev);

int mt76_eeprom_init(struct mt76_dev *dev, int len);
void mt76_eeprom_override(struct mt76_dev *dev);

static inline struct ieee80211_txq *
mtxq_to_txq(struct mt76_txq *mtxq)
{
	void *ptr = mtxq;

	return container_of(ptr, struct ieee80211_txq, drv_priv);
}

int mt76_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
		      struct sk_buff *skb, struct mt76_wcid *wcid,
		      struct ieee80211_sta *sta);

void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb);
void mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
	     struct mt76_wcid *wcid, struct sk_buff *skb);
void mt76_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq);
void mt76_txq_remove(struct mt76_dev *dev, struct ieee80211_txq *txq);
void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta,
			 bool send_bar);
void mt76_txq_schedule(struct mt76_dev *dev, struct mt76_queue *hwq);
void mt76_txq_schedule_all(struct mt76_dev *dev);
void mt76_release_buffered_frames(struct ieee80211_hw *hw,
				  struct ieee80211_sta *sta,
				  u16 tids, int nframes,
				  enum ieee80211_frame_release_type reason,
				  bool more_data);
void mt76_set_channel(struct mt76_dev *dev);
int mt76_get_survey(struct ieee80211_hw *hw, int idx,
		    struct survey_info *survey);

/* internal */
void mt76_tx_free(struct mt76_dev *dev);
void mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
void mt76_rx_complete(struct mt76_dev *dev, enum mt76_rxq_id q);

#endif
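
/*
 * Usage sketch (illustrative only): the registration entry points above are
 * meant to be called from a chip driver's probe path, roughly as follows,
 * with sketch_rates and eeprom_size standing in for chip-specific values:
 *
 *	struct mt76_dev *mdev = &dev->mt76;
 *	int err;
 *
 *	mt76_mmio_init(mdev, regs);
 *
 *	err = mt76_eeprom_init(mdev, eeprom_size);
 *	if (err < 0)
 *		return err;
 *
 *	err = mt76_register_device(mdev, true, sketch_rates,
 *				   ARRAY_SIZE(sketch_rates));
 */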