/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef __MT76_H
#define __MT76_H

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/leds.h>
#include <linux/bitfield.h>	/* FIELD_GET/FIELD_PREP, used below */
#include <linux/debugfs.h>	/* struct debugfs_blob_wrapper, embedded below */
#include <net/mac80211.h>
#include "util.h"

#define MT_TX_RING_SIZE		256
#define MT_MCU_RING_SIZE	32
#define MT_RX_BUF_SIZE		2048

struct mt76_dev;

/* Register access ops, provided by the bus backend */
struct mt76_bus_ops {
	u32 (*rr)(struct mt76_dev *dev, u32 offset);
	void (*wr)(struct mt76_dev *dev, u32 offset, u32 val);
	u32 (*rmw)(struct mt76_dev *dev, u32 offset, u32 mask, u32 val);
	void (*copy)(struct mt76_dev *dev, u32 offset, const void *data,
		     int len);
};
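
/*
 * A minimal sketch of how an MMIO backend might fill in these ops; the
 * actual backend is registered via mt76_mmio_init(), declared below, and
 * may do more (e.g. trace register accesses). The rmw() semantics (clear
 * the bits in "mask", then set the bits in "val") follow from the
 * mt76_set()/mt76_clear() helpers further down:
 *
 *	static u32 mt76_mmio_rr(struct mt76_dev *dev, u32 offset)
 *	{
 *		return readl(dev->regs + offset);
 *	}
 *
 *	static void mt76_mmio_wr(struct mt76_dev *dev, u32 offset, u32 val)
 *	{
 *		writel(val, dev->regs + offset);
 *	}
 *
 *	static u32 mt76_mmio_rmw(struct mt76_dev *dev, u32 offset, u32 mask,
 *				 u32 val)
 *	{
 *		val |= mt76_mmio_rr(dev, offset) & ~mask;
 *		mt76_mmio_wr(dev, offset, val);
 *		return val;
 *	}
 */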

enum mt76_txq_id {
	MT_TXQ_VO = IEEE80211_AC_VO,
	MT_TXQ_VI = IEEE80211_AC_VI,
	MT_TXQ_BE = IEEE80211_AC_BE,
	MT_TXQ_BK = IEEE80211_AC_BK,
	MT_TXQ_PSD,
	MT_TXQ_MCU,
	MT_TXQ_BEACON,
	MT_TXQ_CAB,
	__MT_TXQ_MAX
};

enum mt76_rxq_id {
	MT_RXQ_MAIN,
	MT_RXQ_MCU,
	__MT_RXQ_MAX
};

struct mt76_queue_buf {
	dma_addr_t addr;
	int len;
};

struct mt76_queue_entry {
	union {
		void *buf;
		struct sk_buff *skb;
	};
	struct mt76_txwi_cache *txwi;
	bool schedule;
};

/* Hardware DMA ring registers */
struct mt76_queue_regs {
	u32 desc_base;
	u32 ring_size;
	u32 cpu_idx;
	u32 dma_idx;
} __packed __aligned(4);

struct mt76_queue {
	struct mt76_queue_regs __iomem *regs;

	spinlock_t lock;
	struct mt76_queue_entry *entry;
	struct mt76_desc *desc;

	struct list_head swq;
	int swq_queued;

	u16 head;
	u16 tail;
	int ndesc;
	int queued;
	int buf_size;

	u8 buf_offset;
	u8 hw_idx;

	dma_addr_t desc_dma;
	struct sk_buff *rx_head;
};

struct mt76_queue_ops {
	int (*init)(struct mt76_dev *dev);

	int (*alloc)(struct mt76_dev *dev, struct mt76_queue *q);

	int (*add_buf)(struct mt76_dev *dev, struct mt76_queue *q,
		       struct mt76_queue_buf *buf, int nbufs, u32 info,
		       struct sk_buff *skb, void *txwi);

	void *(*dequeue)(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
			 int *len, u32 *info, bool *more);

	void (*rx_reset)(struct mt76_dev *dev, enum mt76_rxq_id qid);

	void (*tx_cleanup)(struct mt76_dev *dev, enum mt76_txq_id qid,
			   bool flush);

	void (*kick)(struct mt76_dev *dev, struct mt76_queue *q);
};

enum mt76_wcid_flags {
	MT_WCID_FLAG_CHECK_PS,
	MT_WCID_FLAG_PS,
};

/* Per-station entry in the hardware WCID (wireless client ID) table */
struct mt76_wcid {
	struct mt76_rx_tid __rcu *aggr[IEEE80211_NUM_TIDS];

	struct work_struct aggr_work;

	unsigned long flags;

	u8 idx;
	u8 hw_key_idx;

	u8 sta:1;

	u8 rx_check_pn;
	u8 rx_key_pn[IEEE80211_NUM_TIDS][6];

	__le16 tx_rate;
	bool tx_rate_set;
	u8 tx_rate_nss;
	s8 max_txpwr_adj;
	bool sw_iv;
};

struct mt76_txq {
	struct list_head list;
	struct mt76_queue *hwq;
	struct mt76_wcid *wcid;

	struct sk_buff_head retry_q;

	u16 agg_ssn;
	bool send_bar;
	bool aggr;
};

/* Preallocated, DMA-mapped buffer for a hardware TX descriptor (TXWI) */
struct mt76_txwi_cache {
	u32 txwi[8];
	dma_addr_t dma_addr;
	struct list_head list;
};

/* Per-TID RX aggregation reorder buffer state */
struct mt76_rx_tid {
	struct rcu_head rcu_head;

	struct mt76_dev *dev;

	spinlock_t lock;
	struct delayed_work reorder_work;

	u16 head;
	u8 size;
	u8 nframes;

	u8 started:1, stopped:1, timer_pending:1;

	struct sk_buff *reorder_buf[];
};

enum {
	MT76_STATE_INITIALIZED,
	MT76_STATE_RUNNING,
	MT76_SCANNING,
	MT76_RESET,
};

struct mt76_hw_cap {
	bool has_2ghz;
	bool has_5ghz;
};

struct mt76_driver_ops {
	u16 txwi_size;

	void (*update_survey)(struct mt76_dev *dev);

	int (*tx_prepare_skb)(struct mt76_dev *dev, void *txwi_ptr,
			      struct sk_buff *skb, struct mt76_queue *q,
			      struct mt76_wcid *wcid,
			      struct ieee80211_sta *sta, u32 *tx_info);

	void (*tx_complete_skb)(struct mt76_dev *dev, struct mt76_queue *q,
				struct mt76_queue_entry *e, bool flush);

	void (*rx_skb)(struct mt76_dev *dev, enum mt76_rxq_id q,
		       struct sk_buff *skb);

	void (*rx_poll_complete)(struct mt76_dev *dev, enum mt76_rxq_id q);

	void (*sta_ps)(struct mt76_dev *dev, struct ieee80211_sta *sta,
		       bool ps);
};

struct mt76_channel_state {
	u64 cc_active;
	u64 cc_busy;
};

struct mt76_sband {
	struct ieee80211_supported_band sband;
	struct mt76_channel_state *chan;
};

struct mt76_dev {
	struct ieee80211_hw *hw;
	struct cfg80211_chan_def chandef;
	struct ieee80211_channel *main_chan;

	spinlock_t lock;
	spinlock_t cc_lock;
	const struct mt76_bus_ops *bus;
	const struct mt76_driver_ops *drv;
	void __iomem *regs;
	struct device *dev;

	struct net_device napi_dev;
	struct napi_struct napi[__MT_RXQ_MAX];
	struct sk_buff_head rx_skb[__MT_RXQ_MAX];

	struct list_head txwi_cache;
	struct mt76_queue q_tx[__MT_TXQ_MAX];
	struct mt76_queue q_rx[__MT_RXQ_MAX];
	const struct mt76_queue_ops *queue_ops;

	u8 macaddr[ETH_ALEN];
	u32 rev;
	unsigned long state;

	struct mt76_sband sband_2g;
	struct mt76_sband sband_5g;
	struct debugfs_blob_wrapper eeprom;
	struct debugfs_blob_wrapper otp;
	struct mt76_hw_cap cap;

	u32 debugfs_reg;

	struct led_classdev led_cdev;
	char led_name[32];
	bool led_al;
	u8 led_pin;
};

enum mt76_phy_type {
	MT_PHY_TYPE_CCK,
	MT_PHY_TYPE_OFDM,
	MT_PHY_TYPE_HT,
	MT_PHY_TYPE_HT_GF,
	MT_PHY_TYPE_VHT,
};

struct mt76_rate_power {
	union {
		struct {
			s8 cck[4];
			s8 ofdm[8];
			s8 ht[16];
			s8 vht[10];
		};
		s8 all[38];
	};
};

struct mt76_rx_status {
	struct mt76_wcid *wcid;

	unsigned long reorder_time;

	u8 iv[6];

	u8 aggr:1;
	u8 tid;
	u16 seqno;

	u16 freq;
	u32 flag;
	u8 enc_flags;
	u8 encoding:2, bw:3;
	u8 rate_idx;
	u8 nss;
	u8 band;
	u8 signal;
	u8 chains;
	s8 chain_signal[IEEE80211_MAX_CHAINS];
};

#define mt76_rr(dev, ...)	(dev)->mt76.bus->rr(&((dev)->mt76), __VA_ARGS__)
#define mt76_wr(dev, ...)	(dev)->mt76.bus->wr(&((dev)->mt76), __VA_ARGS__)
#define mt76_rmw(dev, ...)	(dev)->mt76.bus->rmw(&((dev)->mt76), __VA_ARGS__)
#define mt76_wr_copy(dev, ...)	(dev)->mt76.bus->copy(&((dev)->mt76), __VA_ARGS__)

#define mt76_set(dev, offset, val)	mt76_rmw(dev, offset, 0, val)
#define mt76_clear(dev, offset, val)	mt76_rmw(dev, offset, val, 0)

#define mt76_get_field(_dev, _reg, _field)		\
	FIELD_GET(_field, mt76_rr(_dev, _reg))

#define mt76_rmw_field(_dev, _reg, _field, _val)	\
	mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val))

#define mt76_hw(dev) (dev)->mt76.hw

bool __mt76_poll(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
		 int timeout);

#define mt76_poll(dev, ...) __mt76_poll(&((dev)->mt76), __VA_ARGS__)

bool __mt76_poll_msec(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
		      int timeout);

#define mt76_poll_msec(dev, ...) __mt76_poll_msec(&((dev)->mt76), __VA_ARGS__)
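
/*
 * Usage sketch for the accessors above. Register and field names here are
 * illustrative, not part of this header. Chip drivers embed struct mt76_dev
 * as a member named "mt76" in their own device struct, which is what these
 * wrappers expand against:
 *
 *	u32 val = mt76_rr(dev, MT_EXAMPLE_REG);
 *
 *	mt76_set(dev, MT_EXAMPLE_REG, MT_EXAMPLE_EN);	// set bits
 *	mt76_clear(dev, MT_EXAMPLE_REG, MT_EXAMPLE_EN);	// clear bits
 *	mt76_rmw_field(dev, MT_EXAMPLE_REG, MT_EXAMPLE_FIELD, 5);
 *
 *	// Wait up to 100 ms for a busy bit to clear:
 *	if (!mt76_poll_msec(dev, MT_EXAMPLE_REG, MT_EXAMPLE_BUSY, 0, 100))
 *		return -ETIMEDOUT;
 */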

void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs);

static inline u16 mt76_chip(struct mt76_dev *dev)
{
	return dev->rev >> 16;
}

static inline u16 mt76_rev(struct mt76_dev *dev)
{
	return dev->rev & 0xffff;
}

#define mt76xx_chip(dev) mt76_chip(&((dev)->mt76))
#define mt76xx_rev(dev) mt76_rev(&((dev)->mt76))

#define mt76_init_queues(dev) (dev)->mt76.queue_ops->init(&((dev)->mt76))
#define mt76_queue_alloc(dev, ...) (dev)->mt76.queue_ops->alloc(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_add_buf(dev, ...) (dev)->mt76.queue_ops->add_buf(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_rx_reset(dev, ...) (dev)->mt76.queue_ops->rx_reset(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_tx_cleanup(dev, ...) (dev)->mt76.queue_ops->tx_cleanup(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_kick(dev, ...) (dev)->mt76.queue_ops->kick(&((dev)->mt76), __VA_ARGS__)
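
/*
 * TX flow sketch using the queue wrappers above: hand DMA-mapped buffers to
 * a hardware queue, then kick the ring so the DMA engine picks them up.
 * "q", "txwi", "tx_info" and "skb_dma_addr" stand for locals the caller set
 * up; error handling is omitted:
 *
 *	struct mt76_queue_buf buf = {
 *		.addr = skb_dma_addr,	// e.g. from dma_map_single()
 *		.len = skb->len,
 *	};
 *
 *	mt76_queue_add_buf(dev, q, &buf, 1, tx_info, skb, txwi);
 *	mt76_queue_kick(dev, q);
 */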

static inline struct mt76_channel_state *
mt76_channel_state(struct mt76_dev *dev, struct ieee80211_channel *c)
{
	struct mt76_sband *msband;
	int idx;

	if (c->band == NL80211_BAND_2GHZ)
		msband = &dev->sband_2g;
	else
		msband = &dev->sband_5g;

	idx = c - &msband->sband.channels[0];
	return &msband->chan[idx];
}

int mt76_register_device(struct mt76_dev *dev, bool vht,
			 struct ieee80211_rate *rates, int n_rates);
void mt76_unregister_device(struct mt76_dev *dev);

struct dentry *mt76_register_debugfs(struct mt76_dev *dev);

int mt76_eeprom_init(struct mt76_dev *dev, int len);
void mt76_eeprom_override(struct mt76_dev *dev);

/* Map the driver-private area back to the mac80211 structure embedding it */
static inline struct ieee80211_txq *
mtxq_to_txq(struct mt76_txq *mtxq)
{
	void *ptr = mtxq;

	return container_of(ptr, struct ieee80211_txq, drv_priv);
}

static inline struct ieee80211_sta *
wcid_to_sta(struct mt76_wcid *wcid)
{
	void *ptr = wcid;

	if (!wcid || !wcid->sta)
		return NULL;

	return container_of(ptr, struct ieee80211_sta, drv_priv);
}

int mt76_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
		      struct sk_buff *skb, struct mt76_wcid *wcid,
		      struct ieee80211_sta *sta);

void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb);
void mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
	     struct mt76_wcid *wcid, struct sk_buff *skb);
void mt76_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq);
void mt76_txq_remove(struct mt76_dev *dev, struct ieee80211_txq *txq);
void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta,
			 bool send_bar);
void mt76_txq_schedule(struct mt76_dev *dev, struct mt76_queue *hwq);
void mt76_txq_schedule_all(struct mt76_dev *dev);
void mt76_release_buffered_frames(struct ieee80211_hw *hw,
				  struct ieee80211_sta *sta,
				  u16 tids, int nframes,
				  enum ieee80211_frame_release_type reason,
				  bool more_data);
void mt76_set_channel(struct mt76_dev *dev);
int mt76_get_survey(struct ieee80211_hw *hw, int idx,
		    struct survey_info *survey);

int mt76_rx_aggr_start(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid,
		       u16 ssn, u8 size);
void mt76_rx_aggr_stop(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid);

void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
			 struct ieee80211_key_conf *key);

/* internal */
void mt76_tx_free(struct mt76_dev *dev);
void mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
		      int queue);
void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q);
void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames);

#endif