/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef __MT76_H
#define __MT76_H

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/leds.h>
#include <net/mac80211.h>
#include "util.h"

#define MT_TX_RING_SIZE		256
#define MT_MCU_RING_SIZE	32
#define MT_RX_BUF_SIZE		2048

struct mt76_dev;

struct mt76_bus_ops {
	u32 (*rr)(struct mt76_dev *dev, u32 offset);
	void (*wr)(struct mt76_dev *dev, u32 offset, u32 val);
	u32 (*rmw)(struct mt76_dev *dev, u32 offset, u32 mask, u32 val);
	void (*copy)(struct mt76_dev *dev, u32 offset, const void *data,
		     int len);
};

enum mt76_txq_id {
	MT_TXQ_VO = IEEE80211_AC_VO,
	MT_TXQ_VI = IEEE80211_AC_VI,
	MT_TXQ_BE = IEEE80211_AC_BE,
	MT_TXQ_BK = IEEE80211_AC_BK,
	MT_TXQ_PSD,
	MT_TXQ_MCU,
	MT_TXQ_BEACON,
	MT_TXQ_CAB,
	__MT_TXQ_MAX
};

enum mt76_rxq_id {
	MT_RXQ_MAIN,
	MT_RXQ_MCU,
	__MT_RXQ_MAX
};

struct mt76_queue_buf {
	dma_addr_t addr;
	int len;
};

struct mt76_queue_entry {
	union {
		void *buf;
		struct sk_buff *skb;
	};
	struct mt76_txwi_cache *txwi;
	bool schedule;
};

struct mt76_queue_regs {
	u32 desc_base;
	u32 ring_size;
	u32 cpu_idx;
	u32 dma_idx;
} __packed __aligned(4);

struct mt76_queue {
	struct mt76_queue_regs __iomem *regs;

	spinlock_t lock;
	struct mt76_queue_entry *entry;
	struct mt76_desc *desc;

	struct list_head swq;
	int swq_queued;

	u16 head;
	u16 tail;
	int ndesc;
	int queued;
	int buf_size;

	u8 buf_offset;
	u8 hw_idx;

	dma_addr_t desc_dma;
	struct sk_buff *rx_head;
};

struct mt76_queue_ops {
	int (*init)(struct mt76_dev *dev);

	int (*alloc)(struct mt76_dev *dev, struct mt76_queue *q);

	int (*add_buf)(struct mt76_dev *dev, struct mt76_queue *q,
		       struct mt76_queue_buf *buf, int nbufs, u32 info,
		       struct sk_buff *skb, void *txwi);

	void *(*dequeue)(struct mt76_dev *dev, struct mt76_queue *q,
			 bool flush, int *len, u32 *info, bool *more);

	void (*rx_reset)(struct mt76_dev *dev, enum mt76_rxq_id qid);

	void (*tx_cleanup)(struct mt76_dev *dev, enum mt76_txq_id qid,
			   bool flush);

	void (*kick)(struct mt76_dev *dev, struct mt76_queue *q);
};

enum mt76_wcid_flags {
	MT_WCID_FLAG_CHECK_PS,
	MT_WCID_FLAG_PS,
};

struct mt76_wcid {
	struct mt76_rx_tid __rcu *aggr[IEEE80211_NUM_TIDS];

	struct work_struct aggr_work;

	unsigned long flags;

	u8 idx;
	u8 hw_key_idx;

	u8 sta:1;

	u8 rx_check_pn;
	u8 rx_key_pn[IEEE80211_NUM_TIDS][6];

	__le16 tx_rate;
	bool tx_rate_set;
	u8 tx_rate_nss;
	s8 max_txpwr_adj;
	bool sw_iv;
};
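/*
 * Illustrative note, not part of the original header: wcid_to_sta() below
 * converts a struct mt76_wcid pointer straight back to its ieee80211_sta via
 * container_of() on drv_priv, so a driver embedding mt76_wcid in its
 * per-station data must keep it as the first member. A hypothetical
 * driver-private struct would look like:
 *
 *	struct example_sta {
 *		struct mt76_wcid wcid;	<- must stay the first member
 *		u32 other_driver_state;
 *	};
 */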
struct mt76_txq {
	struct list_head list;
	struct mt76_queue *hwq;
	struct mt76_wcid *wcid;

	struct sk_buff_head retry_q;

	u16 agg_ssn;
	bool send_bar;
	bool aggr;
};

struct mt76_txwi_cache {
	u32 txwi[8];
	dma_addr_t dma_addr;
	struct list_head list;
};

struct mt76_rx_tid {
	struct rcu_head rcu_head;

	struct mt76_dev *dev;

	spinlock_t lock;
	struct delayed_work reorder_work;

	u16 head;
	u8 size;
	u8 nframes;

	u8 started:1, stopped:1, timer_pending:1;

	struct sk_buff *reorder_buf[];
};

enum {
	MT76_STATE_INITIALIZED,
	MT76_STATE_RUNNING,
	MT76_SCANNING,
	MT76_RESET,
	MT76_OFFCHANNEL,
};

struct mt76_hw_cap {
	bool has_2ghz;
	bool has_5ghz;
};

struct mt76_driver_ops {
	u16 txwi_size;

	void (*update_survey)(struct mt76_dev *dev);

	int (*tx_prepare_skb)(struct mt76_dev *dev, void *txwi_ptr,
			      struct sk_buff *skb, struct mt76_queue *q,
			      struct mt76_wcid *wcid,
			      struct ieee80211_sta *sta, u32 *tx_info);

	void (*tx_complete_skb)(struct mt76_dev *dev, struct mt76_queue *q,
				struct mt76_queue_entry *e, bool flush);

	void (*rx_skb)(struct mt76_dev *dev, enum mt76_rxq_id q,
		       struct sk_buff *skb);

	void (*rx_poll_complete)(struct mt76_dev *dev, enum mt76_rxq_id q);

	void (*sta_ps)(struct mt76_dev *dev, struct ieee80211_sta *sta,
		       bool ps);
};

struct mt76_channel_state {
	u64 cc_active;
	u64 cc_busy;
};

struct mt76_sband {
	struct ieee80211_supported_band sband;
	struct mt76_channel_state *chan;
};

struct mt76_dev {
	struct ieee80211_hw *hw;
	struct cfg80211_chan_def chandef;
	struct ieee80211_channel *main_chan;

	spinlock_t lock;
	spinlock_t cc_lock;
	const struct mt76_bus_ops *bus;
	const struct mt76_driver_ops *drv;
	void __iomem *regs;
	struct device *dev;

	struct net_device napi_dev;
	spinlock_t rx_lock;
	struct napi_struct napi[__MT_RXQ_MAX];
	struct sk_buff_head rx_skb[__MT_RXQ_MAX];

	struct list_head txwi_cache;
	struct mt76_queue q_tx[__MT_TXQ_MAX];
	struct mt76_queue q_rx[__MT_RXQ_MAX];
	const struct mt76_queue_ops *queue_ops;

	wait_queue_head_t tx_wait;

	u8 macaddr[ETH_ALEN];
	u32 rev;
	unsigned long state;

	u8 antenna_mask;

	struct mt76_sband sband_2g;
	struct mt76_sband sband_5g;
	struct debugfs_blob_wrapper eeprom;
	struct debugfs_blob_wrapper otp;
	struct mt76_hw_cap cap;

	u32 debugfs_reg;

	struct led_classdev led_cdev;
	char led_name[32];
	bool led_al;
	u8 led_pin;
};

enum mt76_phy_type {
	MT_PHY_TYPE_CCK,
	MT_PHY_TYPE_OFDM,
	MT_PHY_TYPE_HT,
	MT_PHY_TYPE_HT_GF,
	MT_PHY_TYPE_VHT,
};

struct mt76_rate_power {
	union {
		struct {
			s8 cck[4];
			s8 ofdm[8];
			s8 ht[16];
			s8 vht[10];
		};
		s8 all[38];
	};
};

struct mt76_rx_status {
	struct mt76_wcid *wcid;

	unsigned long reorder_time;

	u8 iv[6];

	u8 aggr:1;
	u8 tid;
	u16 seqno;

	u16 freq;
	u32 flag;
	u8 enc_flags;
	u8 encoding:2, bw:3;
	u8 rate_idx;
	u8 nss;
	u8 band;
	u8 signal;
	u8 chains;
	s8 chain_signal[IEEE80211_MAX_CHAINS];
};
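/*
 * Usage sketch for the register helpers defined below. The register offset
 * and field masks here are hypothetical, chosen only to show the
 * FIELD_PREP()/FIELD_GET() plumbing; real offsets live in the per-chip
 * register headers. rmw() clears the mask bits before OR-ing in the value,
 * which is what makes mt76_set()/mt76_clear() work.
 *
 *	#define MT_EXAMPLE_CFG		0x0400
 *	#define MT_EXAMPLE_CFG_MODE	GENMASK(3, 0)
 *	#define MT_EXAMPLE_CFG_EN	BIT(31)
 *
 *	mt76_set(dev, MT_EXAMPLE_CFG, MT_EXAMPLE_CFG_EN);
 *	mt76_rmw_field(dev, MT_EXAMPLE_CFG, MT_EXAMPLE_CFG_MODE, 5);
 *	mode = mt76_get_field(dev, MT_EXAMPLE_CFG, MT_EXAMPLE_CFG_MODE);
 */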
#define mt76_rr(dev, ...)	(dev)->mt76.bus->rr(&((dev)->mt76), __VA_ARGS__)
#define mt76_wr(dev, ...)	(dev)->mt76.bus->wr(&((dev)->mt76), __VA_ARGS__)
#define mt76_rmw(dev, ...)	(dev)->mt76.bus->rmw(&((dev)->mt76), __VA_ARGS__)
#define mt76_wr_copy(dev, ...)	(dev)->mt76.bus->copy(&((dev)->mt76), __VA_ARGS__)

#define mt76_set(dev, offset, val)	mt76_rmw(dev, offset, 0, val)
#define mt76_clear(dev, offset, val)	mt76_rmw(dev, offset, val, 0)

#define mt76_get_field(_dev, _reg, _field)		\
	FIELD_GET(_field, mt76_rr(_dev, _reg))

#define mt76_rmw_field(_dev, _reg, _field, _val)	\
	mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val))

#define mt76_hw(dev) (dev)->mt76.hw

bool __mt76_poll(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
		 int timeout);

#define mt76_poll(dev, ...) __mt76_poll(&((dev)->mt76), __VA_ARGS__)

bool __mt76_poll_msec(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
		      int timeout);

#define mt76_poll_msec(dev, ...) __mt76_poll_msec(&((dev)->mt76), __VA_ARGS__)

void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs);

static inline u16 mt76_chip(struct mt76_dev *dev)
{
	return dev->rev >> 16;
}

static inline u16 mt76_rev(struct mt76_dev *dev)
{
	return dev->rev & 0xffff;
}

#define mt76xx_chip(dev) mt76_chip(&((dev)->mt76))
#define mt76xx_rev(dev) mt76_rev(&((dev)->mt76))

#define mt76_init_queues(dev)		(dev)->mt76.queue_ops->init(&((dev)->mt76))
#define mt76_queue_alloc(dev, ...)	(dev)->mt76.queue_ops->alloc(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_add_buf(dev, ...)	(dev)->mt76.queue_ops->add_buf(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_rx_reset(dev, ...)	(dev)->mt76.queue_ops->rx_reset(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_tx_cleanup(dev, ...)	(dev)->mt76.queue_ops->tx_cleanup(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_kick(dev, ...)	(dev)->mt76.queue_ops->kick(&((dev)->mt76), __VA_ARGS__)
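/*
 * Example with illustrative (made up) values: if dev->mt76.rev reads back as
 * 0x76030010, mt76xx_chip(dev) returns the chip id 0x7603 and mt76xx_rev(dev)
 * the revision 0x0010. Likewise, a caller waiting for a hypothetical busy bit
 * to clear could use the poll helpers above:
 *
 *	if (!mt76_poll(dev, MT_EXAMPLE_STATUS, MT_EXAMPLE_STATUS_BUSY, 0, 100))
 *		return -ETIMEDOUT;
 */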
static inline struct mt76_channel_state *
mt76_channel_state(struct mt76_dev *dev, struct ieee80211_channel *c)
{
	struct mt76_sband *msband;
	int idx;

	if (c->band == NL80211_BAND_2GHZ)
		msband = &dev->sband_2g;
	else
		msband = &dev->sband_5g;

	idx = c - &msband->sband.channels[0];
	return &msband->chan[idx];
}

struct mt76_dev *mt76_alloc_device(unsigned int size,
				   const struct ieee80211_ops *ops);
int mt76_register_device(struct mt76_dev *dev, bool vht,
			 struct ieee80211_rate *rates, int n_rates);
void mt76_unregister_device(struct mt76_dev *dev);

struct dentry *mt76_register_debugfs(struct mt76_dev *dev);

int mt76_eeprom_init(struct mt76_dev *dev, int len);
void mt76_eeprom_override(struct mt76_dev *dev);

static inline struct ieee80211_txq *
mtxq_to_txq(struct mt76_txq *mtxq)
{
	void *ptr = mtxq;

	return container_of(ptr, struct ieee80211_txq, drv_priv);
}

static inline struct ieee80211_sta *
wcid_to_sta(struct mt76_wcid *wcid)
{
	void *ptr = wcid;

	if (!wcid || !wcid->sta)
		return NULL;

	return container_of(ptr, struct ieee80211_sta, drv_priv);
}

int mt76_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
		      struct sk_buff *skb, struct mt76_wcid *wcid,
		      struct ieee80211_sta *sta);

void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb);
void mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
	     struct mt76_wcid *wcid, struct sk_buff *skb);
void mt76_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq);
void mt76_txq_remove(struct mt76_dev *dev, struct ieee80211_txq *txq);
void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta,
			 bool send_bar);
void mt76_txq_schedule(struct mt76_dev *dev, struct mt76_queue *hwq);
void mt76_txq_schedule_all(struct mt76_dev *dev);
void mt76_release_buffered_frames(struct ieee80211_hw *hw,
				  struct ieee80211_sta *sta,
				  u16 tids, int nframes,
				  enum ieee80211_frame_release_type reason,
				  bool more_data);
void mt76_set_channel(struct mt76_dev *dev);
int mt76_get_survey(struct ieee80211_hw *hw, int idx,
		    struct survey_info *survey);
void mt76_set_stream_caps(struct mt76_dev *dev, bool vht);

int mt76_rx_aggr_start(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid,
		       u16 ssn, u8 size);
void mt76_rx_aggr_stop(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid);

void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
			 struct ieee80211_key_conf *key);

/* internal */
void mt76_tx_free(struct mt76_dev *dev);
void mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
		      int queue);
void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q);
void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames);

#endif