/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/kernel.h>

#include "mt76x02.h"

/* mac80211 .tx entry point: pick the mt76_wcid the frame is sent under
 * and forward the frame to the shared mt76 TX path.
 *
 * Selection order: the destination station's own wcid when mac80211
 * provides a station, the vif's group wcid for vif-only frames (no
 * station, e.g. multicast), and the device-global wcid as the fallback.
 */
void mt76x02_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
		struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct mt76x02_dev *dev = hw->priv;
	struct ieee80211_vif *vif = info->control.vif;
	struct mt76_wcid *wcid = &dev->mt76.global_wcid;

	if (control->sta) {
		struct mt76x02_sta *msta;

		msta = (struct mt76x02_sta *)control->sta->drv_priv;
		wcid = &msta->wcid;
	} else if (vif) {
		struct mt76x02_vif *mvif;

		mvif = (struct mt76x02_vif *)vif->drv_priv;
		wcid = &mvif->group_wcid;
	}

	mt76_tx(&dev->mt76, control->sta, wcid, skb);
}
EXPORT_SYMBOL_GPL(mt76x02_tx);

/* RX dispatch: buffers from the MCU queue go to the MCU event handler;
 * everything else has its leading mt76x02_rxwi descriptor stripped and
 * parsed, then goes up the common mt76 RX path.  Frames rejected by the
 * descriptor parser are freed here.
 */
void mt76x02_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
			  struct sk_buff *skb)
{
	struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
	/* the RXWI descriptor sits in front of the 802.11 frame */
	void *rxwi = skb->data;

	if (q == MT_RXQ_MCU) {
		/* this is used just by mmio code */
		mt76_mcu_rx_event(&dev->mt76, skb);
		return;
	}

	skb_pull(skb, sizeof(struct mt76x02_rxwi));
	/* nonzero return means the frame could not be parsed: drop it */
	if (mt76x02_mac_process_rx(dev, skb, rxwi)) {
		dev_kfree_skb(skb);
		return;
	}

	mt76_rx(mdev, q, skb);
}
EXPORT_SYMBOL_GPL(mt76x02_queue_rx_skb);

/* Return the calibrated per-rate power limit for @rate from the
 * device's rate power tables (VHT, HT, or legacy CCK/OFDM).
 */
s8 mt76x02_tx_get_max_txpwr_adj(struct mt76x02_dev *dev,
				const struct ieee80211_tx_rate *rate)
{
	s8 max_txpwr;

	if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
		u8 mcs = ieee80211_rate_get_vht_mcs(rate);

		if (mcs == 8 || mcs == 9) {
			/* VHT MCS 8/9 share a single table entry */
			max_txpwr = dev->mt76.rate_power.vht[8];
		} else {
			u8 nss, idx;

			/* MCS 0-7 reuse the HT table: 8 entries per
			 * spatial stream, masked to the table size
			 */
			nss = ieee80211_rate_get_vht_nss(rate);
			idx = ((nss - 1) << 3) + mcs;
			max_txpwr = dev->mt76.rate_power.ht[idx & 0xf];
		}
	} else if (rate->flags & IEEE80211_TX_RC_MCS) {
		max_txpwr = dev->mt76.rate_power.ht[rate->idx & 0xf];
	} else {
		enum nl80211_band band = dev->mt76.chandef.chan->band;

		if (band == NL80211_BAND_2GHZ) {
			const struct ieee80211_rate *r;
			struct wiphy *wiphy = dev->mt76.hw->wiphy;
			struct mt76_rate_power *rp = &dev->mt76.rate_power;

			/* 2.4 GHz legacy: short-preamble rates use the
			 * CCK table, the others the OFDM table, indexed
			 * by the rate's hw_value
			 */
			r = &wiphy->bands[band]->bitrates[rate->idx];
			if (r->flags & IEEE80211_RATE_SHORT_PREAMBLE)
				max_txpwr = rp->cck[r->hw_value & 0x3];
			else
				max_txpwr = rp->ofdm[r->hw_value & 0x7];
		} else {
			/* non-2.4 GHz bands index the OFDM table directly */
			max_txpwr = dev->mt76.rate_power.ofdm[rate->idx & 0x7];
		}
	}

	return max_txpwr;
}

/* Convert a requested TX power into the hardware power-adjustment
 * encoding, after clamping to the configured limit, subtracting the
 * target power, and capping at the per-rate maximum.
 *
 * Returns 0 when TPC is disabled.  Otherwise a non-negative delta is
 * capped at 7, and a negative one maps to (txpwr + 32) / 2 (i.e. the
 * 8..15 range), with 8 as the floor for anything below -16.
 * NOTE(review): the exact encoding is hardware-defined - confirm
 * against the vendor register documentation.
 */
s8 mt76x02_tx_get_txpwr_adj(struct mt76x02_dev *dev, s8 txpwr, s8 max_txpwr_adj)
{
	txpwr = min_t(s8, txpwr, dev->mt76.txpower_conf);
	txpwr -= (dev->target_power + dev->target_power_delta[0]);
	txpwr = min_t(s8, txpwr, max_txpwr_adj);

	if (!dev->enable_tpc)
		return 0;
	else if (txpwr >= 0)
		return min_t(s8, txpwr, 7);
	else
		return (txpwr < -16) ? 8 : (txpwr + 32) / 2;
}

/* Program the protection/auto TX power adjustment fields of
 * MT_PROT_AUTO_TX_CFG from @txpwr, capped by the OFDM rate power
 * table entry 4 (presumably the 24M rate - confirm against the
 * table layout).
 */
void mt76x02_tx_set_txpwr_auto(struct mt76x02_dev *dev, s8 txpwr)
{
	s8 txpwr_adj;

	txpwr_adj = mt76x02_tx_get_txpwr_adj(dev, txpwr,
					     dev->mt76.rate_power.ofdm[4]);
	mt76_rmw_field(dev, MT_PROT_AUTO_TX_CFG,
		       MT_PROT_AUTO_TX_CFG_PROT_PADJ, txpwr_adj);
	mt76_rmw_field(dev, MT_PROT_AUTO_TX_CFG,
		       MT_PROT_AUTO_TX_CFG_AUTO_PADJ, txpwr_adj);
}
EXPORT_SYMBOL_GPL(mt76x02_tx_set_txpwr_auto);

/* mt76 .tx_status_data hook: load one hardware TX status entry and
 * feed it to the status reporting code.  Returns true when an entry
 * was consumed, false when none was available.
 */
bool mt76x02_tx_status_data(struct mt76_dev *mdev, u8 *update)
{
	struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
	struct mt76x02_tx_status stat;

	if (!mt76x02_mac_load_tx_status(dev, &stat))
		return false;

	mt76x02_send_tx_status(dev, &stat, update);

	return true;
}
EXPORT_SYMBOL_GPL(mt76x02_tx_status_data);

/* mt76 .tx_prepare_skb hook: fill the hardware TXWI descriptor for a
 * frame about to be queued, register it for TX status tracking, and
 * set up the per-frame DMA info word (queue selection and flags).
 * Always returns 0.
 */
int mt76x02_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
			   enum mt76_txq_id qid, struct mt76_wcid *wcid,
			   struct ieee80211_sta *sta,
			   struct mt76_tx_info *tx_info)
{
	struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx_info->skb->data;
	struct mt76x02_txwi *txwi = txwi_ptr;
	bool ampdu = IEEE80211_SKB_CB(tx_info->skb)->flags & IEEE80211_TX_CTL_AMPDU;
	int hdrlen, len, pid, qsel = MT_QSEL_EDCA;

	/* PS delivery queue: clear the hardware drop flag for the
	 * station first (only for indices that fit the hw wcid table)
	 */
	if (qid == MT_TXQ_PSD && wcid && wcid->idx < 128)
		mt76x02_mac_wcid_set_drop(dev, wcid->idx, false);

	hdrlen = ieee80211_hdrlen(hdr->frame_control);
	/* hdrlen & 2: exclude the 2-byte alignment pad present when the
	 * 802.11 header length is not a multiple of 4 - NOTE(review):
	 * confirm against the hardware header padding scheme
	 */
	len = tx_info->skb->len - (hdrlen & 2);
	mt76x02_mac_write_txwi(dev, txwi, tx_info->skb, wcid, sta, len);

	pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);

	/* encode packet rate for no-skb packet id to fix up status reporting */
	if (pid == MT_PACKET_ID_NO_SKB)
		pid = MT_PACKET_ID_HAS_RATE |
		      (le16_to_cpu(txwi->rate) & MT_RXWI_RATE_INDEX);

	txwi->pktid = pid;

	/* status-tracked frames go over the management queue while
	 * aggregation is active
	 */
	if (mt76_is_skb_pktid(pid) && ampdu)
		qsel = MT_QSEL_MGMT;

	tx_info->info = FIELD_PREP(MT_TXD_INFO_QSEL, qsel) |
			MT_TXD_INFO_80211;

	/* set WIV when there is no hardware key for this wcid or IVs are
	 * generated in software - NOTE(review): presumably tells the hw
	 * to keep/insert the IV field; confirm against the TXD spec
	 */
	if (!wcid || wcid->hw_key_idx == 0xff || wcid->sw_iv)
		tx_info->info |= MT_TXD_INFO_WIV;

	return 0;
}
EXPORT_SYMBOL_GPL(mt76x02_tx_prepare_skb);