/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
 * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "mt76x02.h"

static void mt76x02_set_beacon_offsets(struct mt76x02_dev *dev)
{
	u32 regs[4] = {};
	u16 val;
	int i;

	for (i = 0; i < dev->beacon_ops->nslots; i++) {
		val = i * dev->beacon_ops->slot_size;
		regs[i / 4] |= (val / 64) << (8 * (i % 4));
	}

	for (i = 0; i < 4; i++)
		mt76_wr(dev, MT_BCN_OFFSET(i), regs[i]);
}

static int
mt76x02_write_beacon(struct mt76x02_dev *dev, int offset, struct sk_buff *skb)
{
	int beacon_len = dev->beacon_ops->slot_size;
	struct mt76x02_txwi txwi;

	if (WARN_ON_ONCE(beacon_len < skb->len + sizeof(struct mt76x02_txwi)))
		return -ENOSPC;

	mt76x02_mac_write_txwi(dev, &txwi, skb, NULL, NULL, skb->len);

	mt76_wr_copy(dev, offset, &txwi, sizeof(txwi));
	offset += sizeof(txwi);

	mt76_wr_copy(dev, offset, skb->data, skb->len);
	return 0;
}

static int
__mt76x02_mac_set_beacon(struct mt76x02_dev *dev, u8 bcn_idx,
			 struct sk_buff *skb)
{
	int beacon_len = dev->beacon_ops->slot_size;
	int beacon_addr = MT_BEACON_BASE + (beacon_len * bcn_idx);
	int ret = 0;
	int i;

	/* Prevent corrupt transmissions during update */
	mt76_set(dev, MT_BCN_BYPASS_MASK, BIT(bcn_idx));

	if (skb) {
		ret = mt76x02_write_beacon(dev, beacon_addr, skb);
		if (!ret)
			dev->beacon_data_mask |= BIT(bcn_idx);
	} else {
		dev->beacon_data_mask &= ~BIT(bcn_idx);
		for (i = 0; i < beacon_len; i += 4)
			mt76_wr(dev, beacon_addr + i, 0);
	}

	mt76_wr(dev, MT_BCN_BYPASS_MASK, 0xff00 | ~dev->beacon_data_mask);

	return ret;
}

int mt76x02_mac_set_beacon(struct mt76x02_dev *dev, u8 vif_idx,
			   struct sk_buff *skb)
{
	bool force_update = false;
	int bcn_idx = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(dev->beacons); i++) {
		if (vif_idx == i) {
			force_update = !!dev->beacons[i] ^ !!skb;

			if (dev->beacons[i])
				dev_kfree_skb(dev->beacons[i]);

			dev->beacons[i] = skb;
			__mt76x02_mac_set_beacon(dev, bcn_idx, skb);
		} else if (force_update && dev->beacons[i]) {
			__mt76x02_mac_set_beacon(dev, bcn_idx,
						 dev->beacons[i]);
		}

		bcn_idx += !!dev->beacons[i];
	}

	for (i = bcn_idx; i < ARRAY_SIZE(dev->beacons); i++) {
		if (!(dev->beacon_data_mask & BIT(i)))
			break;

		__mt76x02_mac_set_beacon(dev, i, NULL);
	}

	mt76_rmw_field(dev, MT_MAC_BSSID_DW1, MT_MAC_BSSID_DW1_MBEACON_N,
		       bcn_idx - 1);
	return 0;
}
EXPORT_SYMBOL_GPL(mt76x02_mac_set_beacon);

static void
__mt76x02_mac_set_beacon_enable(struct mt76x02_dev *dev, u8 vif_idx,
				bool val, struct sk_buff *skb)
{
	u8 old_mask = dev->mt76.beacon_mask;
	bool en;
	u32 reg;

	if (val) {
		dev->mt76.beacon_mask |= BIT(vif_idx);
		if (skb)
			mt76x02_mac_set_beacon(dev, vif_idx, skb);
	} else {
		dev->mt76.beacon_mask &= ~BIT(vif_idx);
		mt76x02_mac_set_beacon(dev, vif_idx, NULL);
	}

	if (!!old_mask == !!dev->mt76.beacon_mask)
		return;

	en = dev->mt76.beacon_mask;

	reg = MT_BEACON_TIME_CFG_BEACON_TX |
	      MT_BEACON_TIME_CFG_TBTT_EN |
	      MT_BEACON_TIME_CFG_TIMER_EN;
	mt76_rmw(dev, MT_BEACON_TIME_CFG, reg, reg * en);

	dev->beacon_ops->beacon_enable(dev, en);
}

void mt76x02_mac_set_beacon_enable(struct mt76x02_dev *dev,
				   struct ieee80211_vif *vif, bool val)
{
	u8 vif_idx = ((struct mt76x02_vif *)vif->drv_priv)->idx;
	struct sk_buff *skb = NULL;

	dev->beacon_ops->pre_tbtt_enable(dev, false);

	if (mt76_is_usb(dev))
		skb = ieee80211_beacon_get(mt76_hw(dev), vif);

	if (!dev->mt76.beacon_mask)
		dev->tbtt_count = 0;

	__mt76x02_mac_set_beacon_enable(dev, vif_idx, val, skb);

	dev->beacon_ops->pre_tbtt_enable(dev, true);
}

void
mt76x02_resync_beacon_timer(struct mt76x02_dev *dev)
{
	u32 timer_val = dev->mt76.beacon_int << 4;

	dev->tbtt_count++;

	/*
	 * Beacon timer drifts by 1us every tick, the timer is configured
	 * in 1/16 TU (64us) units.
	 */
	if (dev->tbtt_count < 63)
		return;

	/*
	 * The updated beacon interval takes effect after two TBTT, because
	 * at this point the original interval has already been loaded into
	 * the next TBTT_TIMER value
	 */
	if (dev->tbtt_count == 63)
		timer_val -= 1;

	mt76_rmw_field(dev, MT_BEACON_TIME_CFG,
		       MT_BEACON_TIME_CFG_INTVAL, timer_val);

	if (dev->tbtt_count >= 64)
		dev->tbtt_count = 0;
}
EXPORT_SYMBOL_GPL(mt76x02_resync_beacon_timer);

void
mt76x02_update_beacon_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
	struct mt76x02_dev *dev = (struct mt76x02_dev *)priv;
	struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
	struct sk_buff *skb = NULL;

	if (!(dev->mt76.beacon_mask & BIT(mvif->idx)))
		return;

	skb = ieee80211_beacon_get(mt76_hw(dev), vif);
	if (!skb)
		return;

	mt76x02_mac_set_beacon(dev, mvif->idx, skb);
}
EXPORT_SYMBOL_GPL(mt76x02_update_beacon_iter);

static void
mt76x02_add_buffered_bc(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
	struct beacon_bc_data *data = priv;
	struct mt76x02_dev *dev = data->dev;
	struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
	struct ieee80211_tx_info *info;
	struct sk_buff *skb;

	if (!(dev->mt76.beacon_mask & BIT(mvif->idx)))
		return;

	skb = ieee80211_get_buffered_bc(mt76_hw(dev), vif);
	if (!skb)
		return;

	info = IEEE80211_SKB_CB(skb);
	info->control.vif = vif;
	info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ;
	mt76_skb_set_moredata(skb, true);
	__skb_queue_tail(&data->q, skb);
	data->tail[mvif->idx] = skb;
}

void
mt76x02_enqueue_buffered_bc(struct mt76x02_dev *dev, struct beacon_bc_data *data,
			    int max_nframes)
{
	int i, nframes;

	data->dev = dev;
	__skb_queue_head_init(&data->q);

	do {
		nframes = skb_queue_len(&data->q);
		ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
			IEEE80211_IFACE_ITER_RESUME_ALL,
			mt76x02_add_buffered_bc, data);
	} while (nframes != skb_queue_len(&data->q) &&
		 skb_queue_len(&data->q) < max_nframes);

	if (!skb_queue_len(&data->q))
		return;

	for (i = 0; i < ARRAY_SIZE(data->tail); i++) {
		if (!data->tail[i])
			continue;
		mt76_skb_set_moredata(data->tail[i], false);
	}
}
EXPORT_SYMBOL_GPL(mt76x02_enqueue_buffered_bc);

void mt76x02_init_beacon_config(struct mt76x02_dev *dev)
{
	int i;

	mt76_clear(dev, MT_BEACON_TIME_CFG, (MT_BEACON_TIME_CFG_TIMER_EN |
					     MT_BEACON_TIME_CFG_TBTT_EN |
					     MT_BEACON_TIME_CFG_BEACON_TX));
	mt76_set(dev, MT_BEACON_TIME_CFG, MT_BEACON_TIME_CFG_SYNC_MODE);
	mt76_wr(dev, MT_BCN_BYPASS_MASK, 0xffff);

	for (i = 0; i < 8; i++)
		mt76x02_mac_set_beacon(dev, i, NULL);

	mt76x02_set_beacon_offsets(dev);
}
EXPORT_SYMBOL_GPL(mt76x02_init_beacon_config);