// SPDX-License-Identifier: ISC
/* Copyright (C) 2019 MediaTek Inc.
 *
 * Author: Ryder Lee <ryder.lee@mediatek.com>
 *         Roy Luo <royluo@google.com>
 *         Lorenzo Bianconi <lorenzo@kernel.org>
 *         Felix Fietkau <nbd@nbd.name>
 */

#include "mt7615.h"
#include "../dma.h"
#include "mac.h"

static int
mt7615_init_tx_queue(struct mt7615_dev *dev, struct mt76_sw_queue *q,
		     int idx, int n_desc)
{
	struct mt76_queue *hwq;
	int err;

	hwq = devm_kzalloc(dev->mt76.dev, sizeof(*hwq), GFP_KERNEL);
	if (!hwq)
		return -ENOMEM;

	err = mt76_queue_alloc(dev, hwq, idx, n_desc, 0, MT_TX_RING_BASE);
	if (err < 0)
		return err;

	INIT_LIST_HEAD(&q->swq);
	q->q = hwq;

	return 0;
}

static int
mt7622_init_tx_queues_multi(struct mt7615_dev *dev)
{
	static const u8 wmm_queue_map[] = {
		MT7622_TXQ_AC0,
		MT7622_TXQ_AC1,
		MT7622_TXQ_AC2,
		MT7622_TXQ_AC3,
	};
	int ret;
	int i;

	for (i = 0; i < ARRAY_SIZE(wmm_queue_map); i++) {
		ret = mt7615_init_tx_queue(dev, &dev->mt76.q_tx[i],
					   wmm_queue_map[i],
					   MT7615_TX_RING_SIZE / 2);
		if (ret)
			return ret;
	}

	ret = mt7615_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_PSD],
				   MT7622_TXQ_MGMT, MT7615_TX_MGMT_RING_SIZE);
	if (ret)
		return ret;

	ret = mt7615_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_MCU],
				   MT7622_TXQ_MCU, MT7615_TX_MCU_RING_SIZE);
	return ret;
}

static int
mt7615_init_tx_queues(struct mt7615_dev *dev)
{
	struct mt76_sw_queue *q;
	int ret, i;

	ret = mt7615_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_FWDL],
				   MT7615_TXQ_FWDL,
				   MT7615_TX_FWDL_RING_SIZE);
	if (ret)
		return ret;

	if (!is_mt7615(&dev->mt76))
		return mt7622_init_tx_queues_multi(dev);

	ret = mt7615_init_tx_queue(dev, &dev->mt76.q_tx[0], 0,
				   MT7615_TX_RING_SIZE);
	if (ret)
		return ret;

	/* mt7615 uses a single hardware TX ring for all ACs; the remaining
	 * software queues just reference the ring allocated for queue 0.
	 */
	for (i = 1; i < MT_TXQ_MCU; i++) {
		q = &dev->mt76.q_tx[i];
		INIT_LIST_HEAD(&q->swq);
		q->q = dev->mt76.q_tx[0].q;
	}

	return mt7615_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_MCU],
				    MT7615_TXQ_MCU,
				    MT7615_TX_MCU_RING_SIZE);
}

void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
			 struct sk_buff *skb)
{
	struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
	__le32 *rxd = (__le32 *)skb->data;
	__le32 *end = (__le32 *)&skb->data[skb->len];
	enum rx_pkt_type type;
	u16 flag;

	type = FIELD_GET(MT_RXD0_PKT_TYPE, le32_to_cpu(rxd[0]));
	flag = FIELD_GET(MT_RXD0_PKT_FLAG, le32_to_cpu(rxd[0]));
	if (type == PKT_TYPE_RX_EVENT && flag == 0x1)
		type = PKT_TYPE_NORMAL_MCU;

	switch (type) {
	case PKT_TYPE_TXS:
		/* each TX status report in the buffer is 7 dwords long */
		for (rxd++; rxd + 7 <= end; rxd += 7)
			mt7615_mac_add_txs(dev, rxd);
		dev_kfree_skb(skb);
		break;
	case PKT_TYPE_TXRX_NOTIFY:
		mt7615_mac_tx_free(dev, skb);
		break;
	case PKT_TYPE_RX_EVENT:
		mt7615_mcu_rx_event(dev, skb);
		break;
	case PKT_TYPE_NORMAL_MCU:
	case PKT_TYPE_NORMAL:
		if (!mt7615_mac_fill_rx(dev, skb)) {
			mt76_rx(&dev->mt76, q, skb);
			return;
		}
		/* fall through */
	default:
		dev_kfree_skb(skb);
		break;
	}
}

static void
mt7615_tx_cleanup(struct mt7615_dev *dev)
{
	int i;

	mt76_queue_tx_cleanup(dev, MT_TXQ_MCU, false);
	if (is_mt7615(&dev->mt76)) {
		mt76_queue_tx_cleanup(dev, MT_TXQ_BE, false);
	} else {
		for (i = 0; i < IEEE80211_NUM_ACS; i++)
			mt76_queue_tx_cleanup(dev, i, false);
	}
}
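/* TX NAPI handler: reclaim completed TX descriptors. The cleanup runs a
 * second time after napi_complete_done() so that completions racing with
 * the re-enabling of the TX done interrupt are not left on the ring.
 */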
static int
mt7615_poll_tx(struct napi_struct *napi, int budget)
{
	struct mt7615_dev *dev;

	dev = container_of(napi, struct mt7615_dev, mt76.tx_napi);

	mt7615_tx_cleanup(dev);

	if (napi_complete_done(napi, 0))
		mt7615_irq_enable(dev, MT_INT_TX_DONE_ALL);

	mt7615_tx_cleanup(dev);

	mt7615_mac_sta_poll(dev);

	tasklet_schedule(&dev->mt76.tx_tasklet);

	return 0;
}

static void mt7622_dma_sched_init(struct mt7615_dev *dev)
{
	u32 reg = mt7615_reg_map(dev, MT_DMASHDL_BASE);
	int i;

	mt76_rmw(dev, reg + MT_DMASHDL_PKT_MAX_SIZE,
		 MT_DMASHDL_PKT_MAX_SIZE_PLE | MT_DMASHDL_PKT_MAX_SIZE_PSE,
		 FIELD_PREP(MT_DMASHDL_PKT_MAX_SIZE_PLE, 1) |
		 FIELD_PREP(MT_DMASHDL_PKT_MAX_SIZE_PSE, 8));

	for (i = 0; i <= 5; i++)
		mt76_wr(dev, reg + MT_DMASHDL_GROUP_QUOTA(i),
			FIELD_PREP(MT_DMASHDL_GROUP_QUOTA_MIN, 0x10) |
			FIELD_PREP(MT_DMASHDL_GROUP_QUOTA_MAX, 0x800));

	mt76_wr(dev, reg + MT_DMASHDL_Q_MAP(0), 0x42104210);
	mt76_wr(dev, reg + MT_DMASHDL_Q_MAP(1), 0x42104210);
	mt76_wr(dev, reg + MT_DMASHDL_Q_MAP(2), 0x5);
	mt76_wr(dev, reg + MT_DMASHDL_Q_MAP(3), 0);

	mt76_wr(dev, reg + MT_DMASHDL_SCHED_SET0, 0x6012345f);
	mt76_wr(dev, reg + MT_DMASHDL_SCHED_SET1, 0xedcba987);
}

static void mt7663_dma_sched_init(struct mt7615_dev *dev)
{
	int i;

	mt76_rmw(dev, MT_DMA_SHDL(MT_DMASHDL_PKT_MAX_SIZE),
		 MT_DMASHDL_PKT_MAX_SIZE_PLE | MT_DMASHDL_PKT_MAX_SIZE_PSE,
		 FIELD_PREP(MT_DMASHDL_PKT_MAX_SIZE_PLE, 1) |
		 FIELD_PREP(MT_DMASHDL_PKT_MAX_SIZE_PSE, 8));

	/* enable refill control group 0, 1, 2, 4, 5 */
	mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_REFILL), 0xffc80000);
	/* enable group 0, 1, 2, 4, 5, 15 */
	mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_OPTIONAL), 0x70068037);

	/* each group's min quota must be larger than PLE_PKT_MAX_SIZE_NUM */
	for (i = 0; i < 5; i++)
		mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_GROUP_QUOTA(i)),
			FIELD_PREP(MT_DMASHDL_GROUP_QUOTA_MIN, 0x40) |
			FIELD_PREP(MT_DMASHDL_GROUP_QUOTA_MAX, 0x800));
	mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_GROUP_QUOTA(5)),
		FIELD_PREP(MT_DMASHDL_GROUP_QUOTA_MIN, 0x40) |
		FIELD_PREP(MT_DMASHDL_GROUP_QUOTA_MAX, 0x40));
	mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_GROUP_QUOTA(15)),
		FIELD_PREP(MT_DMASHDL_GROUP_QUOTA_MIN, 0x20) |
		FIELD_PREP(MT_DMASHDL_GROUP_QUOTA_MAX, 0x20));

	mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_Q_MAP(0)), 0x42104210);
	mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_Q_MAP(1)), 0x42104210);
	mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_Q_MAP(2)), 0x00050005);
	mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_Q_MAP(3)), 0);
	/* ALTX0 and ALTX1 QID mapping to group 5 */
	mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_SCHED_SET0), 0x6012345f);
	mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_SCHED_SET1), 0xedcba987);
}

int mt7615_dma_init(struct mt7615_dev *dev)
{
	int rx_ring_size = MT7615_RX_RING_SIZE;
	int ret;

	mt76_dma_attach(&dev->mt76);

	mt76_wr(dev, MT_WPDMA_GLO_CFG,
		MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE |
		MT_WPDMA_GLO_CFG_FIFO_LITTLE_ENDIAN |
		MT_WPDMA_GLO_CFG_OMIT_TX_INFO);

	mt76_rmw_field(dev, MT_WPDMA_GLO_CFG,
		       MT_WPDMA_GLO_CFG_TX_BT_SIZE_BIT0, 0x1);

	mt76_rmw_field(dev, MT_WPDMA_GLO_CFG,
		       MT_WPDMA_GLO_CFG_TX_BT_SIZE_BIT21, 0x1);

	mt76_rmw_field(dev, MT_WPDMA_GLO_CFG,
		       MT_WPDMA_GLO_CFG_DMA_BURST_SIZE, 0x3);

	mt76_rmw_field(dev, MT_WPDMA_GLO_CFG,
		       MT_WPDMA_GLO_CFG_MULTI_DMA_EN, 0x3);

	if (is_mt7615(&dev->mt76)) {
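		/* mt7615-specific WPDMA tuning; these values (and the raw
		 * 0x7158/0x7000 register offsets below) appear to come from
		 * the vendor reference driver and are not otherwise
		 * documented.
		 */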
		mt76_set(dev, MT_WPDMA_GLO_CFG,
			 MT_WPDMA_GLO_CFG_FIRST_TOKEN_ONLY);

		mt76_wr(dev, MT_WPDMA_GLO_CFG1, 0x1);
		mt76_wr(dev, MT_WPDMA_TX_PRE_CFG, 0xf0000);
		mt76_wr(dev, MT_WPDMA_RX_PRE_CFG, 0xf7f0000);
		mt76_wr(dev, MT_WPDMA_ABT_CFG, 0x4000026);
		mt76_wr(dev, MT_WPDMA_ABT_CFG1, 0x18811881);
		mt76_set(dev, 0x7158, BIT(16));
		mt76_clear(dev, 0x7000, BIT(23));
	}

	mt76_wr(dev, MT_WPDMA_RST_IDX, ~0);

	ret = mt7615_init_tx_queues(dev);
	if (ret)
		return ret;

	/* init rx queues */
	ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU], 1,
			       MT7615_RX_MCU_RING_SIZE, MT_RX_BUF_SIZE,
			       MT_RX_RING_BASE);
	if (ret)
		return ret;

	if (!is_mt7615(&dev->mt76))
		rx_ring_size /= 2;

	ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN], 0,
			       rx_ring_size, MT_RX_BUF_SIZE, MT_RX_RING_BASE);
	if (ret)
		return ret;

	mt76_wr(dev, MT_DELAY_INT_CFG, 0);

	ret = mt76_init_queues(dev);
	if (ret < 0)
		return ret;

	netif_tx_napi_add(&dev->mt76.napi_dev, &dev->mt76.tx_napi,
			  mt7615_poll_tx, NAPI_POLL_WEIGHT);
	napi_enable(&dev->mt76.tx_napi);

	mt76_poll(dev, MT_WPDMA_GLO_CFG,
		  MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
		  MT_WPDMA_GLO_CFG_RX_DMA_BUSY, 0, 1000);

	/* start dma engine */
	mt76_set(dev, MT_WPDMA_GLO_CFG,
		 MT_WPDMA_GLO_CFG_TX_DMA_EN |
		 MT_WPDMA_GLO_CFG_RX_DMA_EN);

	/* enable interrupts for TX/RX rings */
	mt7615_irq_enable(dev, MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL |
			       MT_INT_MCU_CMD);

	if (is_mt7622(&dev->mt76))
		mt7622_dma_sched_init(dev);

	if (is_mt7663(&dev->mt76))
		mt7663_dma_sched_init(dev);

	return 0;
}

void mt7615_dma_cleanup(struct mt7615_dev *dev)
{
	mt76_clear(dev, MT_WPDMA_GLO_CFG,
		   MT_WPDMA_GLO_CFG_TX_DMA_EN |
		   MT_WPDMA_GLO_CFG_RX_DMA_EN);
	mt76_set(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_SW_RESET);

	tasklet_kill(&dev->mt76.tx_tasklet);
	mt76_dma_cleanup(&dev->mt76);
}