/*
 * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* DXE - DMA transfer engine
 * We have 2 channels (high prio and low prio) for TX and 2 channels for RX.
 * Data packets are transferred through the low-priority channels,
 * management packets through the high-priority ones.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/interrupt.h>
#include <linux/soc/qcom/smem_state.h>
#include "wcn36xx.h"
#include "txrx.h"

static void wcn36xx_ccu_write_register(struct wcn36xx *wcn, int addr, int data)
{
	wcn36xx_dbg(WCN36XX_DBG_DXE,
		    "wcn36xx_ccu_write_register: addr=%x, data=%x\n",
		    addr, data);

	writel(data, wcn->ccu_base + addr);
}

static void wcn36xx_dxe_write_register(struct wcn36xx *wcn, int addr, int data)
{
	wcn36xx_dbg(WCN36XX_DBG_DXE,
		    "wcn36xx_dxe_write_register: addr=%x, data=%x\n",
		    addr, data);

	writel(data, wcn->dxe_base + addr);
}

static void wcn36xx_dxe_read_register(struct wcn36xx *wcn, int addr, int *data)
{
	*data = readl(wcn->dxe_base + addr);

	wcn36xx_dbg(WCN36XX_DBG_DXE,
		    "wcn36xx_dxe_read_register: addr=%x, data=%x\n",
		    addr, *data);
}

static void wcn36xx_dxe_free_ctl_block(struct wcn36xx_dxe_ch *ch)
{
	struct wcn36xx_dxe_ctl *ctl = ch->head_blk_ctl, *next;
	int i;

	for (i = 0; i < ch->desc_num && ctl; i++) {
		next = ctl->next;
		kfree(ctl);
		ctl = next;
	}
}

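/* Allocate the per-channel control blocks and link them into a circular
 * list: the last block points back to the head, mirroring the circular
 * descriptor ring the hardware will walk.
 */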
static int wcn36xx_dxe_allocate_ctl_block(struct wcn36xx_dxe_ch *ch)
{
	struct wcn36xx_dxe_ctl *prev_ctl = NULL;
	struct wcn36xx_dxe_ctl *cur_ctl = NULL;
	int i;

	spin_lock_init(&ch->lock);
	for (i = 0; i < ch->desc_num; i++) {
		cur_ctl = kzalloc(sizeof(*cur_ctl), GFP_KERNEL);
		if (!cur_ctl)
			goto out_fail;

		cur_ctl->ctl_blk_order = i;
		if (i == 0) {
			ch->head_blk_ctl = cur_ctl;
			ch->tail_blk_ctl = cur_ctl;
		} else if (i == ch->desc_num - 1) {
			prev_ctl->next = cur_ctl;
			cur_ctl->next = ch->head_blk_ctl;
		} else {
			prev_ctl->next = cur_ctl;
		}
		prev_ctl = cur_ctl;
	}

	return 0;

out_fail:
	wcn36xx_dxe_free_ctl_block(ch);
	return -ENOMEM;
}

int wcn36xx_dxe_alloc_ctl_blks(struct wcn36xx *wcn)
{
	int ret;

	wcn->dxe_tx_l_ch.ch_type = WCN36XX_DXE_CH_TX_L;
	wcn->dxe_tx_h_ch.ch_type = WCN36XX_DXE_CH_TX_H;
	wcn->dxe_rx_l_ch.ch_type = WCN36XX_DXE_CH_RX_L;
	wcn->dxe_rx_h_ch.ch_type = WCN36XX_DXE_CH_RX_H;

	wcn->dxe_tx_l_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_TX_L;
	wcn->dxe_tx_h_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_TX_H;
	wcn->dxe_rx_l_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_RX_L;
	wcn->dxe_rx_h_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_RX_H;

	wcn->dxe_tx_l_ch.dxe_wq = WCN36XX_DXE_WQ_TX_L;
	wcn->dxe_tx_h_ch.dxe_wq = WCN36XX_DXE_WQ_TX_H;

	wcn->dxe_tx_l_ch.ctrl_bd = WCN36XX_DXE_CTRL_TX_L_BD;
	wcn->dxe_tx_h_ch.ctrl_bd = WCN36XX_DXE_CTRL_TX_H_BD;

	wcn->dxe_tx_l_ch.ctrl_skb = WCN36XX_DXE_CTRL_TX_L_SKB;
	wcn->dxe_tx_h_ch.ctrl_skb = WCN36XX_DXE_CTRL_TX_H_SKB;

	wcn->dxe_tx_l_ch.reg_ctrl = WCN36XX_DXE_REG_CTL_TX_L;
	wcn->dxe_tx_h_ch.reg_ctrl = WCN36XX_DXE_REG_CTL_TX_H;

	wcn->dxe_tx_l_ch.def_ctrl = WCN36XX_DXE_CH_DEFAULT_CTL_TX_L;
	wcn->dxe_tx_h_ch.def_ctrl = WCN36XX_DXE_CH_DEFAULT_CTL_TX_H;

	/* DXE control block allocation */
	ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_tx_l_ch);
	if (ret)
		goto out_err;
	ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_tx_h_ch);
	if (ret)
		goto out_err;
	ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_rx_l_ch);
	if (ret)
		goto out_err;
	ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_rx_h_ch);
	if (ret)
		goto out_err;

	/* Initialize the SMSM state: clear TX_ENABLE, set TX_RINGS_EMPTY */
	ret = qcom_smem_state_update_bits(wcn->tx_enable_state,
					  WCN36XX_SMSM_WLAN_TX_ENABLE |
					  WCN36XX_SMSM_WLAN_TX_RINGS_EMPTY,
					  WCN36XX_SMSM_WLAN_TX_RINGS_EMPTY);
	if (ret)
		goto out_err;

	return 0;

out_err:
	wcn36xx_err("Failed to allocate DXE control blocks\n");
	wcn36xx_dxe_free_ctl_blks(wcn);
	return -ENOMEM;
}

void wcn36xx_dxe_free_ctl_blks(struct wcn36xx *wcn)
{
	wcn36xx_dxe_free_ctl_block(&wcn->dxe_tx_l_ch);
	wcn36xx_dxe_free_ctl_block(&wcn->dxe_tx_h_ch);
	wcn36xx_dxe_free_ctl_block(&wcn->dxe_rx_l_ch);
	wcn36xx_dxe_free_ctl_block(&wcn->dxe_rx_h_ch);
}

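/* Allocate the channel's descriptor ring in DMA-coherent memory and attach
 * each descriptor to its control block, filling in the channel-specific
 * control word and work-queue address.
 */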
static int wcn36xx_dxe_init_descs(struct device *dev, struct wcn36xx_dxe_ch *wcn_ch)
{
	struct wcn36xx_dxe_desc *cur_dxe = NULL;
	struct wcn36xx_dxe_desc *prev_dxe = NULL;
	struct wcn36xx_dxe_ctl *cur_ctl = NULL;
	size_t size;
	int i;

	size = wcn_ch->desc_num * sizeof(struct wcn36xx_dxe_desc);
	wcn_ch->cpu_addr = dma_alloc_coherent(dev, size, &wcn_ch->dma_addr,
					      GFP_KERNEL);
	if (!wcn_ch->cpu_addr)
		return -ENOMEM;

	memset(wcn_ch->cpu_addr, 0, size);

	cur_dxe = (struct wcn36xx_dxe_desc *)wcn_ch->cpu_addr;
	cur_ctl = wcn_ch->head_blk_ctl;

	for (i = 0; i < wcn_ch->desc_num; i++) {
		cur_ctl->desc = cur_dxe;
		cur_ctl->desc_phy_addr = wcn_ch->dma_addr +
			i * sizeof(struct wcn36xx_dxe_desc);

		switch (wcn_ch->ch_type) {
		case WCN36XX_DXE_CH_TX_L:
			cur_dxe->ctrl = WCN36XX_DXE_CTRL_TX_L;
			cur_dxe->dst_addr_l = WCN36XX_DXE_WQ_TX_L;
			break;
		case WCN36XX_DXE_CH_TX_H:
			cur_dxe->ctrl = WCN36XX_DXE_CTRL_TX_H;
			cur_dxe->dst_addr_l = WCN36XX_DXE_WQ_TX_H;
			break;
		case WCN36XX_DXE_CH_RX_L:
			cur_dxe->ctrl = WCN36XX_DXE_CTRL_RX_L;
			cur_dxe->src_addr_l = WCN36XX_DXE_WQ_RX_L;
			break;
		case WCN36XX_DXE_CH_RX_H:
			cur_dxe->ctrl = WCN36XX_DXE_CTRL_RX_H;
			cur_dxe->src_addr_l = WCN36XX_DXE_WQ_RX_H;
			break;
		}
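		/* Chain the descriptors through their physical addresses;
		 * the last descriptor wraps back to the head so the ring
		 * is circular.
		 */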
		if (i == 0) {
			cur_dxe->phy_next_l = 0;
		} else if (i < wcn_ch->desc_num - 1) {
			prev_dxe->phy_next_l = cur_ctl->desc_phy_addr;
		} else {
			prev_dxe->phy_next_l = cur_ctl->desc_phy_addr;
			cur_dxe->phy_next_l =
				wcn_ch->head_blk_ctl->desc_phy_addr;
		}
		cur_ctl = cur_ctl->next;
		prev_dxe = cur_dxe;
		cur_dxe++;
	}

	return 0;
}

static void wcn36xx_dxe_deinit_descs(struct device *dev, struct wcn36xx_dxe_ch *wcn_ch)
{
	size_t size;

	size = wcn_ch->desc_num * sizeof(struct wcn36xx_dxe_desc);
	dma_free_coherent(dev, size, wcn_ch->cpu_addr, wcn_ch->dma_addr);
}

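/* Distribute the preallocated BD chunks over a TX ring: every second
 * descriptor gets a BD buffer, and the descriptor in between will carry
 * the skb payload (see wcn36xx_dxe_tx_frame).
 */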
static void wcn36xx_dxe_init_tx_bd(struct wcn36xx_dxe_ch *ch,
				   struct wcn36xx_dxe_mem_pool *pool)
{
	int i, chunk_size = pool->chunk_size;
	dma_addr_t bd_phy_addr = pool->phy_addr;
	void *bd_cpu_addr = pool->virt_addr;
	struct wcn36xx_dxe_ctl *cur = ch->head_blk_ctl;

	for (i = 0; i < ch->desc_num; i++) {
		/* Only every second dxe needs a bd pointer,
		 * the other will point to the skb data.
		 */
		if (!(i & 1)) {
			cur->bd_phy_addr = bd_phy_addr;
			cur->bd_cpu_addr = bd_cpu_addr;
			bd_phy_addr += chunk_size;
			bd_cpu_addr += chunk_size;
		} else {
			cur->bd_phy_addr = 0;
			cur->bd_cpu_addr = NULL;
		}
		cur = cur->next;
	}
}

static int wcn36xx_dxe_enable_ch_int(struct wcn36xx *wcn, u16 wcn_ch)
{
	int reg_data = 0;

	wcn36xx_dxe_read_register(wcn,
				  WCN36XX_DXE_INT_MASK_REG,
				  &reg_data);

	reg_data |= wcn_ch;

	wcn36xx_dxe_write_register(wcn,
				   WCN36XX_DXE_INT_MASK_REG,
				   (int)reg_data);
	return 0;
}

static int wcn36xx_dxe_fill_skb(struct device *dev,
				struct wcn36xx_dxe_ctl *ctl,
				gfp_t gfp)
{
	struct wcn36xx_dxe_desc *dxe = ctl->desc;
	struct sk_buff *skb;

	skb = alloc_skb(WCN36XX_PKT_SIZE, gfp);
	if (!skb)
		return -ENOMEM;

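	/* Map the skb data area so the device can DMA the received frame
	 * straight into it; the RX descriptor's destination address points
	 * at this mapping.
	 */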
	dxe->dst_addr_l = dma_map_single(dev,
					 skb_tail_pointer(skb),
					 WCN36XX_PKT_SIZE,
					 DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dxe->dst_addr_l)) {
		dev_err(dev, "unable to map skb\n");
		kfree_skb(skb);
		return -ENOMEM;
	}
	ctl->skb = skb;

	return 0;
}

static int wcn36xx_dxe_ch_alloc_skb(struct wcn36xx *wcn,
				    struct wcn36xx_dxe_ch *wcn_ch)
{
	int i;
	struct wcn36xx_dxe_ctl *cur_ctl = NULL;

	cur_ctl = wcn_ch->head_blk_ctl;

	for (i = 0; i < wcn_ch->desc_num; i++) {
		wcn36xx_dxe_fill_skb(wcn->dev, cur_ctl, GFP_KERNEL);
		cur_ctl = cur_ctl->next;
	}

	return 0;
}

static void wcn36xx_dxe_ch_free_skbs(struct wcn36xx *wcn,
				     struct wcn36xx_dxe_ch *wcn_ch)
{
	struct wcn36xx_dxe_ctl *cur = wcn_ch->head_blk_ctl;
	int i;

	for (i = 0; i < wcn_ch->desc_num; i++) {
		kfree_skb(cur->skb);
		cur = cur->next;
	}
}

void wcn36xx_dxe_tx_ack_ind(struct wcn36xx *wcn, u32 status)
{
	struct ieee80211_tx_info *info;
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&wcn->dxe_lock, flags);
	skb = wcn->tx_ack_skb;
	wcn->tx_ack_skb = NULL;
	spin_unlock_irqrestore(&wcn->dxe_lock, flags);

	if (!skb) {
		wcn36xx_warn("Spurious TX complete indication\n");
		return;
	}

	info = IEEE80211_SKB_CB(skb);

	if (status == 1)
		info->flags |= IEEE80211_TX_STAT_ACK;

	wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ack status: %d\n", status);

	ieee80211_tx_status_irqsafe(wcn->hw, skb);
	ieee80211_wake_queues(wcn->hw);
}

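/* Walk the TX ring from the tail, releasing every descriptor the hardware
 * has marked as no longer valid: unmap the frame, return the skb to
 * mac80211 and restart any stopped queues.
 */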
static void reap_tx_dxes(struct wcn36xx *wcn, struct wcn36xx_dxe_ch *ch)
{
	struct wcn36xx_dxe_ctl *ctl;
	struct ieee80211_tx_info *info;
	unsigned long flags;

	/*
	 * Run at least one pass of the do-while loop: when the ring is
	 * completely full, head and tail point to the same element, so a
	 * plain while loop would not execute at all.
	 */
	spin_lock_irqsave(&ch->lock, flags);
	ctl = ch->tail_blk_ctl;
	do {
		if (READ_ONCE(ctl->desc->ctrl) & WCN36xx_DXE_CTRL_VLD)
			break;

		if (ctl->skb &&
		    READ_ONCE(ctl->desc->ctrl) & WCN36xx_DXE_CTRL_EOP) {
			dma_unmap_single(wcn->dev, ctl->desc->src_addr_l,
					 ctl->skb->len, DMA_TO_DEVICE);
			info = IEEE80211_SKB_CB(ctl->skb);
			if (!(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)) {
				/* No TX status was requested, so the frame
				 * can be freed now; otherwise it is kept
				 * until the TX ack indication arrives.
				 */
				ieee80211_free_txskb(wcn->hw, ctl->skb);
			}

			if (wcn->queues_stopped) {
				wcn->queues_stopped = false;
				ieee80211_wake_queues(wcn->hw);
			}

			ctl->skb = NULL;
		}
		ctl = ctl->next;
	} while (ctl != ch->head_blk_ctl);

	ch->tail_blk_ctl = ctl;
	spin_unlock_irqrestore(&ch->lock, flags);
}

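/* TX-complete interrupt handler shared by the high and low priority
 * channels: acknowledge and clear the per-channel status bits, then reap
 * the finished descriptors of whichever channel raised the interrupt.
 */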
static irqreturn_t wcn36xx_irq_tx_complete(int irq, void *dev)
{
	struct wcn36xx *wcn = (struct wcn36xx *)dev;
	int int_src, int_reason;

	wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_INT_SRC_RAW_REG, &int_src);

	if (int_src & WCN36XX_INT_MASK_CHAN_TX_H) {
		wcn36xx_dxe_read_register(wcn,
					  WCN36XX_DXE_CH_STATUS_REG_ADDR_TX_H,
					  &int_reason);

		wcn36xx_dxe_write_register(wcn,
					   WCN36XX_DXE_0_INT_CLR,
					   WCN36XX_INT_MASK_CHAN_TX_H);

		if (int_reason & WCN36XX_CH_STAT_INT_ERR_MASK) {
			wcn36xx_dxe_write_register(wcn,
						   WCN36XX_DXE_0_INT_ERR_CLR,
						   WCN36XX_INT_MASK_CHAN_TX_H);

			wcn36xx_err("DXE IRQ reported error: 0x%x in high TX channel\n",
				    int_src);
		}

		if (int_reason & WCN36XX_CH_STAT_INT_DONE_MASK) {
			wcn36xx_dxe_write_register(wcn,
						   WCN36XX_DXE_0_INT_DONE_CLR,
						   WCN36XX_INT_MASK_CHAN_TX_H);
		}

		if (int_reason & WCN36XX_CH_STAT_INT_ED_MASK) {
			wcn36xx_dxe_write_register(wcn,
						   WCN36XX_DXE_0_INT_ED_CLR,
						   WCN36XX_INT_MASK_CHAN_TX_H);
		}

		wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ready high, reason %08x\n",
			    int_reason);

		if (int_reason & (WCN36XX_CH_STAT_INT_DONE_MASK |
				  WCN36XX_CH_STAT_INT_ED_MASK))
			reap_tx_dxes(wcn, &wcn->dxe_tx_h_ch);
	}

	if (int_src & WCN36XX_INT_MASK_CHAN_TX_L) {
		wcn36xx_dxe_read_register(wcn,
					  WCN36XX_DXE_CH_STATUS_REG_ADDR_TX_L,
					  &int_reason);

		wcn36xx_dxe_write_register(wcn,
					   WCN36XX_DXE_0_INT_CLR,
					   WCN36XX_INT_MASK_CHAN_TX_L);

		if (int_reason & WCN36XX_CH_STAT_INT_ERR_MASK) {
			wcn36xx_dxe_write_register(wcn,
						   WCN36XX_DXE_0_INT_ERR_CLR,
						   WCN36XX_INT_MASK_CHAN_TX_L);

			wcn36xx_err("DXE IRQ reported error: 0x%x in low TX channel\n",
				    int_src);
		}

		if (int_reason & WCN36XX_CH_STAT_INT_DONE_MASK) {
			wcn36xx_dxe_write_register(wcn,
						   WCN36XX_DXE_0_INT_DONE_CLR,
						   WCN36XX_INT_MASK_CHAN_TX_L);
		}

		if (int_reason & WCN36XX_CH_STAT_INT_ED_MASK) {
			wcn36xx_dxe_write_register(wcn,
						   WCN36XX_DXE_0_INT_ED_CLR,
						   WCN36XX_INT_MASK_CHAN_TX_L);
		}

		wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ready low, reason %08x\n",
			    int_reason);

		if (int_reason & (WCN36XX_CH_STAT_INT_DONE_MASK |
				  WCN36XX_CH_STAT_INT_ED_MASK))
			reap_tx_dxes(wcn, &wcn->dxe_tx_l_ch);
	}

	return IRQ_HANDLED;
}

static irqreturn_t wcn36xx_irq_rx_ready(int irq, void *dev)
{
	struct wcn36xx *wcn = (struct wcn36xx *)dev;

	wcn36xx_dxe_rx_frame(wcn);

	return IRQ_HANDLED;
}

static int wcn36xx_dxe_request_irqs(struct wcn36xx *wcn)
{
	int ret;

	ret = request_irq(wcn->tx_irq, wcn36xx_irq_tx_complete,
			  IRQF_TRIGGER_HIGH, "wcn36xx_tx", wcn);
	if (ret) {
		wcn36xx_err("failed to alloc tx irq\n");
		goto out_err;
	}

	ret = request_irq(wcn->rx_irq, wcn36xx_irq_rx_ready, IRQF_TRIGGER_HIGH,
			  "wcn36xx_rx", wcn);
	if (ret) {
		wcn36xx_err("failed to alloc rx irq\n");
		goto out_txirq;
	}

	enable_irq_wake(wcn->rx_irq);

	return 0;

out_txirq:
	free_irq(wcn->tx_irq, wcn);
out_err:
	return ret;
}

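/* Acknowledge an RX interrupt and drain the ring: every descriptor the
 * hardware has handed back is refilled with a fresh skb and the received
 * frame is passed up the stack, after which the channel is re-enabled.
 */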
static int wcn36xx_rx_handle_packets(struct wcn36xx *wcn,
				     struct wcn36xx_dxe_ch *ch,
				     u32 ctrl,
				     u32 en_mask,
				     u32 int_mask,
				     u32 status_reg)
{
	struct wcn36xx_dxe_desc *dxe;
	struct wcn36xx_dxe_ctl *ctl;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
	u32 int_reason;
	int ret;

	wcn36xx_dxe_read_register(wcn, status_reg, &int_reason);
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_CLR, int_mask);

	if (int_reason & WCN36XX_CH_STAT_INT_ERR_MASK) {
		wcn36xx_dxe_write_register(wcn,
					   WCN36XX_DXE_0_INT_ERR_CLR,
					   int_mask);

		wcn36xx_err("DXE IRQ reported error on RX channel\n");
	}

	if (int_reason & WCN36XX_CH_STAT_INT_DONE_MASK)
		wcn36xx_dxe_write_register(wcn,
					   WCN36XX_DXE_0_INT_DONE_CLR,
					   int_mask);

	if (int_reason & WCN36XX_CH_STAT_INT_ED_MASK)
		wcn36xx_dxe_write_register(wcn,
					   WCN36XX_DXE_0_INT_ED_CLR,
					   int_mask);

	if (!(int_reason & (WCN36XX_CH_STAT_INT_DONE_MASK |
			    WCN36XX_CH_STAT_INT_ED_MASK)))
		return 0;

	spin_lock(&ch->lock);

	ctl = ch->head_blk_ctl;
	dxe = ctl->desc;

	while (!(READ_ONCE(dxe->ctrl) & WCN36xx_DXE_CTRL_VLD)) {
		skb = ctl->skb;
		dma_addr = dxe->dst_addr_l;
		ret = wcn36xx_dxe_fill_skb(wcn->dev, ctl, GFP_ATOMIC);
		if (!ret) {
			/* A new skb was allocated; pass the filled one up
			 * to the network stack and leave the fresh skb in
			 * the descriptor.
			 */
			dma_unmap_single(wcn->dev, dma_addr, WCN36XX_PKT_SIZE,
					 DMA_FROM_DEVICE);
			wcn36xx_rx_skb(wcn, skb);
		} /* else keep the old skb, not submitted, and reuse it for RX DMA */

		dxe->ctrl = ctrl;
		ctl = ctl->next;
		dxe = ctl->desc;
	}
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_ENCH_ADDR, en_mask);

	ch->head_blk_ctl = ctl;

	spin_unlock(&ch->lock);

	return 0;
}

void wcn36xx_dxe_rx_frame(struct wcn36xx *wcn)
{
	int int_src;

	wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_INT_SRC_RAW_REG, &int_src);

	/* RX_LOW_PRI */
	if (int_src & WCN36XX_DXE_INT_CH1_MASK)
		wcn36xx_rx_handle_packets(wcn, &wcn->dxe_rx_l_ch,
					  WCN36XX_DXE_CTRL_RX_L,
					  WCN36XX_DXE_INT_CH1_MASK,
					  WCN36XX_INT_MASK_CHAN_RX_L,
					  WCN36XX_DXE_CH_STATUS_REG_ADDR_RX_L);

	/* RX_HIGH_PRI */
	if (int_src & WCN36XX_DXE_INT_CH3_MASK)
		wcn36xx_rx_handle_packets(wcn, &wcn->dxe_rx_h_ch,
					  WCN36XX_DXE_CTRL_RX_H,
					  WCN36XX_DXE_INT_CH3_MASK,
					  WCN36XX_INT_MASK_CHAN_RX_H,
					  WCN36XX_DXE_CH_STATUS_REG_ADDR_RX_H);

	if (!int_src)
		wcn36xx_warn("No DXE interrupt pending\n");
}

int wcn36xx_dxe_allocate_mem_pools(struct wcn36xx *wcn)
{
	size_t s;
	void *cpu_addr;

	/* Allocate BD headers for MGMT frames */

	/* Where this chunk size comes from, ask QC */
	wcn->mgmt_mem_pool.chunk_size = WCN36XX_BD_CHUNK_SIZE +
		16 - (WCN36XX_BD_CHUNK_SIZE % 8);

	s = wcn->mgmt_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_H;
	cpu_addr = dma_alloc_coherent(wcn->dev, s, &wcn->mgmt_mem_pool.phy_addr,
				      GFP_KERNEL);
	if (!cpu_addr)
		goto out_err;

	wcn->mgmt_mem_pool.virt_addr = cpu_addr;
	memset(cpu_addr, 0, s);

	/* Allocate BD headers for DATA frames */

	/* Where this chunk size comes from, ask QC */
	wcn->data_mem_pool.chunk_size = WCN36XX_BD_CHUNK_SIZE +
		16 - (WCN36XX_BD_CHUNK_SIZE % 8);

	s = wcn->data_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_L;
	cpu_addr = dma_alloc_coherent(wcn->dev, s, &wcn->data_mem_pool.phy_addr,
				      GFP_KERNEL);
	if (!cpu_addr)
		goto out_err;

	wcn->data_mem_pool.virt_addr = cpu_addr;
	memset(cpu_addr, 0, s);

	return 0;

out_err:
	wcn36xx_dxe_free_mem_pools(wcn);
	wcn36xx_err("Failed to allocate BD mempool\n");
	return -ENOMEM;
}

void wcn36xx_dxe_free_mem_pools(struct wcn36xx *wcn)
{
	if (wcn->mgmt_mem_pool.virt_addr) {
		dma_free_coherent(wcn->dev, wcn->mgmt_mem_pool.chunk_size *
				  WCN36XX_DXE_CH_DESC_NUMB_TX_H,
				  wcn->mgmt_mem_pool.virt_addr,
				  wcn->mgmt_mem_pool.phy_addr);
	}

	if (wcn->data_mem_pool.virt_addr) {
		dma_free_coherent(wcn->dev, wcn->data_mem_pool.chunk_size *
				  WCN36XX_DXE_CH_DESC_NUMB_TX_L,
				  wcn->data_mem_pool.virt_addr,
				  wcn->data_mem_pool.phy_addr);
	}
}

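/* Queue one frame on a TX channel. Each transmission occupies a pair of
 * descriptors: the first carries the buffer descriptor (BD), the second
 * the skb payload. The descriptors are made valid back to front so the
 * hardware never sees a half-initialized pair.
 */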
int wcn36xx_dxe_tx_frame(struct wcn36xx *wcn,
			 struct wcn36xx_vif *vif_priv,
			 struct wcn36xx_tx_bd *bd,
			 struct sk_buff *skb,
			 bool is_low)
{
	struct wcn36xx_dxe_desc *desc_bd, *desc_skb;
	struct wcn36xx_dxe_ctl *ctl_bd, *ctl_skb;
	struct wcn36xx_dxe_ch *ch = NULL;
	unsigned long flags;
	int ret;

	ch = is_low ? &wcn->dxe_tx_l_ch : &wcn->dxe_tx_h_ch;

	spin_lock_irqsave(&ch->lock, flags);
	ctl_bd = ch->head_blk_ctl;
	ctl_skb = ctl_bd->next;

	/*
	 * If the skb is not NULL we have caught up with the tail of the
	 * ring, i.e. the ring is full. Stop the queues to let mac80211
	 * back off until the ring has an empty slot again.
	 */
	if (ctl_skb->skb) {
		ieee80211_stop_queues(wcn->hw);
		wcn->queues_stopped = true;
		spin_unlock_irqrestore(&ch->lock, flags);
		return -EBUSY;
	}

	if (unlikely(ctl_skb->bd_cpu_addr)) {
		wcn36xx_err("bd_cpu_addr must be NULL for the skb DXE descriptor\n");
		ret = -EINVAL;
		goto unlock;
	}

	desc_bd = ctl_bd->desc;
	desc_skb = ctl_skb->desc;

	ctl_bd->skb = NULL;

	/* write buffer descriptor */
	memcpy(ctl_bd->bd_cpu_addr, bd, sizeof(*bd));

	/* Set source address of the BD we send */
	desc_bd->src_addr_l = ctl_bd->bd_phy_addr;
	desc_bd->dst_addr_l = ch->dxe_wq;
	desc_bd->fr_len = sizeof(struct wcn36xx_tx_bd);

	wcn36xx_dbg(WCN36XX_DBG_DXE, "DXE TX\n");

	wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "DESC1 >>> ",
			 (char *)desc_bd, sizeof(*desc_bd));
	wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP,
			 "BD   >>> ", (char *)ctl_bd->bd_cpu_addr,
			 sizeof(struct wcn36xx_tx_bd));

	desc_skb->src_addr_l = dma_map_single(wcn->dev,
					      skb->data,
					      skb->len,
					      DMA_TO_DEVICE);
	if (dma_mapping_error(wcn->dev, desc_skb->src_addr_l)) {
		dev_err(wcn->dev, "unable to DMA map src_addr_l\n");
		ret = -ENOMEM;
		goto unlock;
	}

	ctl_skb->skb = skb;
	desc_skb->dst_addr_l = ch->dxe_wq;
	desc_skb->fr_len = ctl_skb->skb->len;

	wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "DESC2 >>> ",
			 (char *)desc_skb, sizeof(*desc_skb));
	wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "SKB   >>> ",
			 (char *)ctl_skb->skb->data, ctl_skb->skb->len);

	/* Move the head of the ring to the next empty descriptor */
	ch->head_blk_ctl = ctl_skb->next;

	/* Commit all previous writes and set descriptors to VALID */
	wmb();
	desc_skb->ctrl = ch->ctrl_skb;
	wmb();
	desc_bd->ctrl = ch->ctrl_bd;

	/*
	 * When associated, the chip can be in sleep mode while a data frame
	 * is being sent, and a register write would not wake it up. Notify
	 * the chip about the new frame through the SMSM bus instead.
	 */
	if (is_low && vif_priv->pw_state == WCN36XX_BMPS) {
		qcom_smem_state_update_bits(wcn->tx_rings_empty_state,
					    WCN36XX_SMSM_WLAN_TX_ENABLE,
					    WCN36XX_SMSM_WLAN_TX_ENABLE);
	} else {
		/* indicate End Of Packet and generate interrupt on descriptor
		 * done.
		 */
		wcn36xx_dxe_write_register(wcn, ch->reg_ctrl, ch->def_ctrl);
	}

	ret = 0;
unlock:
	spin_unlock_irqrestore(&ch->lock, flags);
	return ret;
}

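/* Bring the DXE engine up: reset it, route the channel interrupts, set up
 * the descriptor rings of all four channels and finally request the TX and
 * RX IRQs.
 */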
int wcn36xx_dxe_init(struct wcn36xx *wcn)
{
	int reg_data = 0, ret;

	reg_data = WCN36XX_DXE_REG_RESET;
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_REG_CSR_RESET, reg_data);

	/* Select channels for rx avail and xfer done interrupts... */
	reg_data = (WCN36XX_DXE_INT_CH3_MASK | WCN36XX_DXE_INT_CH1_MASK) << 16 |
		    WCN36XX_DXE_INT_CH0_MASK | WCN36XX_DXE_INT_CH4_MASK;
	if (wcn->is_pronto)
		wcn36xx_ccu_write_register(wcn, WCN36XX_CCU_DXE_INT_SELECT_PRONTO, reg_data);
	else
		wcn36xx_ccu_write_register(wcn, WCN36XX_CCU_DXE_INT_SELECT_RIVA, reg_data);

	/***************************************/
	/* Init descriptors for TX LOW channel */
	/***************************************/
	ret = wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_tx_l_ch);
	if (ret) {
		dev_err(wcn->dev, "Error allocating descriptor\n");
		return ret;
	}
	wcn36xx_dxe_init_tx_bd(&wcn->dxe_tx_l_ch, &wcn->data_mem_pool);

	/* Write channel head to a NEXT register */
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_TX_L,
		wcn->dxe_tx_l_ch.head_blk_ctl->desc_phy_addr);

	/* Program DMA destination addr for TX LOW */
	wcn36xx_dxe_write_register(wcn,
		WCN36XX_DXE_CH_DEST_ADDR_TX_L,
		WCN36XX_DXE_WQ_TX_L);

	wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_REG_CH_EN, &reg_data);
	wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_L);

	/****************************************/
	/* Init descriptors for TX HIGH channel */
	/****************************************/
	ret = wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_tx_h_ch);
	if (ret) {
		dev_err(wcn->dev, "Error allocating descriptor\n");
		goto out_err_txh_ch;
	}

	wcn36xx_dxe_init_tx_bd(&wcn->dxe_tx_h_ch, &wcn->mgmt_mem_pool);

	/* Write channel head to a NEXT register */
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_TX_H,
		wcn->dxe_tx_h_ch.head_blk_ctl->desc_phy_addr);

	/* Program DMA destination addr for TX HIGH */
	wcn36xx_dxe_write_register(wcn,
		WCN36XX_DXE_CH_DEST_ADDR_TX_H,
		WCN36XX_DXE_WQ_TX_H);

	wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_REG_CH_EN, &reg_data);

	/* Enable channel interrupts */
	wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_H);

	/***************************************/
	/* Init descriptors for RX LOW channel */
	/***************************************/
	ret = wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_rx_l_ch);
	if (ret) {
		dev_err(wcn->dev, "Error allocating descriptor\n");
		goto out_err_rxl_ch;
	}

	/* For RX we need to preallocate buffers */
	wcn36xx_dxe_ch_alloc_skb(wcn, &wcn->dxe_rx_l_ch);

	/* Write channel head to a NEXT register */
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_RX_L,
		wcn->dxe_rx_l_ch.head_blk_ctl->desc_phy_addr);

	/* Write DMA source address */
	wcn36xx_dxe_write_register(wcn,
		WCN36XX_DXE_CH_SRC_ADDR_RX_L,
		WCN36XX_DXE_WQ_RX_L);

	/* Program preallocated destination address */
	wcn36xx_dxe_write_register(wcn,
		WCN36XX_DXE_CH_DEST_ADDR_RX_L,
		wcn->dxe_rx_l_ch.head_blk_ctl->desc->phy_next_l);

	/* Enable default control registers */
	wcn36xx_dxe_write_register(wcn,
		WCN36XX_DXE_REG_CTL_RX_L,
		WCN36XX_DXE_CH_DEFAULT_CTL_RX_L);

	/* Enable channel interrupts */
	wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_L);

	/****************************************/
	/* Init descriptors for RX HIGH channel */
	/****************************************/
	ret = wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_rx_h_ch);
	if (ret) {
		dev_err(wcn->dev, "Error allocating descriptor\n");
		goto out_err_rxh_ch;
	}

	/* For RX we need to preallocate buffers */
	wcn36xx_dxe_ch_alloc_skb(wcn, &wcn->dxe_rx_h_ch);

	/* Write channel head to a NEXT register */
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_RX_H,
		wcn->dxe_rx_h_ch.head_blk_ctl->desc_phy_addr);

	/* Write DMA source address */
	wcn36xx_dxe_write_register(wcn,
		WCN36XX_DXE_CH_SRC_ADDR_RX_H,
		WCN36XX_DXE_WQ_RX_H);

	/* Program preallocated destination address */
	wcn36xx_dxe_write_register(wcn,
		WCN36XX_DXE_CH_DEST_ADDR_RX_H,
		wcn->dxe_rx_h_ch.head_blk_ctl->desc->phy_next_l);

	/* Enable default control registers */
	wcn36xx_dxe_write_register(wcn,
		WCN36XX_DXE_REG_CTL_RX_H,
		WCN36XX_DXE_CH_DEFAULT_CTL_RX_H);

	/* Enable channel interrupts */
	wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_H);

	ret = wcn36xx_dxe_request_irqs(wcn);
	if (ret < 0)
		goto out_err_irq;

	return 0;

out_err_irq:
	wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_rx_h_ch);
out_err_rxh_ch:
	wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_rx_l_ch);
out_err_rxl_ch:
	wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_tx_h_ch);
out_err_txh_ch:
	wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_tx_l_ch);

	return ret;
}

void wcn36xx_dxe_deinit(struct wcn36xx *wcn)
{
	free_irq(wcn->tx_irq, wcn);
	free_irq(wcn->rx_irq, wcn);

	if (wcn->tx_ack_skb) {
		ieee80211_tx_status_irqsafe(wcn->hw, wcn->tx_ack_skb);
		wcn->tx_ack_skb = NULL;
	}

	wcn36xx_dxe_ch_free_skbs(wcn, &wcn->dxe_rx_l_ch);
	wcn36xx_dxe_ch_free_skbs(wcn, &wcn->dxe_rx_h_ch);
}