/*
 * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* DXE - DMA transfer engine
 * We have two channels (high priority and low priority) for TX and two
 * channels for RX. Data packets are transferred through the low priority
 * channels, management packets through the high priority channels.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/interrupt.h>
#include "wcn36xx.h"
#include "txrx.h"

void *wcn36xx_dxe_get_next_bd(struct wcn36xx *wcn, bool is_low)
{
	struct wcn36xx_dxe_ch *ch = is_low ?
		&wcn->dxe_tx_l_ch :
		&wcn->dxe_tx_h_ch;

	return ch->head_blk_ctl->bd_cpu_addr;
}

static void wcn36xx_dxe_write_register(struct wcn36xx *wcn, int addr, int data)
{
	wcn36xx_dbg(WCN36XX_DBG_DXE,
		    "wcn36xx_dxe_write_register: addr=%x, data=%x\n",
		    addr, data);

	writel(data, wcn->mmio + addr);
}

static void wcn36xx_dxe_read_register(struct wcn36xx *wcn, int addr, int *data)
{
	*data = readl(wcn->mmio + addr);

	wcn36xx_dbg(WCN36XX_DBG_DXE,
		    "wcn36xx_dxe_read_register: addr=%x, data=%x\n",
		    addr, *data);
}

static void wcn36xx_dxe_free_ctl_block(struct wcn36xx_dxe_ch *ch)
{
	struct wcn36xx_dxe_ctl *ctl = ch->head_blk_ctl, *next;
	int i;

	for (i = 0; i < ch->desc_num && ctl; i++) {
		next = ctl->next;
		kfree(ctl);
		ctl = next;
	}
}

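/*
 * Allocate the channel's ring of control blocks and link them into a
 * circular singly linked list; the last block points back at the head.
 */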
static int wcn36xx_dxe_allocate_ctl_block(struct wcn36xx_dxe_ch *ch)
{
	struct wcn36xx_dxe_ctl *prev_ctl = NULL;
	struct wcn36xx_dxe_ctl *cur_ctl = NULL;
	int i;

	for (i = 0; i < ch->desc_num; i++) {
		cur_ctl = kzalloc(sizeof(*cur_ctl), GFP_KERNEL);
		if (!cur_ctl)
			goto out_fail;

		cur_ctl->ctl_blk_order = i;
		if (i == 0) {
			ch->head_blk_ctl = cur_ctl;
			ch->tail_blk_ctl = cur_ctl;
		} else if (ch->desc_num - 1 == i) {
			prev_ctl->next = cur_ctl;
			cur_ctl->next = ch->head_blk_ctl;
		} else {
			prev_ctl->next = cur_ctl;
		}
		prev_ctl = cur_ctl;
	}

	return 0;

out_fail:
	wcn36xx_dxe_free_ctl_block(ch);
	return -ENOMEM;
}

int wcn36xx_dxe_alloc_ctl_blks(struct wcn36xx *wcn)
{
	int ret;

	wcn->dxe_tx_l_ch.ch_type = WCN36XX_DXE_CH_TX_L;
	wcn->dxe_tx_h_ch.ch_type = WCN36XX_DXE_CH_TX_H;
	wcn->dxe_rx_l_ch.ch_type = WCN36XX_DXE_CH_RX_L;
	wcn->dxe_rx_h_ch.ch_type = WCN36XX_DXE_CH_RX_H;

	wcn->dxe_tx_l_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_TX_L;
	wcn->dxe_tx_h_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_TX_H;
	wcn->dxe_rx_l_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_RX_L;
	wcn->dxe_rx_h_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_RX_H;

	wcn->dxe_tx_l_ch.dxe_wq = WCN36XX_DXE_WQ_TX_L;
	wcn->dxe_tx_h_ch.dxe_wq = WCN36XX_DXE_WQ_TX_H;

	wcn->dxe_tx_l_ch.ctrl_bd = WCN36XX_DXE_CTRL_TX_L_BD;
	wcn->dxe_tx_h_ch.ctrl_bd = WCN36XX_DXE_CTRL_TX_H_BD;

	wcn->dxe_tx_l_ch.ctrl_skb = WCN36XX_DXE_CTRL_TX_L_SKB;
	wcn->dxe_tx_h_ch.ctrl_skb = WCN36XX_DXE_CTRL_TX_H_SKB;

	wcn->dxe_tx_l_ch.reg_ctrl = WCN36XX_DXE_REG_CTL_TX_L;
	wcn->dxe_tx_h_ch.reg_ctrl = WCN36XX_DXE_REG_CTL_TX_H;

	wcn->dxe_tx_l_ch.def_ctrl = WCN36XX_DXE_CH_DEFAULT_CTL_TX_L;
	wcn->dxe_tx_h_ch.def_ctrl = WCN36XX_DXE_CH_DEFAULT_CTL_TX_H;

	/* DXE control block allocation */
	ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_tx_l_ch);
	if (ret)
		goto out_err;
	ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_tx_h_ch);
	if (ret)
		goto out_err;
	ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_rx_l_ch);
	if (ret)
		goto out_err;
	ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_rx_h_ch);
	if (ret)
		goto out_err;

	/* Initialize the SMSM state: clear TX enable, set TX rings empty */
	ret = wcn->ctrl_ops->smsm_change_state(
		WCN36XX_SMSM_WLAN_TX_ENABLE,
		WCN36XX_SMSM_WLAN_TX_RINGS_EMPTY);

	return 0;

out_err:
	wcn36xx_err("Failed to allocate DXE control blocks\n");
	wcn36xx_dxe_free_ctl_blks(wcn);
	return -ENOMEM;
}

void wcn36xx_dxe_free_ctl_blks(struct wcn36xx *wcn)
{
	wcn36xx_dxe_free_ctl_block(&wcn->dxe_tx_l_ch);
	wcn36xx_dxe_free_ctl_block(&wcn->dxe_tx_h_ch);
	wcn36xx_dxe_free_ctl_block(&wcn->dxe_rx_l_ch);
	wcn36xx_dxe_free_ctl_block(&wcn->dxe_rx_h_ch);
}

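/*
 * Allocate one DMA-coherent block of hardware descriptors for the channel
 * and chain them into a ring: each descriptor is attached to its control
 * block and the last descriptor points back to the first one.
 */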
static int wcn36xx_dxe_init_descs(struct wcn36xx_dxe_ch *wcn_ch)
{
	struct wcn36xx_dxe_desc *cur_dxe = NULL;
	struct wcn36xx_dxe_desc *prev_dxe = NULL;
	struct wcn36xx_dxe_ctl *cur_ctl = NULL;
	size_t size;
	int i;

	size = wcn_ch->desc_num * sizeof(struct wcn36xx_dxe_desc);
	wcn_ch->cpu_addr = dma_alloc_coherent(NULL, size, &wcn_ch->dma_addr,
					      GFP_KERNEL);
	if (!wcn_ch->cpu_addr)
		return -ENOMEM;

	memset(wcn_ch->cpu_addr, 0, size);

	cur_dxe = (struct wcn36xx_dxe_desc *)wcn_ch->cpu_addr;
	cur_ctl = wcn_ch->head_blk_ctl;

	for (i = 0; i < wcn_ch->desc_num; i++) {
		cur_ctl->desc = cur_dxe;
		cur_ctl->desc_phy_addr = wcn_ch->dma_addr +
			i * sizeof(struct wcn36xx_dxe_desc);

		switch (wcn_ch->ch_type) {
		case WCN36XX_DXE_CH_TX_L:
			cur_dxe->ctrl = WCN36XX_DXE_CTRL_TX_L;
			cur_dxe->dst_addr_l = WCN36XX_DXE_WQ_TX_L;
			break;
		case WCN36XX_DXE_CH_TX_H:
			cur_dxe->ctrl = WCN36XX_DXE_CTRL_TX_H;
			cur_dxe->dst_addr_l = WCN36XX_DXE_WQ_TX_H;
			break;
		case WCN36XX_DXE_CH_RX_L:
			cur_dxe->ctrl = WCN36XX_DXE_CTRL_RX_L;
			cur_dxe->src_addr_l = WCN36XX_DXE_WQ_RX_L;
			break;
		case WCN36XX_DXE_CH_RX_H:
			cur_dxe->ctrl = WCN36XX_DXE_CTRL_RX_H;
			cur_dxe->src_addr_l = WCN36XX_DXE_WQ_RX_H;
			break;
		}
		if (0 == i) {
			cur_dxe->phy_next_l = 0;
		} else if ((0 < i) && (i < wcn_ch->desc_num - 1)) {
			prev_dxe->phy_next_l =
				cur_ctl->desc_phy_addr;
		} else if (i == (wcn_ch->desc_num - 1)) {
			prev_dxe->phy_next_l =
				cur_ctl->desc_phy_addr;
			cur_dxe->phy_next_l =
				wcn_ch->head_blk_ctl->desc_phy_addr;
		}
		cur_ctl = cur_ctl->next;
		prev_dxe = cur_dxe;
		cur_dxe++;
	}

	return 0;
}

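/*
 * Hand out the preallocated BD (buffer descriptor) chunks to the TX ring:
 * every even control block gets a BD, the odd ones are left empty since
 * their descriptors will point at the skb payload instead.
 */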
static void wcn36xx_dxe_init_tx_bd(struct wcn36xx_dxe_ch *ch,
				   struct wcn36xx_dxe_mem_pool *pool)
{
	int i, chunk_size = pool->chunk_size;
	dma_addr_t bd_phy_addr = pool->phy_addr;
	void *bd_cpu_addr = pool->virt_addr;
	struct wcn36xx_dxe_ctl *cur = ch->head_blk_ctl;

	for (i = 0; i < ch->desc_num; i++) {
		/*
		 * Only every second dxe needs a bd pointer, the others
		 * will point to the skb data instead.
		 */
		if (!(i & 1)) {
			cur->bd_phy_addr = bd_phy_addr;
			cur->bd_cpu_addr = bd_cpu_addr;
			bd_phy_addr += chunk_size;
			bd_cpu_addr += chunk_size;
		} else {
			cur->bd_phy_addr = 0;
			cur->bd_cpu_addr = NULL;
		}
		cur = cur->next;
	}
}

static int wcn36xx_dxe_enable_ch_int(struct wcn36xx *wcn, u16 wcn_ch)
{
	int reg_data = 0;

	wcn36xx_dxe_read_register(wcn,
				  WCN36XX_DXE_INT_MASK_REG,
				  &reg_data);

	reg_data |= wcn_ch;

	wcn36xx_dxe_write_register(wcn,
				   WCN36XX_DXE_INT_MASK_REG,
				   (int)reg_data);
	return 0;
}

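/*
 * Attach a freshly allocated skb to an RX control block and map it for
 * DMA so the descriptor can receive a frame.
 */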
static int wcn36xx_dxe_fill_skb(struct wcn36xx_dxe_ctl *ctl)
{
	struct wcn36xx_dxe_desc *dxe = ctl->desc;
	struct sk_buff *skb;

	skb = alloc_skb(WCN36XX_PKT_SIZE, GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	dxe->dst_addr_l = dma_map_single(NULL,
					 skb_tail_pointer(skb),
					 WCN36XX_PKT_SIZE,
					 DMA_FROM_DEVICE);
	ctl->skb = skb;

	return 0;
}

static int wcn36xx_dxe_ch_alloc_skb(struct wcn36xx *wcn,
				    struct wcn36xx_dxe_ch *wcn_ch)
{
	int i;
	struct wcn36xx_dxe_ctl *cur_ctl = NULL;

	cur_ctl = wcn_ch->head_blk_ctl;

	for (i = 0; i < wcn_ch->desc_num; i++) {
		wcn36xx_dxe_fill_skb(cur_ctl);
		cur_ctl = cur_ctl->next;
	}

	return 0;
}

static void wcn36xx_dxe_ch_free_skbs(struct wcn36xx *wcn,
				     struct wcn36xx_dxe_ch *wcn_ch)
{
	struct wcn36xx_dxe_ctl *cur = wcn_ch->head_blk_ctl;
	int i;

	for (i = 0; i < wcn_ch->desc_num; i++) {
		kfree_skb(cur->skb);
		cur = cur->next;
	}
}

void wcn36xx_dxe_tx_ack_ind(struct wcn36xx *wcn, u32 status)
{
	struct ieee80211_tx_info *info;
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&wcn->dxe_lock, flags);
	skb = wcn->tx_ack_skb;
	wcn->tx_ack_skb = NULL;
	spin_unlock_irqrestore(&wcn->dxe_lock, flags);

	if (!skb) {
		wcn36xx_warn("Spurious TX complete indication\n");
		return;
	}

	info = IEEE80211_SKB_CB(skb);

	if (status == 1)
		info->flags |= IEEE80211_TX_STAT_ACK;

	wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ack status: %d\n", status);

	ieee80211_tx_status_irqsafe(wcn->hw, skb);
	ieee80211_wake_queues(wcn->hw);
}

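/*
 * Walk the TX ring from the tail and release every descriptor the DMA
 * engine has finished with: unmap the attached skbs, free those that do
 * not need a TX status report, and wake the mac80211 queues if they were
 * stopped because the ring was full.
 */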
static void reap_tx_dxes(struct wcn36xx *wcn, struct wcn36xx_dxe_ch *ch)
{
	struct wcn36xx_dxe_ctl *ctl = ch->tail_blk_ctl;
	struct ieee80211_tx_info *info;
	unsigned long flags;

	/*
	 * Use a do-while loop so at least one iteration runs: when the ring
	 * is completely full, head and tail point to the same element and a
	 * plain while loop would not iterate at all.
	 */
	do {
		if (ctl->skb) {
			dma_unmap_single(NULL, ctl->desc->src_addr_l,
					 ctl->skb->len, DMA_TO_DEVICE);
			info = IEEE80211_SKB_CB(ctl->skb);
			if (!(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)) {
				/*
				 * No TX status was requested, free the frame
				 * right away; otherwise keep it until the TX
				 * status indication arrives.
				 */
				ieee80211_free_txskb(wcn->hw, ctl->skb);
			}
			spin_lock_irqsave(&ctl->skb_lock, flags);
			if (wcn->queues_stopped) {
				wcn->queues_stopped = false;
				ieee80211_wake_queues(wcn->hw);
			}
			spin_unlock_irqrestore(&ctl->skb_lock, flags);

			ctl->skb = NULL;
		}
		ctl = ctl->next;
	} while (ctl != ch->head_blk_ctl &&
	       !(ctl->desc->ctrl & WCN36XX_DXE_CTRL_VALID_MASK));

	ch->tail_blk_ctl = ctl;
}

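/*
 * TX interrupt handler: acknowledge the high and low priority channel
 * interrupts and reap the completed descriptors of each channel.
 */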
static irqreturn_t wcn36xx_irq_tx_complete(int irq, void *dev)
{
	struct wcn36xx *wcn = (struct wcn36xx *)dev;
	int int_src, int_reason;

	wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_INT_SRC_RAW_REG, &int_src);

	if (int_src & WCN36XX_INT_MASK_CHAN_TX_H) {
		wcn36xx_dxe_read_register(wcn,
					  WCN36XX_DXE_CH_STATUS_REG_ADDR_TX_H,
					  &int_reason);

		/* TODO: Check int_reason */

		wcn36xx_dxe_write_register(wcn,
					   WCN36XX_DXE_0_INT_CLR,
					   WCN36XX_INT_MASK_CHAN_TX_H);

		wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_ED_CLR,
					   WCN36XX_INT_MASK_CHAN_TX_H);
		wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ready high\n");
		reap_tx_dxes(wcn, &wcn->dxe_tx_h_ch);
	}

	if (int_src & WCN36XX_INT_MASK_CHAN_TX_L) {
		wcn36xx_dxe_read_register(wcn,
					  WCN36XX_DXE_CH_STATUS_REG_ADDR_TX_L,
					  &int_reason);
		/* TODO: Check int_reason */

		wcn36xx_dxe_write_register(wcn,
					   WCN36XX_DXE_0_INT_CLR,
					   WCN36XX_INT_MASK_CHAN_TX_L);

		wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_ED_CLR,
					   WCN36XX_INT_MASK_CHAN_TX_L);
		wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ready low\n");
		reap_tx_dxes(wcn, &wcn->dxe_tx_l_ch);
	}

	return IRQ_HANDLED;
}

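/*
 * RX interrupt handler: keep the line masked while the pending frames are
 * processed.
 */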
static irqreturn_t wcn36xx_irq_rx_ready(int irq, void *dev)
{
	struct wcn36xx *wcn = (struct wcn36xx *)dev;

	disable_irq_nosync(wcn->rx_irq);
	wcn36xx_dxe_rx_frame(wcn);
	enable_irq(wcn->rx_irq);
	return IRQ_HANDLED;
}

static int wcn36xx_dxe_request_irqs(struct wcn36xx *wcn)
{
	int ret;

	ret = request_irq(wcn->tx_irq, wcn36xx_irq_tx_complete,
			  IRQF_TRIGGER_HIGH, "wcn36xx_tx", wcn);
	if (ret) {
		wcn36xx_err("failed to alloc tx irq\n");
		goto out_err;
	}

	ret = request_irq(wcn->rx_irq, wcn36xx_irq_rx_ready, IRQF_TRIGGER_HIGH,
			  "wcn36xx_rx", wcn);
	if (ret) {
		wcn36xx_err("failed to alloc rx irq\n");
		goto out_txirq;
	}

	enable_irq_wake(wcn->rx_irq);

	return 0;

out_txirq:
	free_irq(wcn->tx_irq, wcn);
out_err:
	return ret;
}

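/*
 * Consume all completed RX descriptors of a channel: pass each received
 * skb up to mac80211, re-arm the descriptor with a new skb and advance
 * the ring head.
 */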
static int wcn36xx_rx_handle_packets(struct wcn36xx *wcn,
				     struct wcn36xx_dxe_ch *ch)
{
	struct wcn36xx_dxe_ctl *ctl = ch->head_blk_ctl;
	struct wcn36xx_dxe_desc *dxe = ctl->desc;
	dma_addr_t dma_addr;
	struct sk_buff *skb;

	while (!(dxe->ctrl & WCN36XX_DXE_CTRL_VALID_MASK)) {
		skb = ctl->skb;
		dma_addr = dxe->dst_addr_l;
		wcn36xx_dxe_fill_skb(ctl);

		switch (ch->ch_type) {
		case WCN36XX_DXE_CH_RX_L:
			dxe->ctrl = WCN36XX_DXE_CTRL_RX_L;
			wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_ENCH_ADDR,
						   WCN36XX_DXE_INT_CH1_MASK);
			break;
		case WCN36XX_DXE_CH_RX_H:
			dxe->ctrl = WCN36XX_DXE_CTRL_RX_H;
			wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_ENCH_ADDR,
						   WCN36XX_DXE_INT_CH3_MASK);
			break;
		default:
			wcn36xx_warn("Unknown channel\n");
		}

		dma_unmap_single(NULL, dma_addr, WCN36XX_PKT_SIZE,
				 DMA_FROM_DEVICE);
		wcn36xx_rx_skb(wcn, skb);
		ctl = ctl->next;
		dxe = ctl->desc;
	}

	ch->head_blk_ctl = ctl;

	return 0;
}

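/*
 * Called from the RX interrupt handler: check which RX channels raised an
 * interrupt, clear them and process the pending frames.
 */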
void wcn36xx_dxe_rx_frame(struct wcn36xx *wcn)
{
	int int_src;

	wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_INT_SRC_RAW_REG, &int_src);

	/* RX_LOW_PRI */
	if (int_src & WCN36XX_DXE_INT_CH1_MASK) {
		wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_CLR,
					   WCN36XX_DXE_INT_CH1_MASK);
		wcn36xx_rx_handle_packets(wcn, &(wcn->dxe_rx_l_ch));
	}

	/* RX_HIGH_PRI */
	if (int_src & WCN36XX_DXE_INT_CH3_MASK) {
		/* Clean up all the INT within this channel */
		wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_CLR,
					   WCN36XX_DXE_INT_CH3_MASK);
		wcn36xx_rx_handle_packets(wcn, &(wcn->dxe_rx_h_ch));
	}

	if (!int_src)
		wcn36xx_warn("No DXE interrupt pending\n");
}

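/*
 * Allocate the DMA-coherent pools that hold the TX BD headers: one pool
 * for management frames (high priority channel) and one for data frames
 * (low priority channel).
 */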
int wcn36xx_dxe_allocate_mem_pools(struct wcn36xx *wcn)
{
	size_t s;
	void *cpu_addr;

	/* Allocate BD headers for MGMT frames */

	/* Where this comes from, ask QC */
	wcn->mgmt_mem_pool.chunk_size = WCN36XX_BD_CHUNK_SIZE +
		16 - (WCN36XX_BD_CHUNK_SIZE % 8);

	s = wcn->mgmt_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_H;
	cpu_addr = dma_alloc_coherent(NULL, s, &wcn->mgmt_mem_pool.phy_addr,
				      GFP_KERNEL);
	if (!cpu_addr)
		goto out_err;

	wcn->mgmt_mem_pool.virt_addr = cpu_addr;
	memset(cpu_addr, 0, s);

	/* Allocate BD headers for DATA frames */

	/* Where this comes from, ask QC */
	wcn->data_mem_pool.chunk_size = WCN36XX_BD_CHUNK_SIZE +
		16 - (WCN36XX_BD_CHUNK_SIZE % 8);

	s = wcn->data_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_L;
	cpu_addr = dma_alloc_coherent(NULL, s, &wcn->data_mem_pool.phy_addr,
				      GFP_KERNEL);
	if (!cpu_addr)
		goto out_err;

	wcn->data_mem_pool.virt_addr = cpu_addr;
	memset(cpu_addr, 0, s);

	return 0;

out_err:
	wcn36xx_dxe_free_mem_pools(wcn);
	wcn36xx_err("Failed to allocate BD mempool\n");
	return -ENOMEM;
}

void wcn36xx_dxe_free_mem_pools(struct wcn36xx *wcn)
{
	if (wcn->mgmt_mem_pool.virt_addr)
		dma_free_coherent(NULL, wcn->mgmt_mem_pool.chunk_size *
				  WCN36XX_DXE_CH_DESC_NUMB_TX_H,
				  wcn->mgmt_mem_pool.virt_addr,
				  wcn->mgmt_mem_pool.phy_addr);

	if (wcn->data_mem_pool.virt_addr) {
		dma_free_coherent(NULL, wcn->data_mem_pool.chunk_size *
				  WCN36XX_DXE_CH_DESC_NUMB_TX_L,
				  wcn->data_mem_pool.virt_addr,
				  wcn->data_mem_pool.phy_addr);
	}
}

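/*
 * Queue a frame for transmission. Each frame occupies two descriptors:
 * the first one points at the BD header, the second one at the skb
 * payload. The hardware is kicked either through the channel control
 * register or, when the chip may be asleep in BMPS, through SMSM.
 */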
int wcn36xx_dxe_tx_frame(struct wcn36xx *wcn,
			 struct wcn36xx_vif *vif_priv,
			 struct sk_buff *skb,
			 bool is_low)
{
	struct wcn36xx_dxe_ctl *ctl = NULL;
	struct wcn36xx_dxe_desc *desc = NULL;
	struct wcn36xx_dxe_ch *ch = NULL;
	unsigned long flags;

	ch = is_low ? &wcn->dxe_tx_l_ch : &wcn->dxe_tx_h_ch;

	ctl = ch->head_blk_ctl;

	spin_lock_irqsave(&ctl->next->skb_lock, flags);

	/*
	 * If the next skb is not NULL we have caught up with the tail of the
	 * ring, i.e. the ring is full. Stop the queues so mac80211 backs off
	 * until the ring has an empty slot again.
	 */
	if (NULL != ctl->next->skb) {
		ieee80211_stop_queues(wcn->hw);
		wcn->queues_stopped = true;
		spin_unlock_irqrestore(&ctl->next->skb_lock, flags);
		return -EBUSY;
	}
	spin_unlock_irqrestore(&ctl->next->skb_lock, flags);

	ctl->skb = NULL;
	desc = ctl->desc;

	/* Set source address of the BD we send */
	desc->src_addr_l = ctl->bd_phy_addr;

	desc->dst_addr_l = ch->dxe_wq;
	desc->fr_len = sizeof(struct wcn36xx_tx_bd);
	desc->ctrl = ch->ctrl_bd;

	wcn36xx_dbg(WCN36XX_DBG_DXE, "DXE TX\n");

	wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "DESC1 >>> ",
			 (char *)desc, sizeof(*desc));
	wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP,
			 "BD   >>> ", (char *)ctl->bd_cpu_addr,
			 sizeof(struct wcn36xx_tx_bd));

	/* Set source address of the SKB we send */
	ctl = ctl->next;
	ctl->skb = skb;
	desc = ctl->desc;
	if (ctl->bd_cpu_addr) {
		wcn36xx_err("bd_cpu_addr must be NULL for skb DXE\n");
		return -EINVAL;
	}

	desc->src_addr_l = dma_map_single(NULL,
					  ctl->skb->data,
					  ctl->skb->len,
					  DMA_TO_DEVICE);

	desc->dst_addr_l = ch->dxe_wq;
	desc->fr_len = ctl->skb->len;

	/* set dxe descriptor to VALID */
	desc->ctrl = ch->ctrl_skb;

	wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "DESC2 >>> ",
			 (char *)desc, sizeof(*desc));
	wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "SKB   >>> ",
			 (char *)ctl->skb->data, ctl->skb->len);

	/* Move the head of the ring to the next empty descriptor */
	ch->head_blk_ctl = ctl->next;

	/*
	 * When connected and trying to send a data frame, the chip can be in
	 * sleep mode and writing to a register will not wake it up. Notify
	 * the chip about the new frame through the SMSM bus instead.
	 */
	if (is_low && vif_priv->pw_state == WCN36XX_BMPS) {
		wcn->ctrl_ops->smsm_change_state(
				  0,
				  WCN36XX_SMSM_WLAN_TX_ENABLE);
	} else {
		/* indicate End Of Packet and generate interrupt on descriptor
		 * done.
		 */
		wcn36xx_dxe_write_register(wcn,
			ch->reg_ctrl, ch->def_ctrl);
	}

	return 0;
}

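/*
 * Bring up the DXE engine: reset it, set up descriptors and buffers for
 * all four channels, program the channel registers, enable the channel
 * interrupts and install the TX and RX interrupt handlers.
 */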
int wcn36xx_dxe_init(struct wcn36xx *wcn)
{
	int reg_data = 0, ret;

	reg_data = WCN36XX_DXE_REG_RESET;
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_REG_CSR_RESET, reg_data);

	/* Setting interrupt path */
	reg_data = WCN36XX_DXE_CCU_INT;
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_REG_CCU_INT, reg_data);

	/***************************************/
	/* Init descriptors for TX LOW channel */
	/***************************************/
	wcn36xx_dxe_init_descs(&wcn->dxe_tx_l_ch);
	wcn36xx_dxe_init_tx_bd(&wcn->dxe_tx_l_ch, &wcn->data_mem_pool);

	/* Write channel head to a NEXT register */
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_TX_L,
		wcn->dxe_tx_l_ch.head_blk_ctl->desc_phy_addr);

	/* Program DMA destination addr for TX LOW */
	wcn36xx_dxe_write_register(wcn,
		WCN36XX_DXE_CH_DEST_ADDR_TX_L,
		WCN36XX_DXE_WQ_TX_L);

	wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_REG_CH_EN, &reg_data);
	wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_L);

	/****************************************/
	/* Init descriptors for TX HIGH channel */
	/****************************************/
	wcn36xx_dxe_init_descs(&wcn->dxe_tx_h_ch);
	wcn36xx_dxe_init_tx_bd(&wcn->dxe_tx_h_ch, &wcn->mgmt_mem_pool);

	/* Write channel head to a NEXT register */
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_TX_H,
		wcn->dxe_tx_h_ch.head_blk_ctl->desc_phy_addr);

	/* Program DMA destination addr for TX HIGH */
	wcn36xx_dxe_write_register(wcn,
		WCN36XX_DXE_CH_DEST_ADDR_TX_H,
		WCN36XX_DXE_WQ_TX_H);

	wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_REG_CH_EN, &reg_data);

	/* Enable channel interrupts */
	wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_H);

	/***************************************/
	/* Init descriptors for RX LOW channel */
	/***************************************/
	wcn36xx_dxe_init_descs(&wcn->dxe_rx_l_ch);

	/* For RX we need to preallocate buffers */
	wcn36xx_dxe_ch_alloc_skb(wcn, &wcn->dxe_rx_l_ch);

	/* Write channel head to a NEXT register */
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_RX_L,
		wcn->dxe_rx_l_ch.head_blk_ctl->desc_phy_addr);

	/* Write DMA source address */
	wcn36xx_dxe_write_register(wcn,
		WCN36XX_DXE_CH_SRC_ADDR_RX_L,
		WCN36XX_DXE_WQ_RX_L);

	/* Program preallocated destination address */
	wcn36xx_dxe_write_register(wcn,
		WCN36XX_DXE_CH_DEST_ADDR_RX_L,
		wcn->dxe_rx_l_ch.head_blk_ctl->desc->phy_next_l);

	/* Enable default control registers */
	wcn36xx_dxe_write_register(wcn,
		WCN36XX_DXE_REG_CTL_RX_L,
		WCN36XX_DXE_CH_DEFAULT_CTL_RX_L);

	/* Enable channel interrupts */
	wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_L);

	/****************************************/
	/* Init descriptors for RX HIGH channel */
	/****************************************/
	wcn36xx_dxe_init_descs(&wcn->dxe_rx_h_ch);

	/* For RX we need to preallocate buffers */
	wcn36xx_dxe_ch_alloc_skb(wcn, &wcn->dxe_rx_h_ch);

	/* Write channel head to a NEXT register */
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_RX_H,
		wcn->dxe_rx_h_ch.head_blk_ctl->desc_phy_addr);

	/* Write DMA source address */
	wcn36xx_dxe_write_register(wcn,
		WCN36XX_DXE_CH_SRC_ADDR_RX_H,
		WCN36XX_DXE_WQ_RX_H);

	/* Program preallocated destination address */
	wcn36xx_dxe_write_register(wcn,
		WCN36XX_DXE_CH_DEST_ADDR_RX_H,
		wcn->dxe_rx_h_ch.head_blk_ctl->desc->phy_next_l);

	/* Enable default control registers */
	wcn36xx_dxe_write_register(wcn,
		WCN36XX_DXE_REG_CTL_RX_H,
		WCN36XX_DXE_CH_DEFAULT_CTL_RX_H);

	/* Enable channel interrupts */
	wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_H);

	ret = wcn36xx_dxe_request_irqs(wcn);
	if (ret < 0)
		goto out_err;

	return 0;

out_err:
	return ret;
}

void wcn36xx_dxe_deinit(struct wcn36xx *wcn)
{
	free_irq(wcn->tx_irq, wcn);
	free_irq(wcn->rx_irq, wcn);

	if (wcn->tx_ack_skb) {
		ieee80211_tx_status_irqsafe(wcn->hw, wcn->tx_ack_skb);
		wcn->tx_ack_skb = NULL;
	}

	wcn36xx_dxe_ch_free_skbs(wcn, &wcn->dxe_rx_l_ch);
	wcn36xx_dxe_ch_free_skbs(wcn, &wcn->dxe_rx_h_ch);
}