/*
 * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* DXE - DMA transfer engine
 * There are two channels (high and low priority) for TX and two channels
 * for RX. Data packets are transferred through the low-priority channels;
 * management packets are transferred through the high-priority ones.
 */
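
/*
 * Each channel is driven by a ring of DMA descriptors (struct
 * wcn36xx_dxe_desc) shadowed by a circular linked list of control blocks
 * (struct wcn36xx_dxe_ctl): head_blk_ctl points at the next element to
 * hand to the hardware, tail_blk_ctl at the next element to reclaim once
 * the hardware has cleared its VALID bit.
 */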

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/interrupt.h>
#include <linux/soc/qcom/smem_state.h>
#include "wcn36xx.h"
#include "txrx.h"

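/*
 * Return the CPU address of the buffer descriptor (BD) that the next
 * frame on the chosen TX channel will use; the caller fills it in before
 * handing the frame to wcn36xx_dxe_tx_frame().
 */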
void *wcn36xx_dxe_get_next_bd(struct wcn36xx *wcn, bool is_low)
{
	struct wcn36xx_dxe_ch *ch = is_low ?
		&wcn->dxe_tx_l_ch :
		&wcn->dxe_tx_h_ch;

	return ch->head_blk_ctl->bd_cpu_addr;
}

static void wcn36xx_ccu_write_register(struct wcn36xx *wcn, int addr, int data)
{
	wcn36xx_dbg(WCN36XX_DBG_DXE,
		    "wcn36xx_ccu_write_register: addr=%x, data=%x\n",
		    addr, data);

	writel(data, wcn->ccu_base + addr);
}

static void wcn36xx_dxe_write_register(struct wcn36xx *wcn, int addr, int data)
{
	wcn36xx_dbg(WCN36XX_DBG_DXE,
		    "wcn36xx_dxe_write_register: addr=%x, data=%x\n",
		    addr, data);

	writel(data, wcn->dxe_base + addr);
}

static void wcn36xx_dxe_read_register(struct wcn36xx *wcn, int addr, int *data)
{
	*data = readl(wcn->dxe_base + addr);

	wcn36xx_dbg(WCN36XX_DBG_DXE,
		    "wcn36xx_dxe_read_register: addr=%x, data=%x\n",
		    addr, *data);
}

static void wcn36xx_dxe_free_ctl_block(struct wcn36xx_dxe_ch *ch)
{
	struct wcn36xx_dxe_ctl *ctl = ch->head_blk_ctl, *next;
	int i;

	for (i = 0; i < ch->desc_num && ctl; i++) {
		next = ctl->next;
		kfree(ctl);
		ctl = next;
	}
}

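/*
 * Allocate the per-channel control blocks and link them into the circular
 * list that shadows the hardware descriptor ring.
 */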
static int wcn36xx_dxe_allocate_ctl_block(struct wcn36xx_dxe_ch *ch)
{
	struct wcn36xx_dxe_ctl *prev_ctl = NULL;
	struct wcn36xx_dxe_ctl *cur_ctl = NULL;
	int i;

	spin_lock_init(&ch->lock);
	for (i = 0; i < ch->desc_num; i++) {
		cur_ctl = kzalloc(sizeof(*cur_ctl), GFP_KERNEL);
		if (!cur_ctl)
			goto out_fail;

		spin_lock_init(&cur_ctl->skb_lock);
		cur_ctl->ctl_blk_order = i;
		if (i == 0) {
			ch->head_blk_ctl = cur_ctl;
			ch->tail_blk_ctl = cur_ctl;
		} else if (i == ch->desc_num - 1) {
			prev_ctl->next = cur_ctl;
			cur_ctl->next = ch->head_blk_ctl;
		} else {
			prev_ctl->next = cur_ctl;
		}
		prev_ctl = cur_ctl;
	}

	return 0;

out_fail:
	wcn36xx_dxe_free_ctl_block(ch);
	return -ENOMEM;
}

int wcn36xx_dxe_alloc_ctl_blks(struct wcn36xx *wcn)
{
	int ret;

	wcn->dxe_tx_l_ch.ch_type = WCN36XX_DXE_CH_TX_L;
	wcn->dxe_tx_h_ch.ch_type = WCN36XX_DXE_CH_TX_H;
	wcn->dxe_rx_l_ch.ch_type = WCN36XX_DXE_CH_RX_L;
	wcn->dxe_rx_h_ch.ch_type = WCN36XX_DXE_CH_RX_H;

	wcn->dxe_tx_l_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_TX_L;
	wcn->dxe_tx_h_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_TX_H;
	wcn->dxe_rx_l_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_RX_L;
	wcn->dxe_rx_h_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_RX_H;

	wcn->dxe_tx_l_ch.dxe_wq = WCN36XX_DXE_WQ_TX_L;
	wcn->dxe_tx_h_ch.dxe_wq = WCN36XX_DXE_WQ_TX_H;

	wcn->dxe_tx_l_ch.ctrl_bd = WCN36XX_DXE_CTRL_TX_L_BD;
	wcn->dxe_tx_h_ch.ctrl_bd = WCN36XX_DXE_CTRL_TX_H_BD;

	wcn->dxe_tx_l_ch.ctrl_skb = WCN36XX_DXE_CTRL_TX_L_SKB;
	wcn->dxe_tx_h_ch.ctrl_skb = WCN36XX_DXE_CTRL_TX_H_SKB;

	wcn->dxe_tx_l_ch.reg_ctrl = WCN36XX_DXE_REG_CTL_TX_L;
	wcn->dxe_tx_h_ch.reg_ctrl = WCN36XX_DXE_REG_CTL_TX_H;

	wcn->dxe_tx_l_ch.def_ctrl = WCN36XX_DXE_CH_DEFAULT_CTL_TX_L;
	wcn->dxe_tx_h_ch.def_ctrl = WCN36XX_DXE_CH_DEFAULT_CTL_TX_H;

	/* DXE control block allocation */
	ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_tx_l_ch);
	if (ret)
		goto out_err;
	ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_tx_h_ch);
	if (ret)
		goto out_err;
	ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_rx_l_ch);
	if (ret)
		goto out_err;
	ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_rx_h_ch);
	if (ret)
		goto out_err;

	/* Initialize SMSM state: clear TX_ENABLE, set TX_RINGS_EMPTY */
	ret = qcom_smem_state_update_bits(wcn->tx_enable_state,
					  WCN36XX_SMSM_WLAN_TX_ENABLE |
					  WCN36XX_SMSM_WLAN_TX_RINGS_EMPTY,
					  WCN36XX_SMSM_WLAN_TX_RINGS_EMPTY);
	if (ret)
		goto out_err;

	return 0;

out_err:
	wcn36xx_err("Failed to allocate DXE control blocks\n");
	wcn36xx_dxe_free_ctl_blks(wcn);
	return ret;
}

void wcn36xx_dxe_free_ctl_blks(struct wcn36xx *wcn)
{
	wcn36xx_dxe_free_ctl_block(&wcn->dxe_tx_l_ch);
	wcn36xx_dxe_free_ctl_block(&wcn->dxe_tx_h_ch);
	wcn36xx_dxe_free_ctl_block(&wcn->dxe_rx_l_ch);
	wcn36xx_dxe_free_ctl_block(&wcn->dxe_rx_h_ch);
}

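/*
 * Allocate the channel's coherent descriptor ring and chain the
 * descriptors together through phy_next_l, pointing the last descriptor
 * back at the first so the hardware sees a circular ring.
 */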
static int wcn36xx_dxe_init_descs(struct device *dev, struct wcn36xx_dxe_ch *wcn_ch)
{
	struct wcn36xx_dxe_desc *cur_dxe = NULL;
	struct wcn36xx_dxe_desc *prev_dxe = NULL;
	struct wcn36xx_dxe_ctl *cur_ctl = NULL;
	size_t size;
	int i;

	size = wcn_ch->desc_num * sizeof(struct wcn36xx_dxe_desc);
	wcn_ch->cpu_addr = dma_alloc_coherent(dev, size, &wcn_ch->dma_addr,
					      GFP_KERNEL);
	if (!wcn_ch->cpu_addr)
		return -ENOMEM;

	memset(wcn_ch->cpu_addr, 0, size);

	cur_dxe = (struct wcn36xx_dxe_desc *)wcn_ch->cpu_addr;
	cur_ctl = wcn_ch->head_blk_ctl;

	for (i = 0; i < wcn_ch->desc_num; i++) {
		cur_ctl->desc = cur_dxe;
		cur_ctl->desc_phy_addr = wcn_ch->dma_addr +
			i * sizeof(struct wcn36xx_dxe_desc);

		switch (wcn_ch->ch_type) {
		case WCN36XX_DXE_CH_TX_L:
			cur_dxe->ctrl = WCN36XX_DXE_CTRL_TX_L;
			cur_dxe->dst_addr_l = WCN36XX_DXE_WQ_TX_L;
			break;
		case WCN36XX_DXE_CH_TX_H:
			cur_dxe->ctrl = WCN36XX_DXE_CTRL_TX_H;
			cur_dxe->dst_addr_l = WCN36XX_DXE_WQ_TX_H;
			break;
		case WCN36XX_DXE_CH_RX_L:
			cur_dxe->ctrl = WCN36XX_DXE_CTRL_RX_L;
			cur_dxe->src_addr_l = WCN36XX_DXE_WQ_RX_L;
			break;
		case WCN36XX_DXE_CH_RX_H:
			cur_dxe->ctrl = WCN36XX_DXE_CTRL_RX_H;
			cur_dxe->src_addr_l = WCN36XX_DXE_WQ_RX_H;
			break;
		}
		if (i == 0) {
			cur_dxe->phy_next_l = 0;
		} else if (i < wcn_ch->desc_num - 1) {
			prev_dxe->phy_next_l =
				cur_ctl->desc_phy_addr;
		} else {
			prev_dxe->phy_next_l =
				cur_ctl->desc_phy_addr;
			cur_dxe->phy_next_l =
				wcn_ch->head_blk_ctl->desc_phy_addr;
		}
		cur_ctl = cur_ctl->next;
		prev_dxe = cur_dxe;
		cur_dxe++;
	}

	return 0;
}

static void wcn36xx_dxe_init_tx_bd(struct wcn36xx_dxe_ch *ch,
				   struct wcn36xx_dxe_mem_pool *pool)
{
	int i, chunk_size = pool->chunk_size;
	dma_addr_t bd_phy_addr = pool->phy_addr;
	void *bd_cpu_addr = pool->virt_addr;
	struct wcn36xx_dxe_ctl *cur = ch->head_blk_ctl;

	for (i = 0; i < ch->desc_num; i++) {
		/*
		 * Only every second DXE descriptor needs a BD pointer;
		 * the others point at the skb data itself.
		 */
		if (!(i & 1)) {
			cur->bd_phy_addr = bd_phy_addr;
			cur->bd_cpu_addr = bd_cpu_addr;
			bd_phy_addr += chunk_size;
			bd_cpu_addr += chunk_size;
		} else {
			cur->bd_phy_addr = 0;
			cur->bd_cpu_addr = NULL;
		}
		cur = cur->next;
	}
}

static int wcn36xx_dxe_enable_ch_int(struct wcn36xx *wcn, u16 wcn_ch)
{
	int reg_data = 0;

	wcn36xx_dxe_read_register(wcn,
				  WCN36XX_DXE_INT_MASK_REG,
				  &reg_data);

	reg_data |= wcn_ch;

	wcn36xx_dxe_write_register(wcn,
				   WCN36XX_DXE_INT_MASK_REG,
				   reg_data);
	return 0;
}

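/*
 * Attach a freshly allocated receive skb to a descriptor and map it for
 * the device to DMA into; called on the RX refill path from interrupt
 * context, hence GFP_ATOMIC.
 */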
static int wcn36xx_dxe_fill_skb(struct device *dev, struct wcn36xx_dxe_ctl *ctl)
{
	struct wcn36xx_dxe_desc *dxe = ctl->desc;
	struct sk_buff *skb;

	skb = alloc_skb(WCN36XX_PKT_SIZE, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	dxe->dst_addr_l = dma_map_single(dev,
					 skb_tail_pointer(skb),
					 WCN36XX_PKT_SIZE,
					 DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dxe->dst_addr_l)) {
		dev_err(dev, "unable to map skb rx buffer\n");
		kfree_skb(skb);
		return -ENOMEM;
	}
	ctl->skb = skb;

	return 0;
}

static int wcn36xx_dxe_ch_alloc_skb(struct wcn36xx *wcn,
				    struct wcn36xx_dxe_ch *wcn_ch)
{
	struct wcn36xx_dxe_ctl *cur_ctl = NULL;
	int ret;
	int i;

	cur_ctl = wcn_ch->head_blk_ctl;

	for (i = 0; i < wcn_ch->desc_num; i++) {
		ret = wcn36xx_dxe_fill_skb(wcn->dev, cur_ctl);
		if (ret)
			return ret;
		cur_ctl = cur_ctl->next;
	}

	return 0;
}

static void wcn36xx_dxe_ch_free_skbs(struct wcn36xx *wcn,
				     struct wcn36xx_dxe_ch *wcn_ch)
{
	struct wcn36xx_dxe_ctl *cur = wcn_ch->head_blk_ctl;
	int i;

	for (i = 0; i < wcn_ch->desc_num; i++) {
		kfree_skb(cur->skb);
		cur = cur->next;
	}
}

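/*
 * Complete a deferred TX status report: firmware has told us whether the
 * frame stashed in tx_ack_skb was ACKed, so pass that status to mac80211
 * and let traffic flow again.
 */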
void wcn36xx_dxe_tx_ack_ind(struct wcn36xx *wcn, u32 status)
{
	struct ieee80211_tx_info *info;
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&wcn->dxe_lock, flags);
	skb = wcn->tx_ack_skb;
	wcn->tx_ack_skb = NULL;
	spin_unlock_irqrestore(&wcn->dxe_lock, flags);

	if (!skb) {
		wcn36xx_warn("Spurious TX complete indication\n");
		return;
	}

	info = IEEE80211_SKB_CB(skb);

	if (status == 1)
		info->flags |= IEEE80211_TX_STAT_ACK;

	wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ack status: %d\n", status);

	ieee80211_tx_status_irqsafe(wcn->hw, skb);
	ieee80211_wake_queues(wcn->hw);
}

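/*
 * Reclaim completed TX descriptors: walk the ring from the tail,
 * releasing every element whose VALID bit the hardware has cleared, and
 * wake the mac80211 queues if they were stopped on a full ring.
 */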
static void reap_tx_dxes(struct wcn36xx *wcn, struct wcn36xx_dxe_ch *ch)
{
	struct wcn36xx_dxe_ctl *ctl;
	struct ieee80211_tx_info *info;
	unsigned long flags;

	/*
	 * Use a do-while loop so at least one element is examined: when
	 * the ring is completely full, head and tail point at the same
	 * element, and a plain while loop would not iterate at all.
	 */
	spin_lock_irqsave(&ch->lock, flags);
	ctl = ch->tail_blk_ctl;
	do {
		if (ctl->desc->ctrl & WCN36XX_DXE_CTRL_VALID_MASK)
			break;
		if (ctl->skb) {
			dma_unmap_single(wcn->dev, ctl->desc->src_addr_l,
					 ctl->skb->len, DMA_TO_DEVICE);
			info = IEEE80211_SKB_CB(ctl->skb);
			if (!(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)) {
				/* Free the frame here; when a TX status
				 * report was requested the frame is kept
				 * until the status indication arrives.
				 */
				ieee80211_free_txskb(wcn->hw, ctl->skb);
			}
			spin_lock(&ctl->skb_lock);
			if (wcn->queues_stopped) {
				wcn->queues_stopped = false;
				ieee80211_wake_queues(wcn->hw);
			}
			spin_unlock(&ctl->skb_lock);

			ctl->skb = NULL;
		}
		ctl = ctl->next;
	} while (ctl != ch->head_blk_ctl &&
	       !(ctl->desc->ctrl & WCN36XX_DXE_CTRL_VALID_MASK));

	ch->tail_blk_ctl = ctl;
	spin_unlock_irqrestore(&ch->lock, flags);
}

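/*
 * TX-complete interrupt: for each TX channel that raised the interrupt,
 * acknowledge it and reap the descriptors the hardware is done with.
 */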
static irqreturn_t wcn36xx_irq_tx_complete(int irq, void *dev)
{
	struct wcn36xx *wcn = (struct wcn36xx *)dev;
	int int_src, int_reason;

	wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_INT_SRC_RAW_REG, &int_src);

	if (int_src & WCN36XX_INT_MASK_CHAN_TX_H) {
		wcn36xx_dxe_read_register(wcn,
					  WCN36XX_DXE_CH_STATUS_REG_ADDR_TX_H,
					  &int_reason);

		/* TODO: Check int_reason */

		wcn36xx_dxe_write_register(wcn,
					   WCN36XX_DXE_0_INT_CLR,
					   WCN36XX_INT_MASK_CHAN_TX_H);

		wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_ED_CLR,
					   WCN36XX_INT_MASK_CHAN_TX_H);
		wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ready high\n");
		reap_tx_dxes(wcn, &wcn->dxe_tx_h_ch);
	}

	if (int_src & WCN36XX_INT_MASK_CHAN_TX_L) {
		wcn36xx_dxe_read_register(wcn,
					  WCN36XX_DXE_CH_STATUS_REG_ADDR_TX_L,
					  &int_reason);
		/* TODO: Check int_reason */

		wcn36xx_dxe_write_register(wcn,
					   WCN36XX_DXE_0_INT_CLR,
					   WCN36XX_INT_MASK_CHAN_TX_L);

		wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_ED_CLR,
					   WCN36XX_INT_MASK_CHAN_TX_L);
		wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ready low\n");
		reap_tx_dxes(wcn, &wcn->dxe_tx_l_ch);
	}

	return IRQ_HANDLED;
}

static irqreturn_t wcn36xx_irq_rx_ready(int irq, void *dev)
{
	struct wcn36xx *wcn = (struct wcn36xx *)dev;

	disable_irq_nosync(wcn->rx_irq);
	wcn36xx_dxe_rx_frame(wcn);
	enable_irq(wcn->rx_irq);
	return IRQ_HANDLED;
}

static int wcn36xx_dxe_request_irqs(struct wcn36xx *wcn)
{
	int ret;

	ret = request_irq(wcn->tx_irq, wcn36xx_irq_tx_complete,
			  IRQF_TRIGGER_HIGH, "wcn36xx_tx", wcn);
	if (ret) {
		wcn36xx_err("failed to alloc tx irq\n");
		goto out_err;
	}

	ret = request_irq(wcn->rx_irq, wcn36xx_irq_rx_ready, IRQF_TRIGGER_HIGH,
			  "wcn36xx_rx", wcn);
	if (ret) {
		wcn36xx_err("failed to alloc rx irq\n");
		goto out_txirq;
	}

	enable_irq_wake(wcn->rx_irq);

	return 0;

out_txirq:
	free_irq(wcn->tx_irq, wcn);
out_err:
	return ret;
}

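/*
 * Drain completed RX descriptors: for each element the hardware has
 * released (VALID bit cleared), hand a fresh skb to the descriptor and
 * pass the filled one up the stack, then mark the descriptor VALID again
 * so the hardware can reuse it.
 */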
static int wcn36xx_rx_handle_packets(struct wcn36xx *wcn,
				     struct wcn36xx_dxe_ch *ch)
{
	struct wcn36xx_dxe_ctl *ctl = ch->head_blk_ctl;
	struct wcn36xx_dxe_desc *dxe = ctl->desc;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
	int ret = 0, int_mask;
	u32 value;

	if (ch->ch_type == WCN36XX_DXE_CH_RX_L) {
		value = WCN36XX_DXE_CTRL_RX_L;
		int_mask = WCN36XX_DXE_INT_CH1_MASK;
	} else {
		value = WCN36XX_DXE_CTRL_RX_H;
		int_mask = WCN36XX_DXE_INT_CH3_MASK;
	}

	while (!(dxe->ctrl & WCN36XX_DXE_CTRL_VALID_MASK)) {
		skb = ctl->skb;
		dma_addr = dxe->dst_addr_l;
		ret = wcn36xx_dxe_fill_skb(wcn->dev, ctl);
		if (!ret) {
			/* New skb allocation ok. Use the new one and queue
			 * the old one to the network stack.
			 */
			dma_unmap_single(wcn->dev, dma_addr, WCN36XX_PKT_SIZE,
					DMA_FROM_DEVICE);
			wcn36xx_rx_skb(wcn, skb);
		} /* else keep the old skb unsubmitted and reuse it for RX DMA */

		dxe->ctrl = value;
		ctl = ctl->next;
		dxe = ctl->desc;
	}
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_ENCH_ADDR, int_mask);

	ch->head_blk_ctl = ctl;
	return 0;
}

void wcn36xx_dxe_rx_frame(struct wcn36xx *wcn)
{
	int int_src;

	wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_INT_SRC_RAW_REG, &int_src);

	/* RX_LOW_PRI */
	if (int_src & WCN36XX_DXE_INT_CH1_MASK) {
		wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_CLR,
					   WCN36XX_DXE_INT_CH1_MASK);
		wcn36xx_rx_handle_packets(wcn, &(wcn->dxe_rx_l_ch));
	}

	/* RX_HIGH_PRI */
	if (int_src & WCN36XX_DXE_INT_CH3_MASK) {
		/* Clean up all the INT within this channel */
		wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_CLR,
					   WCN36XX_DXE_INT_CH3_MASK);
		wcn36xx_rx_handle_packets(wcn, &(wcn->dxe_rx_h_ch));
	}

	if (!int_src)
		wcn36xx_warn("No DXE interrupt pending\n");
}

int wcn36xx_dxe_allocate_mem_pools(struct wcn36xx *wcn)
{
	size_t s;
	void *cpu_addr;

	/* Allocate BD headers for MGMT frames */

	/* Where this comes from, ask QC */
	wcn->mgmt_mem_pool.chunk_size = WCN36XX_BD_CHUNK_SIZE +
		16 - (WCN36XX_BD_CHUNK_SIZE % 8);
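	/*
	 * A worked example of the padding rule above, assuming
	 * WCN36XX_BD_CHUNK_SIZE is 128 (already a multiple of 8):
	 * 128 + 16 - (128 % 8) = 144 bytes per chunk, i.e. the BD plus
	 * 16 bytes of slack. The value 128 is an assumption here; only
	 * the formula itself comes from the code.
	 */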

	s = wcn->mgmt_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_H;
	cpu_addr = dma_alloc_coherent(wcn->dev, s, &wcn->mgmt_mem_pool.phy_addr,
				      GFP_KERNEL);
	if (!cpu_addr)
		goto out_err;

	wcn->mgmt_mem_pool.virt_addr = cpu_addr;
	memset(cpu_addr, 0, s);

	/* Allocate BD headers for DATA frames */

	/* Where this comes from, ask QC */
	wcn->data_mem_pool.chunk_size = WCN36XX_BD_CHUNK_SIZE +
		16 - (WCN36XX_BD_CHUNK_SIZE % 8);

	s = wcn->data_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_L;
	cpu_addr = dma_alloc_coherent(wcn->dev, s, &wcn->data_mem_pool.phy_addr,
				      GFP_KERNEL);
	if (!cpu_addr)
		goto out_err;

	wcn->data_mem_pool.virt_addr = cpu_addr;
	memset(cpu_addr, 0, s);

	return 0;

out_err:
	wcn36xx_dxe_free_mem_pools(wcn);
	wcn36xx_err("Failed to allocate BD mempool\n");
	return -ENOMEM;
}

void wcn36xx_dxe_free_mem_pools(struct wcn36xx *wcn)
{
	if (wcn->mgmt_mem_pool.virt_addr)
		dma_free_coherent(wcn->dev, wcn->mgmt_mem_pool.chunk_size *
				  WCN36XX_DXE_CH_DESC_NUMB_TX_H,
				  wcn->mgmt_mem_pool.virt_addr,
				  wcn->mgmt_mem_pool.phy_addr);

	if (wcn->data_mem_pool.virt_addr) {
		dma_free_coherent(wcn->dev, wcn->data_mem_pool.chunk_size *
				  WCN36XX_DXE_CH_DESC_NUMB_TX_L,
				  wcn->data_mem_pool.virt_addr,
				  wcn->data_mem_pool.phy_addr);
	}
}

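/*
 * Queue one frame for transmission. Each frame occupies two ring
 * elements: the first descriptor points at the preallocated buffer
 * descriptor (BD) header, the second at the skb payload. The ring is
 * full when the next control block still holds an un-reaped skb.
 */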
int wcn36xx_dxe_tx_frame(struct wcn36xx *wcn,
			 struct wcn36xx_vif *vif_priv,
			 struct sk_buff *skb,
			 bool is_low)
{
	struct wcn36xx_dxe_ctl *ctl = NULL;
	struct wcn36xx_dxe_desc *desc = NULL;
	struct wcn36xx_dxe_ch *ch = NULL;
	unsigned long flags;
	int ret;

	ch = is_low ? &wcn->dxe_tx_l_ch : &wcn->dxe_tx_h_ch;

	spin_lock_irqsave(&ch->lock, flags);
	ctl = ch->head_blk_ctl;

	spin_lock(&ctl->next->skb_lock);

	/*
	 * If the skb is not NULL we have reached the tail of the ring,
	 * hence the ring is full. Stop queues to let mac80211 back off
	 * until the ring has an empty slot again.
	 */
	if (ctl->next->skb) {
		ieee80211_stop_queues(wcn->hw);
		wcn->queues_stopped = true;
		spin_unlock(&ctl->next->skb_lock);
		spin_unlock_irqrestore(&ch->lock, flags);
		return -EBUSY;
	}
	spin_unlock(&ctl->next->skb_lock);

	ctl->skb = NULL;
	desc = ctl->desc;

	/* Set source address of the BD we send */
	desc->src_addr_l = ctl->bd_phy_addr;

	desc->dst_addr_l = ch->dxe_wq;
	desc->fr_len = sizeof(struct wcn36xx_tx_bd);
	desc->ctrl = ch->ctrl_bd;

	wcn36xx_dbg(WCN36XX_DBG_DXE, "DXE TX\n");

	wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "DESC1 >>> ",
			 (char *)desc, sizeof(*desc));
	wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP,
			 "BD   >>> ", (char *)ctl->bd_cpu_addr,
			 sizeof(struct wcn36xx_tx_bd));

	/* Set source address of the SKB we send */
	ctl = ctl->next;
	ctl->skb = skb;
	desc = ctl->desc;
	if (ctl->bd_cpu_addr) {
		wcn36xx_err("bd_cpu_addr must be NULL for skb DXE\n");
		ret = -EINVAL;
		goto unlock;
	}

	desc->src_addr_l = dma_map_single(wcn->dev,
					  ctl->skb->data,
					  ctl->skb->len,
					  DMA_TO_DEVICE);

	desc->dst_addr_l = ch->dxe_wq;
	desc->fr_len = ctl->skb->len;

	/* Set the DXE descriptor to VALID */
	desc->ctrl = ch->ctrl_skb;

	wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "DESC2 >>> ",
			 (char *)desc, sizeof(*desc));
	wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "SKB   >>> ",
			 (char *)ctl->skb->data, ctl->skb->len);

	/* Move the head of the ring to the next empty descriptor */
	ch->head_blk_ctl = ctl->next;

	/*
	 * When connected and trying to send a data frame, the chip can be
	 * in sleep mode and writing to the register will not wake it up.
	 * Instead, notify the chip about the new frame through the SMSM
	 * bus.
	 */
	if (is_low && vif_priv->pw_state == WCN36XX_BMPS) {
		qcom_smem_state_update_bits(wcn->tx_rings_empty_state,
					    WCN36XX_SMSM_WLAN_TX_ENABLE,
					    WCN36XX_SMSM_WLAN_TX_ENABLE);
	} else {
		/* Indicate End Of Packet and generate an interrupt on
		 * descriptor done.
		 */
		wcn36xx_dxe_write_register(wcn,
			ch->reg_ctrl, ch->def_ctrl);
	}

	ret = 0;
unlock:
	spin_unlock_irqrestore(&ch->lock, flags);
	return ret;
}

int wcn36xx_dxe_init(struct wcn36xx *wcn)
{
	int reg_data = 0, ret;

	reg_data = WCN36XX_DXE_REG_RESET;
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_REG_CSR_RESET, reg_data);

	/* Select channels for rx avail and xfer done interrupts... */
	reg_data = (WCN36XX_DXE_INT_CH3_MASK | WCN36XX_DXE_INT_CH1_MASK) << 16 |
		    WCN36XX_DXE_INT_CH0_MASK | WCN36XX_DXE_INT_CH4_MASK;
	if (wcn->is_pronto)
		wcn36xx_ccu_write_register(wcn, WCN36XX_CCU_DXE_INT_SELECT_PRONTO, reg_data);
	else
		wcn36xx_ccu_write_register(wcn, WCN36XX_CCU_DXE_INT_SELECT_RIVA, reg_data);

	/***************************************/
	/* Init descriptors for TX LOW channel */
	/***************************************/
	ret = wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_tx_l_ch);
	if (ret)
		goto out_err;
	wcn36xx_dxe_init_tx_bd(&wcn->dxe_tx_l_ch, &wcn->data_mem_pool);

	/* Write channel head to a NEXT register */
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_TX_L,
		wcn->dxe_tx_l_ch.head_blk_ctl->desc_phy_addr);

	/* Program DMA destination addr for TX LOW */
	wcn36xx_dxe_write_register(wcn,
		WCN36XX_DXE_CH_DEST_ADDR_TX_L,
		WCN36XX_DXE_WQ_TX_L);

	wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_REG_CH_EN, &reg_data);
	wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_L);

	/****************************************/
	/* Init descriptors for TX HIGH channel */
	/****************************************/
	ret = wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_tx_h_ch);
	if (ret)
		goto out_err;
	wcn36xx_dxe_init_tx_bd(&wcn->dxe_tx_h_ch, &wcn->mgmt_mem_pool);

	/* Write channel head to a NEXT register */
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_TX_H,
		wcn->dxe_tx_h_ch.head_blk_ctl->desc_phy_addr);

	/* Program DMA destination addr for TX HIGH */
	wcn36xx_dxe_write_register(wcn,
		WCN36XX_DXE_CH_DEST_ADDR_TX_H,
		WCN36XX_DXE_WQ_TX_H);

	wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_REG_CH_EN, &reg_data);

	/* Enable channel interrupts */
	wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_H);

	/***************************************/
	/* Init descriptors for RX LOW channel */
	/***************************************/
	ret = wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_rx_l_ch);
	if (ret)
		goto out_err;

	/* For RX we need to preallocate buffers */
	ret = wcn36xx_dxe_ch_alloc_skb(wcn, &wcn->dxe_rx_l_ch);
	if (ret)
		goto out_err;

	/* Write channel head to a NEXT register */
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_RX_L,
		wcn->dxe_rx_l_ch.head_blk_ctl->desc_phy_addr);

	/* Write DMA source address */
	wcn36xx_dxe_write_register(wcn,
		WCN36XX_DXE_CH_SRC_ADDR_RX_L,
		WCN36XX_DXE_WQ_RX_L);

	/* Program preallocated destination address */
	wcn36xx_dxe_write_register(wcn,
		WCN36XX_DXE_CH_DEST_ADDR_RX_L,
		wcn->dxe_rx_l_ch.head_blk_ctl->desc->phy_next_l);

	/* Enable default control registers */
	wcn36xx_dxe_write_register(wcn,
		WCN36XX_DXE_REG_CTL_RX_L,
		WCN36XX_DXE_CH_DEFAULT_CTL_RX_L);

	/* Enable channel interrupts */
	wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_L);

	/****************************************/
	/* Init descriptors for RX HIGH channel */
	/****************************************/
	ret = wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_rx_h_ch);
	if (ret)
		goto out_err;

	/* For RX we need to preallocate buffers */
	ret = wcn36xx_dxe_ch_alloc_skb(wcn, &wcn->dxe_rx_h_ch);
	if (ret)
		goto out_err;

	/* Write channel head to a NEXT register */
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_RX_H,
		wcn->dxe_rx_h_ch.head_blk_ctl->desc_phy_addr);

	/* Write DMA source address */
	wcn36xx_dxe_write_register(wcn,
		WCN36XX_DXE_CH_SRC_ADDR_RX_H,
		WCN36XX_DXE_WQ_RX_H);

	/* Program preallocated destination address */
	wcn36xx_dxe_write_register(wcn,
		WCN36XX_DXE_CH_DEST_ADDR_RX_H,
		wcn->dxe_rx_h_ch.head_blk_ctl->desc->phy_next_l);

	/* Enable default control registers */
	wcn36xx_dxe_write_register(wcn,
		WCN36XX_DXE_REG_CTL_RX_H,
		WCN36XX_DXE_CH_DEFAULT_CTL_RX_H);

	/* Enable channel interrupts */
	wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_H);

	ret = wcn36xx_dxe_request_irqs(wcn);
	if (ret < 0)
		goto out_err;

	return 0;

out_err:
	return ret;
}

void wcn36xx_dxe_deinit(struct wcn36xx *wcn)
{
	free_irq(wcn->tx_irq, wcn);
	free_irq(wcn->rx_irq, wcn);

	if (wcn->tx_ack_skb) {
		ieee80211_tx_status_irqsafe(wcn->hw, wcn->tx_ack_skb);
		wcn->tx_ack_skb = NULL;
	}

	wcn36xx_dxe_ch_free_skbs(wcn, &wcn->dxe_rx_l_ch);
	wcn36xx_dxe_ch_free_skbs(wcn, &wcn->dxe_rx_h_ch);
}