// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, MediaTek Inc.
 * Copyright (c) 2021-2022, Intel Corporation.
 *
 * Authors:
 *  Amir Hanania <amir.hanania@intel.com>
 *  Haijun Liu <haijun.liu@mediatek.com>
 *  Eliot Lee <eliot.lee@intel.com>
 *  Moises Veleta <moises.veleta@intel.com>
 *  Ricardo Martinez <ricardo.martinez@linux.intel.com>
 *
 * Contributors:
 *  Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
 *  Sreehari Kancharla <sreehari.kancharla@intel.com>
 */

#include <linux/atomic.h>
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/minmax.h>
#include <linux/netdevice.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/types.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

#include "t7xx_dpmaif.h"
#include "t7xx_hif_dpmaif.h"
#include "t7xx_hif_dpmaif_tx.h"
#include "t7xx_pci.h"

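/* Maximum number of skbs dequeued from a TX queue in one burst, and the
 * number of DRB entries (TX descriptors) in each DRB ring. Each queued
 * packet consumes one message DRB plus one payload DRB per data segment.
 */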
#define DPMAIF_SKB_TX_BURST_CNT	5
#define DPMAIF_DRB_LIST_LEN	6144

/* DRB dtype */
#define DES_DTYP_PD		0
#define DES_DTYP_MSG		1
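/* A message DRB (DES_DTYP_MSG) carries per-packet metadata such as the
 * channel ID and the L4 checksum flag, while a payload DRB (DES_DTYP_PD)
 * carries the DMA address and length of one data segment. The DRB of the
 * last segment is written without the CONT bit to mark the packet boundary.
 */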

static unsigned int t7xx_dpmaif_update_drb_rd_idx(struct dpmaif_ctrl *dpmaif_ctrl,
						  unsigned int q_num)
{
	struct dpmaif_tx_queue *txq = &dpmaif_ctrl->txq[q_num];
	unsigned int old_sw_rd_idx, new_hw_rd_idx, drb_cnt;
	unsigned long flags;

	if (!txq->que_started)
		return 0;

	old_sw_rd_idx = txq->drb_rd_idx;
	new_hw_rd_idx = t7xx_dpmaif_ul_get_rd_idx(&dpmaif_ctrl->hw_info, q_num);
	if (new_hw_rd_idx >= DPMAIF_DRB_LIST_LEN) {
		dev_err(dpmaif_ctrl->dev, "Out of range read index: %u\n", new_hw_rd_idx);
		return 0;
	}

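	/* Modular distance between the old SW and new HW read indices.
	 * E.g. with drb_size_cnt = 8, old_sw_rd_idx = 6 and new_hw_rd_idx = 2,
	 * the HW has consumed 8 - 6 + 2 = 4 DRBs.
	 */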
	if (old_sw_rd_idx <= new_hw_rd_idx)
		drb_cnt = new_hw_rd_idx - old_sw_rd_idx;
	else
		drb_cnt = txq->drb_size_cnt - old_sw_rd_idx + new_hw_rd_idx;

	spin_lock_irqsave(&txq->tx_lock, flags);
	txq->drb_rd_idx = new_hw_rd_idx;
	spin_unlock_irqrestore(&txq->tx_lock, flags);

	return drb_cnt;
}

static unsigned int t7xx_dpmaif_release_tx_buffer(struct dpmaif_ctrl *dpmaif_ctrl,
						  unsigned int q_num, unsigned int release_cnt)
{
	struct dpmaif_tx_queue *txq = &dpmaif_ctrl->txq[q_num];
	struct dpmaif_callbacks *cb = dpmaif_ctrl->callbacks;
	struct dpmaif_drb_skb *cur_drb_skb, *drb_skb_base;
	struct dpmaif_drb *cur_drb, *drb_base;
	unsigned int drb_cnt, i, cur_idx;
	unsigned long flags;

	drb_skb_base = txq->drb_skb_base;
	drb_base = txq->drb_base;

	spin_lock_irqsave(&txq->tx_lock, flags);
	drb_cnt = txq->drb_size_cnt;
	cur_idx = txq->drb_release_rd_idx;
	spin_unlock_irqrestore(&txq->tx_lock, flags);

	for (i = 0; i < release_cnt; i++) {
		cur_drb = drb_base + cur_idx;
		if (FIELD_GET(DRB_HDR_DTYP, le32_to_cpu(cur_drb->header)) == DES_DTYP_PD) {
			cur_drb_skb = drb_skb_base + cur_idx;
			if (!cur_drb_skb->is_msg)
				dma_unmap_single(dpmaif_ctrl->dev, cur_drb_skb->bus_addr,
						 cur_drb_skb->data_len, DMA_TO_DEVICE);

			if (!FIELD_GET(DRB_HDR_CONT, le32_to_cpu(cur_drb->header))) {
				if (!cur_drb_skb->skb) {
					dev_err(dpmaif_ctrl->dev,
						"txq%u: DRB check fail, invalid skb\n", q_num);
					continue;
				}

				dev_kfree_skb_any(cur_drb_skb->skb);
			}

			cur_drb_skb->skb = NULL;
		}

		spin_lock_irqsave(&txq->tx_lock, flags);
		cur_idx = t7xx_ring_buf_get_next_wr_idx(drb_cnt, cur_idx);
		txq->drb_release_rd_idx = cur_idx;
		spin_unlock_irqrestore(&txq->tx_lock, flags);

		if (atomic_inc_return(&txq->tx_budget) > txq->drb_size_cnt / 8)
			cb->state_notify(dpmaif_ctrl->t7xx_dev, DMPAIF_TXQ_STATE_IRQ, txq->index);
	}

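	/* Callers pass release_cnt >= 1, so cur_drb points at the last DRB
	 * processed above; a packet's final DRB must not carry the CONT bit.
	 */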
	if (FIELD_GET(DRB_HDR_CONT, le32_to_cpu(cur_drb->header)))
		dev_err(dpmaif_ctrl->dev, "txq%u: DRB not marked as the last one\n", q_num);

	return i;
}

static int t7xx_dpmaif_tx_release(struct dpmaif_ctrl *dpmaif_ctrl,
				  unsigned int q_num, unsigned int budget)
{
	struct dpmaif_tx_queue *txq = &dpmaif_ctrl->txq[q_num];
	unsigned int rel_cnt, real_rel_cnt;

	/* Update read index from HW */
	t7xx_dpmaif_update_drb_rd_idx(dpmaif_ctrl, q_num);

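	/* Number of DRBs the HW has consumed but the AP has not released yet */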
	rel_cnt = t7xx_ring_buf_rd_wr_count(txq->drb_size_cnt, txq->drb_release_rd_idx,
					    txq->drb_rd_idx, DPMAIF_READ);

	real_rel_cnt = min_not_zero(budget, rel_cnt);
	if (real_rel_cnt)
		real_rel_cnt = t7xx_dpmaif_release_tx_buffer(dpmaif_ctrl, q_num, real_rel_cnt);

	return real_rel_cnt < rel_cnt ? -EAGAIN : 0;
}

static bool t7xx_dpmaif_drb_ring_not_empty(struct dpmaif_tx_queue *txq)
{
	return !!t7xx_dpmaif_update_drb_rd_idx(txq->dpmaif_ctrl, txq->index);
}

static void t7xx_dpmaif_tx_done(struct work_struct *work)
{
	struct dpmaif_tx_queue *txq = container_of(work, struct dpmaif_tx_queue, dpmaif_tx_work);
	struct dpmaif_ctrl *dpmaif_ctrl = txq->dpmaif_ctrl;
	struct dpmaif_hw_info *hw_info;
	int ret;

	hw_info = &dpmaif_ctrl->hw_info;
	ret = t7xx_dpmaif_tx_release(dpmaif_ctrl, txq->index, txq->drb_size_cnt);
	if (ret == -EAGAIN ||
	    (t7xx_dpmaif_ul_clr_done(hw_info, txq->index) &&
	     t7xx_dpmaif_drb_ring_not_empty(txq))) {
		queue_work(dpmaif_ctrl->txq[txq->index].worker,
			   &dpmaif_ctrl->txq[txq->index].dpmaif_tx_work);
		/* Give the device time to enter the low power state */
		t7xx_dpmaif_clr_ip_busy_sts(hw_info);
	} else {
		t7xx_dpmaif_clr_ip_busy_sts(hw_info);
		t7xx_dpmaif_unmask_ulq_intr(hw_info, txq->index);
	}
}

static void t7xx_setup_msg_drb(struct dpmaif_ctrl *dpmaif_ctrl, unsigned int q_num,
			       unsigned int cur_idx, unsigned int pkt_len, unsigned int count_l,
			       unsigned int channel_id)
{
	struct dpmaif_drb *drb_base = dpmaif_ctrl->txq[q_num].drb_base;
	struct dpmaif_drb *drb = drb_base + cur_idx;

	drb->header = cpu_to_le32(FIELD_PREP(DRB_HDR_DTYP, DES_DTYP_MSG) |
				  FIELD_PREP(DRB_HDR_CONT, 1) |
				  FIELD_PREP(DRB_HDR_DATA_LEN, pkt_len));

	drb->msg.msg_hdr = cpu_to_le32(FIELD_PREP(DRB_MSG_COUNT_L, count_l) |
				       FIELD_PREP(DRB_MSG_CHANNEL_ID, channel_id) |
				       FIELD_PREP(DRB_MSG_L4_CHK, 1));
}

static void t7xx_setup_payload_drb(struct dpmaif_ctrl *dpmaif_ctrl, unsigned int q_num,
				   unsigned int cur_idx, dma_addr_t data_addr,
				   unsigned int pkt_size, bool last_one)
{
	struct dpmaif_drb *drb_base = dpmaif_ctrl->txq[q_num].drb_base;
	struct dpmaif_drb *drb = drb_base + cur_idx;
	u32 header;

	header = FIELD_PREP(DRB_HDR_DTYP, DES_DTYP_PD) | FIELD_PREP(DRB_HDR_DATA_LEN, pkt_size);
	if (!last_one)
		header |= FIELD_PREP(DRB_HDR_CONT, 1);

	drb->header = cpu_to_le32(header);
	drb->pd.data_addr_l = cpu_to_le32(lower_32_bits(data_addr));
	drb->pd.data_addr_h = cpu_to_le32(upper_32_bits(data_addr));
}

static void t7xx_record_drb_skb(struct dpmaif_ctrl *dpmaif_ctrl, unsigned int q_num,
				unsigned int cur_idx, struct sk_buff *skb, bool is_msg,
				bool is_frag, bool is_last_one, dma_addr_t bus_addr,
				unsigned int data_len)
{
	struct dpmaif_drb_skb *drb_skb_base = dpmaif_ctrl->txq[q_num].drb_skb_base;
	struct dpmaif_drb_skb *drb_skb = drb_skb_base + cur_idx;

	drb_skb->skb = skb;
	drb_skb->bus_addr = bus_addr;
	drb_skb->data_len = data_len;
	drb_skb->index = cur_idx;
	drb_skb->is_msg = is_msg;
	drb_skb->is_frag = is_frag;
	drb_skb->is_last = is_last_one;
}

static int t7xx_dpmaif_add_skb_to_ring(struct dpmaif_ctrl *dpmaif_ctrl, struct sk_buff *skb)
{
	struct dpmaif_callbacks *cb = dpmaif_ctrl->callbacks;
	unsigned int wr_cnt, send_cnt, payload_cnt;
	unsigned int cur_idx, drb_wr_idx_backup;
	struct skb_shared_info *shinfo;
	struct dpmaif_tx_queue *txq;
	struct t7xx_skb_cb *skb_cb;
	unsigned long flags;

	skb_cb = T7XX_SKB_CB(skb);
	txq = &dpmaif_ctrl->txq[skb_cb->txq_number];
	if (!txq->que_started || dpmaif_ctrl->state != DPMAIF_STATE_PWRON)
		return -ENODEV;

	atomic_set(&txq->tx_processing, 1);
	/* Ensure tx_processing is set to 1 before the TX flow actually begins */
	smp_mb();

	shinfo = skb_shinfo(skb);
	if (shinfo->frag_list)
		dev_warn_ratelimited(dpmaif_ctrl->dev, "frag_list not supported\n");

	/* One payload DRB for skb->data plus one per page fragment,
	 * and one extra message DRB for the whole packet.
	 */
	payload_cnt = shinfo->nr_frags + 1;
	send_cnt = payload_cnt + 1;

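	/* Reserve ring space for the whole packet up front; on a DMA mapping
	 * failure the write index is rolled back to drb_wr_idx_backup below.
	 */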
	spin_lock_irqsave(&txq->tx_lock, flags);
	cur_idx = txq->drb_wr_idx;
	drb_wr_idx_backup = cur_idx;
	txq->drb_wr_idx += send_cnt;
	if (txq->drb_wr_idx >= txq->drb_size_cnt)
		txq->drb_wr_idx -= txq->drb_size_cnt;
	t7xx_setup_msg_drb(dpmaif_ctrl, txq->index, cur_idx, skb->len, 0, skb_cb->netif_idx);
	t7xx_record_drb_skb(dpmaif_ctrl, txq->index, cur_idx, skb, true, 0, 0, 0, 0);
	spin_unlock_irqrestore(&txq->tx_lock, flags);

	for (wr_cnt = 0; wr_cnt < payload_cnt; wr_cnt++) {
		bool is_frag, is_last_one = wr_cnt == payload_cnt - 1;
		unsigned int data_len;
		dma_addr_t bus_addr;
		void *data_addr;

		if (!wr_cnt) {
			data_len = skb_headlen(skb);
			data_addr = skb->data;
			is_frag = false;
		} else {
			skb_frag_t *frag = shinfo->frags + wr_cnt - 1;

			data_len = skb_frag_size(frag);
			data_addr = skb_frag_address(frag);
			is_frag = true;
		}

		bus_addr = dma_map_single(dpmaif_ctrl->dev, data_addr, data_len, DMA_TO_DEVICE);
		if (dma_mapping_error(dpmaif_ctrl->dev, bus_addr))
			goto unmap_buffers;

		cur_idx = t7xx_ring_buf_get_next_wr_idx(txq->drb_size_cnt, cur_idx);

		spin_lock_irqsave(&txq->tx_lock, flags);
		t7xx_setup_payload_drb(dpmaif_ctrl, txq->index, cur_idx, bus_addr, data_len,
				       is_last_one);
		t7xx_record_drb_skb(dpmaif_ctrl, txq->index, cur_idx, skb, false, is_frag,
				    is_last_one, bus_addr, data_len);
		spin_unlock_irqrestore(&txq->tx_lock, flags);
	}

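	/* Report a full queue once the remaining budget cannot hold another
	 * worst-case packet: MAX_SKB_FRAGS payload DRBs plus one for the
	 * linear data and one message DRB.
	 */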
	if (atomic_sub_return(send_cnt, &txq->tx_budget) <= (MAX_SKB_FRAGS + 2))
		cb->state_notify(dpmaif_ctrl->t7xx_dev, DMPAIF_TXQ_STATE_FULL, txq->index);

	atomic_set(&txq->tx_processing, 0);

	return 0;

unmap_buffers:
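	/* Walk backwards from the last successfully mapped payload DRB
	 * (cur_idx still points at it) and undo each DMA mapping; the
	 * message DRB at the start of the packet has no mapping to undo.
	 */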
	while (wr_cnt--) {
		struct dpmaif_drb_skb *drb_skb = txq->drb_skb_base + cur_idx;

		dma_unmap_single(dpmaif_ctrl->dev, drb_skb->bus_addr,
				 drb_skb->data_len, DMA_TO_DEVICE);
		cur_idx = cur_idx ? cur_idx - 1 : txq->drb_size_cnt - 1;
	}

	txq->drb_wr_idx = drb_wr_idx_backup;
	atomic_set(&txq->tx_processing, 0);

	return -ENOMEM;
}

static bool t7xx_tx_lists_are_all_empty(const struct dpmaif_ctrl *dpmaif_ctrl)
{
	int i;

	for (i = 0; i < DPMAIF_TXQ_NUM; i++) {
		if (!skb_queue_empty(&dpmaif_ctrl->txq[i].tx_skb_head))
			return false;
	}

	return true;
}

/* Currently, only the default TX queue is used */
static struct dpmaif_tx_queue *t7xx_select_tx_queue(struct dpmaif_ctrl *dpmaif_ctrl)
{
	struct dpmaif_tx_queue *txq;

	txq = &dpmaif_ctrl->txq[DPMAIF_TX_DEFAULT_QUEUE];
	if (!txq->que_started)
		return NULL;

	return txq;
}

static unsigned int t7xx_txq_drb_wr_available(struct dpmaif_tx_queue *txq)
{
	return t7xx_ring_buf_rd_wr_count(txq->drb_size_cnt, txq->drb_release_rd_idx,
					 txq->drb_wr_idx, DPMAIF_WRITE);
}

static unsigned int t7xx_skb_drb_cnt(struct sk_buff *skb)
{
	/* Normal DRB (frags data + skb linear data) + msg DRB */
	return skb_shinfo(skb)->nr_frags + 2;
}

static int t7xx_txq_burst_send_skb(struct dpmaif_tx_queue *txq)
{
	unsigned int drb_remain_cnt, i;
	unsigned int send_drb_cnt;
	int drb_cnt = 0;
	int ret = 0;

	drb_remain_cnt = t7xx_txq_drb_wr_available(txq);

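	/* Dequeue up to DPMAIF_SKB_TX_BURST_CNT skbs. When the cached free
	 * count is too small for the next skb, refresh it to pick up DRBs
	 * released concurrently by the TX-done worker and retry the same
	 * skb, preserving packet order.
	 */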
	for (i = 0; i < DPMAIF_SKB_TX_BURST_CNT; i++) {
		struct sk_buff *skb;

		skb = skb_peek(&txq->tx_skb_head);
		if (!skb)
			break;

		send_drb_cnt = t7xx_skb_drb_cnt(skb);
		if (drb_remain_cnt < send_drb_cnt) {
			drb_remain_cnt = t7xx_txq_drb_wr_available(txq);
			continue;
		}

		drb_remain_cnt -= send_drb_cnt;

		ret = t7xx_dpmaif_add_skb_to_ring(txq->dpmaif_ctrl, skb);
		if (ret < 0) {
			dev_err(txq->dpmaif_ctrl->dev,
				"Failed to add skb to device's ring: %d\n", ret);
			break;
		}

		drb_cnt += send_drb_cnt;
		skb_unlink(skb, &txq->tx_skb_head);
	}

	if (drb_cnt > 0)
		return drb_cnt;

	return ret;
}

static void t7xx_do_tx_hw_push(struct dpmaif_ctrl *dpmaif_ctrl)
{
	do {
		struct dpmaif_tx_queue *txq;
		int drb_send_cnt;

		txq = t7xx_select_tx_queue(dpmaif_ctrl);
		if (!txq)
			return;

		drb_send_cnt = t7xx_txq_burst_send_skb(txq);
		if (drb_send_cnt <= 0) {
			usleep_range(10, 20);
			cond_resched();
			continue;
		}

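		/* Ring the doorbell; the HW register takes the added length
		 * in words rather than DRBs, hence the DPMAIF_UL_DRB_SIZE_WORD
		 * scaling.
		 */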
		t7xx_dpmaif_ul_update_hw_drb_cnt(&dpmaif_ctrl->hw_info, txq->index,
						 drb_send_cnt * DPMAIF_UL_DRB_SIZE_WORD);

		cond_resched();
	} while (!t7xx_tx_lists_are_all_empty(dpmaif_ctrl) && !kthread_should_stop() &&
		 (dpmaif_ctrl->state == DPMAIF_STATE_PWRON));
}

static int t7xx_dpmaif_tx_hw_push_thread(void *arg)
{
	struct dpmaif_ctrl *dpmaif_ctrl = arg;

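	/* Sleep until there are queued skbs while powered on, or until the
	 * thread is asked to stop; a signal simply restarts the wait.
	 */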
	while (!kthread_should_stop()) {
		if (t7xx_tx_lists_are_all_empty(dpmaif_ctrl) ||
		    dpmaif_ctrl->state != DPMAIF_STATE_PWRON) {
			if (wait_event_interruptible(dpmaif_ctrl->tx_wq,
						     (!t7xx_tx_lists_are_all_empty(dpmaif_ctrl) &&
						     dpmaif_ctrl->state == DPMAIF_STATE_PWRON) ||
						     kthread_should_stop()))
				continue;

			if (kthread_should_stop())
				break;
		}

		t7xx_do_tx_hw_push(dpmaif_ctrl);
	}

	return 0;
}

int t7xx_dpmaif_tx_thread_init(struct dpmaif_ctrl *dpmaif_ctrl)
{
	init_waitqueue_head(&dpmaif_ctrl->tx_wq);
	dpmaif_ctrl->tx_thread = kthread_run(t7xx_dpmaif_tx_hw_push_thread,
					     dpmaif_ctrl, "dpmaif_tx_hw_push");
	return PTR_ERR_OR_ZERO(dpmaif_ctrl->tx_thread);
}

void t7xx_dpmaif_tx_thread_rel(struct dpmaif_ctrl *dpmaif_ctrl)
{
	if (dpmaif_ctrl->tx_thread)
		kthread_stop(dpmaif_ctrl->tx_thread);
}

/**
 * t7xx_dpmaif_tx_send_skb() - Add skb to the transmit queue.
 * @dpmaif_ctrl: Pointer to struct dpmaif_ctrl.
 * @txq_number: Queue number to xmit on.
 * @skb: Pointer to the skb to transmit.
 *
 * Add the skb to the queue of skbs to be transmitted.
 * Wake up the thread that pushes skbs from the queue to the HW.
 *
 * Return:
 * * 0		- Success.
 * * -EBUSY	- Tx budget exhausted.
 *		  In normal circumstances t7xx_dpmaif_add_skb_to_ring() must report the txq full
 *		  state to prevent this error condition.
 */
int t7xx_dpmaif_tx_send_skb(struct dpmaif_ctrl *dpmaif_ctrl, unsigned int txq_number,
			    struct sk_buff *skb)
{
	struct dpmaif_tx_queue *txq = &dpmaif_ctrl->txq[txq_number];
	struct dpmaif_callbacks *cb = dpmaif_ctrl->callbacks;
	struct t7xx_skb_cb *skb_cb;

	if (atomic_read(&txq->tx_budget) <= t7xx_skb_drb_cnt(skb)) {
		cb->state_notify(dpmaif_ctrl->t7xx_dev, DMPAIF_TXQ_STATE_FULL, txq_number);
		return -EBUSY;
	}

	skb_cb = T7XX_SKB_CB(skb);
	skb_cb->txq_number = txq_number;
	skb_queue_tail(&txq->tx_skb_head, skb);
	wake_up(&dpmaif_ctrl->tx_wq);

	return 0;
}
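
/* Illustrative caller sketch only; the real caller lives in the WWAN netdev
 * glue code, which is an assumption of this example, not part of this file:
 *
 *	ret = t7xx_dpmaif_tx_send_skb(dpmaif_ctrl, DPMAIF_TX_DEFAULT_QUEUE, skb);
 *	if (ret == -EBUSY)
 *		return NETDEV_TX_BUSY;	// queue reported full via DMPAIF_TXQ_STATE_FULL
 */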

void t7xx_dpmaif_irq_tx_done(struct dpmaif_ctrl *dpmaif_ctrl, unsigned int que_mask)
{
	int i;

	for (i = 0; i < DPMAIF_TXQ_NUM; i++) {
		if (que_mask & BIT(i))
			queue_work(dpmaif_ctrl->txq[i].worker, &dpmaif_ctrl->txq[i].dpmaif_tx_work);
	}
}

static int t7xx_dpmaif_tx_drb_buf_init(struct dpmaif_tx_queue *txq)
{
	size_t drb_skb_size, drb_pd_size;

	drb_pd_size = DPMAIF_DRB_LIST_LEN * sizeof(struct dpmaif_drb);
	drb_skb_size = DPMAIF_DRB_LIST_LEN * sizeof(struct dpmaif_drb_skb);

	txq->drb_size_cnt = DPMAIF_DRB_LIST_LEN;

	/* For HW && AP SW */
	txq->drb_base = dma_alloc_coherent(txq->dpmaif_ctrl->dev, drb_pd_size,
					   &txq->drb_bus_addr, GFP_KERNEL | __GFP_ZERO);
	if (!txq->drb_base)
		return -ENOMEM;

	/* For AP SW to record the skb information */
	txq->drb_skb_base = devm_kzalloc(txq->dpmaif_ctrl->dev, drb_skb_size, GFP_KERNEL);
	if (!txq->drb_skb_base) {
		dma_free_coherent(txq->dpmaif_ctrl->dev, drb_pd_size,
				  txq->drb_base, txq->drb_bus_addr);
		return -ENOMEM;
	}

	return 0;
}

static void t7xx_dpmaif_tx_free_drb_skb(struct dpmaif_tx_queue *txq)
{
	struct dpmaif_drb_skb *drb_skb, *drb_skb_base = txq->drb_skb_base;
	unsigned int i;

	if (!drb_skb_base)
		return;

	for (i = 0; i < txq->drb_size_cnt; i++) {
		drb_skb = drb_skb_base + i;
		if (!drb_skb->skb)
			continue;

		if (!drb_skb->is_msg)
			dma_unmap_single(txq->dpmaif_ctrl->dev, drb_skb->bus_addr,
					 drb_skb->data_len, DMA_TO_DEVICE);

		if (drb_skb->is_last) {
			dev_kfree_skb(drb_skb->skb);
			drb_skb->skb = NULL;
		}
	}
}

static void t7xx_dpmaif_tx_drb_buf_rel(struct dpmaif_tx_queue *txq)
{
	if (txq->drb_base)
		dma_free_coherent(txq->dpmaif_ctrl->dev,
				  txq->drb_size_cnt * sizeof(struct dpmaif_drb),
				  txq->drb_base, txq->drb_bus_addr);

	t7xx_dpmaif_tx_free_drb_skb(txq);
}

/**
 * t7xx_dpmaif_txq_init() - Initialize TX queue.
 * @txq: Pointer to struct dpmaif_tx_queue.
 *
 * Initialize the TX queue data structure and allocate memory for it to use.
 *
 * Return:
 * * 0		- Success.
 * * -ERROR	- Error code from a failed sub-initialization.
 */
int t7xx_dpmaif_txq_init(struct dpmaif_tx_queue *txq)
{
	int ret;

	skb_queue_head_init(&txq->tx_skb_head);
	init_waitqueue_head(&txq->req_wq);
	atomic_set(&txq->tx_budget, DPMAIF_DRB_LIST_LEN);

	ret = t7xx_dpmaif_tx_drb_buf_init(txq);
	if (ret) {
		dev_err(txq->dpmaif_ctrl->dev, "Failed to initialize DRB buffers: %d\n", ret);
		return ret;
	}

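	/* Queue 0 gets a high-priority completion worker; it appears to be
	 * the default data queue (see t7xx_select_tx_queue() above).
	 */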
	txq->worker = alloc_workqueue("md_dpmaif_tx%d_worker", WQ_UNBOUND | WQ_MEM_RECLAIM |
				      (txq->index ? 0 : WQ_HIGHPRI), 1, txq->index);
	if (!txq->worker)
		return -ENOMEM;

	INIT_WORK(&txq->dpmaif_tx_work, t7xx_dpmaif_tx_done);
	spin_lock_init(&txq->tx_lock);

	return 0;
}

void t7xx_dpmaif_txq_free(struct dpmaif_tx_queue *txq)
{
	if (txq->worker)
		destroy_workqueue(txq->worker);

	skb_queue_purge(&txq->tx_skb_head);
	t7xx_dpmaif_tx_drb_buf_rel(txq);
}

void t7xx_dpmaif_tx_stop(struct dpmaif_ctrl *dpmaif_ctrl)
{
	int i;

	for (i = 0; i < DPMAIF_TXQ_NUM; i++) {
		struct dpmaif_tx_queue *txq;
		int count = 0;

		txq = &dpmaif_ctrl->txq[i];
		txq->que_started = false;
		/* Make sure the queue-stopped state is visible before polling tx_processing */
		smp_mb();

		/* Wait for active Tx to be done */
		while (atomic_read(&txq->tx_processing)) {
			if (++count >= DPMAIF_MAX_CHECK_COUNT) {
				dev_err(dpmaif_ctrl->dev, "TX queue stop failed\n");
				break;
			}
		}
	}
}

static void t7xx_dpmaif_txq_flush_rel(struct dpmaif_tx_queue *txq)
{
	txq->que_started = false;

	cancel_work_sync(&txq->dpmaif_tx_work);
	flush_work(&txq->dpmaif_tx_work);
	t7xx_dpmaif_tx_free_drb_skb(txq);

	txq->drb_rd_idx = 0;
	txq->drb_wr_idx = 0;
	txq->drb_release_rd_idx = 0;
}

void t7xx_dpmaif_tx_clear(struct dpmaif_ctrl *dpmaif_ctrl)
{
	int i;

	for (i = 0; i < DPMAIF_TXQ_NUM; i++)
		t7xx_dpmaif_txq_flush_rel(&dpmaif_ctrl->txq[i]);
}