1d642b012SHaijun Liu // SPDX-License-Identifier: GPL-2.0-only
2d642b012SHaijun Liu /*
3d642b012SHaijun Liu  * Copyright (c) 2021, MediaTek Inc.
4d642b012SHaijun Liu  * Copyright (c) 2021-2022, Intel Corporation.
5d642b012SHaijun Liu  *
6d642b012SHaijun Liu  * Authors:
7d642b012SHaijun Liu  *  Amir Hanania <amir.hanania@intel.com>
8d642b012SHaijun Liu  *  Haijun Liu <haijun.liu@mediatek.com>
9d642b012SHaijun Liu  *  Eliot Lee <eliot.lee@intel.com>
10d642b012SHaijun Liu  *  Moises Veleta <moises.veleta@intel.com>
11d642b012SHaijun Liu  *  Ricardo Martinez <ricardo.martinez@linux.intel.com>
12d642b012SHaijun Liu  *
13d642b012SHaijun Liu  * Contributors:
14d642b012SHaijun Liu  *  Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
15d642b012SHaijun Liu  *  Sreehari Kancharla <sreehari.kancharla@intel.com>
16d642b012SHaijun Liu  */
17d642b012SHaijun Liu 
18d642b012SHaijun Liu #include <linux/atomic.h>
19d642b012SHaijun Liu #include <linux/bitfield.h>
20d642b012SHaijun Liu #include <linux/delay.h>
21d642b012SHaijun Liu #include <linux/device.h>
22d642b012SHaijun Liu #include <linux/dma-direction.h>
23d642b012SHaijun Liu #include <linux/dma-mapping.h>
24d642b012SHaijun Liu #include <linux/err.h>
25d642b012SHaijun Liu #include <linux/gfp.h>
26d642b012SHaijun Liu #include <linux/kernel.h>
27d642b012SHaijun Liu #include <linux/kthread.h>
28d642b012SHaijun Liu #include <linux/list.h>
29d642b012SHaijun Liu #include <linux/minmax.h>
30d642b012SHaijun Liu #include <linux/netdevice.h>
31d10b3a69SHaijun Liu #include <linux/pm_runtime.h>
32d642b012SHaijun Liu #include <linux/sched.h>
33d642b012SHaijun Liu #include <linux/spinlock.h>
34d642b012SHaijun Liu #include <linux/skbuff.h>
35d642b012SHaijun Liu #include <linux/types.h>
36d642b012SHaijun Liu #include <linux/wait.h>
37d642b012SHaijun Liu #include <linux/workqueue.h>
38d642b012SHaijun Liu 
39d642b012SHaijun Liu #include "t7xx_dpmaif.h"
40d642b012SHaijun Liu #include "t7xx_hif_dpmaif.h"
41d642b012SHaijun Liu #include "t7xx_hif_dpmaif_tx.h"
42d642b012SHaijun Liu #include "t7xx_pci.h"
43d642b012SHaijun Liu 
44d642b012SHaijun Liu #define DPMAIF_SKB_TX_BURST_CNT	5
45d642b012SHaijun Liu #define DPMAIF_DRB_LIST_LEN	6144
46d642b012SHaijun Liu 
47d642b012SHaijun Liu /* DRB dtype */
48d642b012SHaijun Liu #define DES_DTYP_PD		0
49d642b012SHaijun Liu #define DES_DTYP_MSG		1
50d642b012SHaijun Liu 
/* Refresh the SW copy of the HW UL read index for queue @q_num.
 *
 * Reads the current read index from the DPMAIF HW and stores it in
 * txq->drb_rd_idx (under tx_lock). Returns the number of DRB entries the
 * HW has consumed since the last update (i.e. how many are now releasable),
 * or 0 when the queue is stopped or the HW reports an out-of-range index.
 */
static unsigned int t7xx_dpmaif_update_drb_rd_idx(struct dpmaif_ctrl *dpmaif_ctrl,
						  unsigned int q_num)
{
	struct dpmaif_tx_queue *txq = &dpmaif_ctrl->txq[q_num];
	unsigned int old_sw_rd_idx, new_hw_rd_idx, drb_cnt;
	unsigned long flags;

	if (!txq->que_started)
		return 0;

	old_sw_rd_idx = txq->drb_rd_idx;
	new_hw_rd_idx = t7xx_dpmaif_ul_get_rd_idx(&dpmaif_ctrl->hw_info, q_num);
	if (new_hw_rd_idx >= DPMAIF_DRB_LIST_LEN) {
		dev_err(dpmaif_ctrl->dev, "Out of range read index: %u\n", new_hw_rd_idx);
		return 0;
	}

	/* Distance between old SW and new HW read index, handling ring wrap */
	if (old_sw_rd_idx <= new_hw_rd_idx)
		drb_cnt = new_hw_rd_idx - old_sw_rd_idx;
	else
		drb_cnt = txq->drb_size_cnt - old_sw_rd_idx + new_hw_rd_idx;

	spin_lock_irqsave(&txq->tx_lock, flags);
	txq->drb_rd_idx = new_hw_rd_idx;
	spin_unlock_irqrestore(&txq->tx_lock, flags);

	return drb_cnt;
}
79d642b012SHaijun Liu 
t7xx_dpmaif_release_tx_buffer(struct dpmaif_ctrl * dpmaif_ctrl,unsigned int q_num,unsigned int release_cnt)80d642b012SHaijun Liu static unsigned int t7xx_dpmaif_release_tx_buffer(struct dpmaif_ctrl *dpmaif_ctrl,
81d642b012SHaijun Liu 						  unsigned int q_num, unsigned int release_cnt)
82d642b012SHaijun Liu {
83d642b012SHaijun Liu 	struct dpmaif_tx_queue *txq = &dpmaif_ctrl->txq[q_num];
84d642b012SHaijun Liu 	struct dpmaif_callbacks *cb = dpmaif_ctrl->callbacks;
85d642b012SHaijun Liu 	struct dpmaif_drb_skb *cur_drb_skb, *drb_skb_base;
86d642b012SHaijun Liu 	struct dpmaif_drb *cur_drb, *drb_base;
87d642b012SHaijun Liu 	unsigned int drb_cnt, i, cur_idx;
88d642b012SHaijun Liu 	unsigned long flags;
89d642b012SHaijun Liu 
90d642b012SHaijun Liu 	drb_skb_base = txq->drb_skb_base;
91d642b012SHaijun Liu 	drb_base = txq->drb_base;
92d642b012SHaijun Liu 
93d642b012SHaijun Liu 	spin_lock_irqsave(&txq->tx_lock, flags);
94d642b012SHaijun Liu 	drb_cnt = txq->drb_size_cnt;
95d642b012SHaijun Liu 	cur_idx = txq->drb_release_rd_idx;
96d642b012SHaijun Liu 	spin_unlock_irqrestore(&txq->tx_lock, flags);
97d642b012SHaijun Liu 
98d642b012SHaijun Liu 	for (i = 0; i < release_cnt; i++) {
99d642b012SHaijun Liu 		cur_drb = drb_base + cur_idx;
100d642b012SHaijun Liu 		if (FIELD_GET(DRB_HDR_DTYP, le32_to_cpu(cur_drb->header)) == DES_DTYP_PD) {
101d642b012SHaijun Liu 			cur_drb_skb = drb_skb_base + cur_idx;
102d642b012SHaijun Liu 			if (!cur_drb_skb->is_msg)
103d642b012SHaijun Liu 				dma_unmap_single(dpmaif_ctrl->dev, cur_drb_skb->bus_addr,
104d642b012SHaijun Liu 						 cur_drb_skb->data_len, DMA_TO_DEVICE);
105d642b012SHaijun Liu 
106d642b012SHaijun Liu 			if (!FIELD_GET(DRB_HDR_CONT, le32_to_cpu(cur_drb->header))) {
107d642b012SHaijun Liu 				if (!cur_drb_skb->skb) {
108d642b012SHaijun Liu 					dev_err(dpmaif_ctrl->dev,
109d642b012SHaijun Liu 						"txq%u: DRB check fail, invalid skb\n", q_num);
110d642b012SHaijun Liu 					continue;
111d642b012SHaijun Liu 				}
112d642b012SHaijun Liu 
113d642b012SHaijun Liu 				dev_kfree_skb_any(cur_drb_skb->skb);
114d642b012SHaijun Liu 			}
115d642b012SHaijun Liu 
116d642b012SHaijun Liu 			cur_drb_skb->skb = NULL;
117d642b012SHaijun Liu 		}
118d642b012SHaijun Liu 
119d642b012SHaijun Liu 		spin_lock_irqsave(&txq->tx_lock, flags);
120d642b012SHaijun Liu 		cur_idx = t7xx_ring_buf_get_next_wr_idx(drb_cnt, cur_idx);
121d642b012SHaijun Liu 		txq->drb_release_rd_idx = cur_idx;
122d642b012SHaijun Liu 		spin_unlock_irqrestore(&txq->tx_lock, flags);
123d642b012SHaijun Liu 
124d642b012SHaijun Liu 		if (atomic_inc_return(&txq->tx_budget) > txq->drb_size_cnt / 8)
125d642b012SHaijun Liu 			cb->state_notify(dpmaif_ctrl->t7xx_dev, DMPAIF_TXQ_STATE_IRQ, txq->index);
126d642b012SHaijun Liu 	}
127d642b012SHaijun Liu 
128d642b012SHaijun Liu 	if (FIELD_GET(DRB_HDR_CONT, le32_to_cpu(cur_drb->header)))
129d642b012SHaijun Liu 		dev_err(dpmaif_ctrl->dev, "txq%u: DRB not marked as the last one\n", q_num);
130d642b012SHaijun Liu 
131d642b012SHaijun Liu 	return i;
132d642b012SHaijun Liu }
133d642b012SHaijun Liu 
t7xx_dpmaif_tx_release(struct dpmaif_ctrl * dpmaif_ctrl,unsigned int q_num,unsigned int budget)134d642b012SHaijun Liu static int t7xx_dpmaif_tx_release(struct dpmaif_ctrl *dpmaif_ctrl,
135d642b012SHaijun Liu 				  unsigned int q_num, unsigned int budget)
136d642b012SHaijun Liu {
137d642b012SHaijun Liu 	struct dpmaif_tx_queue *txq = &dpmaif_ctrl->txq[q_num];
138d642b012SHaijun Liu 	unsigned int rel_cnt, real_rel_cnt;
139d642b012SHaijun Liu 
140d642b012SHaijun Liu 	/* Update read index from HW */
141d642b012SHaijun Liu 	t7xx_dpmaif_update_drb_rd_idx(dpmaif_ctrl, q_num);
142d642b012SHaijun Liu 
143d642b012SHaijun Liu 	rel_cnt = t7xx_ring_buf_rd_wr_count(txq->drb_size_cnt, txq->drb_release_rd_idx,
144d642b012SHaijun Liu 					    txq->drb_rd_idx, DPMAIF_READ);
145d642b012SHaijun Liu 
146d642b012SHaijun Liu 	real_rel_cnt = min_not_zero(budget, rel_cnt);
147d642b012SHaijun Liu 	if (real_rel_cnt)
148d642b012SHaijun Liu 		real_rel_cnt = t7xx_dpmaif_release_tx_buffer(dpmaif_ctrl, q_num, real_rel_cnt);
149d642b012SHaijun Liu 
150d642b012SHaijun Liu 	return real_rel_cnt < rel_cnt ? -EAGAIN : 0;
151d642b012SHaijun Liu }
152d642b012SHaijun Liu 
t7xx_dpmaif_drb_ring_not_empty(struct dpmaif_tx_queue * txq)153d642b012SHaijun Liu static bool t7xx_dpmaif_drb_ring_not_empty(struct dpmaif_tx_queue *txq)
154d642b012SHaijun Liu {
155d642b012SHaijun Liu 	return !!t7xx_dpmaif_update_drb_rd_idx(txq->dpmaif_ctrl, txq->index);
156d642b012SHaijun Liu }
157d642b012SHaijun Liu 
/* Work item run after a TX-done interrupt: release consumed DRBs.
 *
 * Resumes the device via runtime PM and requests device-sleep disable
 * before touching HW registers. Requeues itself when more work remains
 * (release returned -EAGAIN, or the done bit was cleared while the ring
 * is still non-empty); otherwise re-enables the UL queue interrupt.
 */
static void t7xx_dpmaif_tx_done(struct work_struct *work)
{
	struct dpmaif_tx_queue *txq = container_of(work, struct dpmaif_tx_queue, dpmaif_tx_work);
	struct dpmaif_ctrl *dpmaif_ctrl = txq->dpmaif_ctrl;
	struct dpmaif_hw_info *hw_info;
	int ret;

	/* -EACCES means runtime PM is disabled; proceed anyway in that case */
	ret = pm_runtime_resume_and_get(dpmaif_ctrl->dev);
	if (ret < 0 && ret != -EACCES)
		return;

	/* The device may be in low power state. Disable sleep if needed */
	t7xx_pci_disable_sleep(dpmaif_ctrl->t7xx_dev);
	if (t7xx_pci_sleep_disable_complete(dpmaif_ctrl->t7xx_dev)) {
		hw_info = &dpmaif_ctrl->hw_info;
		ret = t7xx_dpmaif_tx_release(dpmaif_ctrl, txq->index, txq->drb_size_cnt);
		if (ret == -EAGAIN ||
		    (t7xx_dpmaif_ul_clr_done(hw_info, txq->index) &&
		     t7xx_dpmaif_drb_ring_not_empty(txq))) {
			queue_work(dpmaif_ctrl->txq[txq->index].worker,
				   &dpmaif_ctrl->txq[txq->index].dpmaif_tx_work);
			/* Give the device time to enter the low power state */
			t7xx_dpmaif_clr_ip_busy_sts(hw_info);
		} else {
			t7xx_dpmaif_clr_ip_busy_sts(hw_info);
			t7xx_dpmaif_unmask_ulq_intr(hw_info, txq->index);
		}
	}

	t7xx_pci_enable_sleep(dpmaif_ctrl->t7xx_dev);
	pm_runtime_mark_last_busy(dpmaif_ctrl->dev);
	pm_runtime_put_autosuspend(dpmaif_ctrl->dev);
}
191d642b012SHaijun Liu 
t7xx_setup_msg_drb(struct dpmaif_ctrl * dpmaif_ctrl,unsigned int q_num,unsigned int cur_idx,unsigned int pkt_len,unsigned int count_l,unsigned int channel_id)192d642b012SHaijun Liu static void t7xx_setup_msg_drb(struct dpmaif_ctrl *dpmaif_ctrl, unsigned int q_num,
193d642b012SHaijun Liu 			       unsigned int cur_idx, unsigned int pkt_len, unsigned int count_l,
194d642b012SHaijun Liu 			       unsigned int channel_id)
195d642b012SHaijun Liu {
196d642b012SHaijun Liu 	struct dpmaif_drb *drb_base = dpmaif_ctrl->txq[q_num].drb_base;
197d642b012SHaijun Liu 	struct dpmaif_drb *drb = drb_base + cur_idx;
198d642b012SHaijun Liu 
199d642b012SHaijun Liu 	drb->header = cpu_to_le32(FIELD_PREP(DRB_HDR_DTYP, DES_DTYP_MSG) |
200d642b012SHaijun Liu 				  FIELD_PREP(DRB_HDR_CONT, 1) |
201d642b012SHaijun Liu 				  FIELD_PREP(DRB_HDR_DATA_LEN, pkt_len));
202d642b012SHaijun Liu 
203d642b012SHaijun Liu 	drb->msg.msg_hdr = cpu_to_le32(FIELD_PREP(DRB_MSG_COUNT_L, count_l) |
204d642b012SHaijun Liu 				       FIELD_PREP(DRB_MSG_CHANNEL_ID, channel_id) |
205d642b012SHaijun Liu 				       FIELD_PREP(DRB_MSG_L4_CHK, 1));
206d642b012SHaijun Liu }
207d642b012SHaijun Liu 
/* Fill a payload-type DRB at @cur_idx pointing at one DMA-mapped buffer.
 * The CONT bit is set on every DRB except the last one of the skb chain.
 */
static void t7xx_setup_payload_drb(struct dpmaif_ctrl *dpmaif_ctrl, unsigned int q_num,
				   unsigned int cur_idx, dma_addr_t data_addr,
				   unsigned int pkt_size, bool last_one)
{
	struct dpmaif_drb *drb_base = dpmaif_ctrl->txq[q_num].drb_base;
	struct dpmaif_drb *drb = &drb_base[cur_idx];
	u32 header = FIELD_PREP(DRB_HDR_DTYP, DES_DTYP_PD) |
		     FIELD_PREP(DRB_HDR_DATA_LEN, pkt_size) |
		     (last_one ? 0 : FIELD_PREP(DRB_HDR_CONT, 1));

	drb->header = cpu_to_le32(header);
	drb->pd.data_addr_l = cpu_to_le32(lower_32_bits(data_addr));
	drb->pd.data_addr_h = cpu_to_le32(upper_32_bits(data_addr));
}
224d642b012SHaijun Liu 
/* Record bookkeeping for the DRB at @cur_idx so it can be unmapped and
 * its skb freed once the HW has consumed it.
 */
static void t7xx_record_drb_skb(struct dpmaif_ctrl *dpmaif_ctrl, unsigned int q_num,
				unsigned int cur_idx, struct sk_buff *skb, bool is_msg,
				bool is_frag, bool is_last_one, dma_addr_t bus_addr,
				unsigned int data_len)
{
	struct dpmaif_drb_skb *drb_skb_base = dpmaif_ctrl->txq[q_num].drb_skb_base;
	struct dpmaif_drb_skb *rec = &drb_skb_base[cur_idx];

	rec->index = cur_idx;
	rec->skb = skb;
	rec->bus_addr = bus_addr;
	rec->data_len = data_len;
	rec->is_msg = is_msg;
	rec->is_frag = is_frag;
	rec->is_last = is_last_one;
}
241d642b012SHaijun Liu 
/* Write one skb into the DRB ring as a chain: one message DRB followed by
 * one payload DRB for the linear data and one per page fragment.
 *
 * Reserves ring space up front (under tx_lock), then DMA-maps and fills
 * each payload DRB. On a mapping failure, unmaps everything mapped so far
 * and restores the write index.
 *
 * Returns 0 on success, -ENODEV when the queue/device is not up,
 * -ENOMEM on DMA mapping failure.
 */
static int t7xx_dpmaif_add_skb_to_ring(struct dpmaif_ctrl *dpmaif_ctrl, struct sk_buff *skb)
{
	struct dpmaif_callbacks *cb = dpmaif_ctrl->callbacks;
	unsigned int wr_cnt, send_cnt, payload_cnt;
	unsigned int cur_idx, drb_wr_idx_backup;
	struct skb_shared_info *shinfo;
	struct dpmaif_tx_queue *txq;
	struct t7xx_skb_cb *skb_cb;
	unsigned long flags;

	skb_cb = T7XX_SKB_CB(skb);
	txq = &dpmaif_ctrl->txq[skb_cb->txq_number];
	if (!txq->que_started || dpmaif_ctrl->state != DPMAIF_STATE_PWRON)
		return -ENODEV;

	atomic_set(&txq->tx_processing, 1);
	 /* Ensure tx_processing is changed to 1 before actually begin TX flow */
	smp_mb();

	shinfo = skb_shinfo(skb);
	if (shinfo->frag_list)
		dev_warn_ratelimited(dpmaif_ctrl->dev, "frag_list not supported\n");

	payload_cnt = shinfo->nr_frags + 1;
	/* nr_frags: frag cnt, 1: skb->data, 1: msg DRB */
	send_cnt = payload_cnt + 1;

	/* Reserve send_cnt slots and write the message DRB atomically */
	spin_lock_irqsave(&txq->tx_lock, flags);
	cur_idx = txq->drb_wr_idx;
	drb_wr_idx_backup = cur_idx;
	txq->drb_wr_idx += send_cnt;
	if (txq->drb_wr_idx >= txq->drb_size_cnt)
		txq->drb_wr_idx -= txq->drb_size_cnt;
	t7xx_setup_msg_drb(dpmaif_ctrl, txq->index, cur_idx, skb->len, 0, skb_cb->netif_idx);
	t7xx_record_drb_skb(dpmaif_ctrl, txq->index, cur_idx, skb, true, 0, 0, 0, 0);
	spin_unlock_irqrestore(&txq->tx_lock, flags);

	for (wr_cnt = 0; wr_cnt < payload_cnt; wr_cnt++) {
		bool is_frag, is_last_one = wr_cnt == payload_cnt - 1;
		unsigned int data_len;
		dma_addr_t bus_addr;
		void *data_addr;

		/* First payload DRB carries the linear data, the rest the frags */
		if (!wr_cnt) {
			data_len = skb_headlen(skb);
			data_addr = skb->data;
			is_frag = false;
		} else {
			skb_frag_t *frag = shinfo->frags + wr_cnt - 1;

			data_len = skb_frag_size(frag);
			data_addr = skb_frag_address(frag);
			is_frag = true;
		}

		bus_addr = dma_map_single(dpmaif_ctrl->dev, data_addr, data_len, DMA_TO_DEVICE);
		if (dma_mapping_error(dpmaif_ctrl->dev, bus_addr))
			goto unmap_buffers;

		cur_idx = t7xx_ring_buf_get_next_wr_idx(txq->drb_size_cnt, cur_idx);

		spin_lock_irqsave(&txq->tx_lock, flags);
		t7xx_setup_payload_drb(dpmaif_ctrl, txq->index, cur_idx, bus_addr, data_len,
				       is_last_one);
		t7xx_record_drb_skb(dpmaif_ctrl, txq->index, cur_idx, skb, false, is_frag,
				    is_last_one, bus_addr, data_len);
		spin_unlock_irqrestore(&txq->tx_lock, flags);
	}

	/* Budget nearly exhausted: tell the upper layer to stop the queue */
	if (atomic_sub_return(send_cnt, &txq->tx_budget) <= (MAX_SKB_FRAGS + 2))
		cb->state_notify(dpmaif_ctrl->t7xx_dev, DMPAIF_TXQ_STATE_FULL, txq->index);

	atomic_set(&txq->tx_processing, 0);

	return 0;

unmap_buffers:
	/* Walk back over the payload DRBs mapped so far and unmap them */
	while (wr_cnt--) {
		struct dpmaif_drb_skb *drb_skb = txq->drb_skb_base;

		cur_idx = cur_idx ? cur_idx - 1 : txq->drb_size_cnt - 1;
		drb_skb += cur_idx;
		dma_unmap_single(dpmaif_ctrl->dev, drb_skb->bus_addr,
				 drb_skb->data_len, DMA_TO_DEVICE);
	}

	txq->drb_wr_idx = drb_wr_idx_backup;
	atomic_set(&txq->tx_processing, 0);

	return -ENOMEM;
}
333d642b012SHaijun Liu 
t7xx_tx_lists_are_all_empty(const struct dpmaif_ctrl * dpmaif_ctrl)334d642b012SHaijun Liu static bool t7xx_tx_lists_are_all_empty(const struct dpmaif_ctrl *dpmaif_ctrl)
335d642b012SHaijun Liu {
336d642b012SHaijun Liu 	int i;
337d642b012SHaijun Liu 
338d642b012SHaijun Liu 	for (i = 0; i < DPMAIF_TXQ_NUM; i++) {
339d642b012SHaijun Liu 		if (!skb_queue_empty(&dpmaif_ctrl->txq[i].tx_skb_head))
340d642b012SHaijun Liu 			return false;
341d642b012SHaijun Liu 	}
342d642b012SHaijun Liu 
343d642b012SHaijun Liu 	return true;
344d642b012SHaijun Liu }
345d642b012SHaijun Liu 
346d642b012SHaijun Liu /* Currently, only the default TX queue is used */
t7xx_select_tx_queue(struct dpmaif_ctrl * dpmaif_ctrl)347d642b012SHaijun Liu static struct dpmaif_tx_queue *t7xx_select_tx_queue(struct dpmaif_ctrl *dpmaif_ctrl)
348d642b012SHaijun Liu {
349d642b012SHaijun Liu 	struct dpmaif_tx_queue *txq;
350d642b012SHaijun Liu 
351d642b012SHaijun Liu 	txq = &dpmaif_ctrl->txq[DPMAIF_TX_DEFAULT_QUEUE];
352d642b012SHaijun Liu 	if (!txq->que_started)
353d642b012SHaijun Liu 		return NULL;
354d642b012SHaijun Liu 
355d642b012SHaijun Liu 	return txq;
356d642b012SHaijun Liu }
357d642b012SHaijun Liu 
t7xx_txq_drb_wr_available(struct dpmaif_tx_queue * txq)358d642b012SHaijun Liu static unsigned int t7xx_txq_drb_wr_available(struct dpmaif_tx_queue *txq)
359d642b012SHaijun Liu {
360d642b012SHaijun Liu 	return t7xx_ring_buf_rd_wr_count(txq->drb_size_cnt, txq->drb_release_rd_idx,
361d642b012SHaijun Liu 					 txq->drb_wr_idx, DPMAIF_WRITE);
362d642b012SHaijun Liu }
363d642b012SHaijun Liu 
t7xx_skb_drb_cnt(struct sk_buff * skb)364d642b012SHaijun Liu static unsigned int t7xx_skb_drb_cnt(struct sk_buff *skb)
365d642b012SHaijun Liu {
366d642b012SHaijun Liu 	/* Normal DRB (frags data + skb linear data) + msg DRB */
367d642b012SHaijun Liu 	return skb_shinfo(skb)->nr_frags + 2;
368d642b012SHaijun Liu }
369d642b012SHaijun Liu 
/* Move up to DPMAIF_SKB_TX_BURST_CNT queued skbs into the DRB ring.
 *
 * When the ring looks too full for the skb at the head of the list, the
 * free-slot count is refreshed and the same skb is retried on the next
 * iteration (skb_peek() without unlink), so a burst slot is consumed but
 * the skb is not dropped.
 *
 * Returns the number of DRBs written (> 0), or the last error code /
 * 0 when nothing was sent.
 */
static int t7xx_txq_burst_send_skb(struct dpmaif_tx_queue *txq)
{
	unsigned int drb_remain_cnt, i;
	unsigned int send_drb_cnt;
	int drb_cnt = 0;
	int ret = 0;

	drb_remain_cnt = t7xx_txq_drb_wr_available(txq);

	for (i = 0; i < DPMAIF_SKB_TX_BURST_CNT; i++) {
		struct sk_buff *skb;

		skb = skb_peek(&txq->tx_skb_head);
		if (!skb)
			break;

		send_drb_cnt = t7xx_skb_drb_cnt(skb);
		if (drb_remain_cnt < send_drb_cnt) {
			/* Refresh the count; released DRBs may have freed room */
			drb_remain_cnt = t7xx_txq_drb_wr_available(txq);
			continue;
		}

		drb_remain_cnt -= send_drb_cnt;

		ret = t7xx_dpmaif_add_skb_to_ring(txq->dpmaif_ctrl, skb);
		if (ret < 0) {
			dev_err(txq->dpmaif_ctrl->dev,
				"Failed to add skb to device's ring: %d\n", ret);
			break;
		}

		drb_cnt += send_drb_cnt;
		/* Only unlink after the skb is safely in the ring */
		skb_unlink(skb, &txq->tx_skb_head);
	}

	if (drb_cnt > 0)
		return drb_cnt;

	return ret;
}
410d642b012SHaijun Liu 
/* Main push loop: drain the TX skb lists into the HW DRB ring.
 *
 * Before the first doorbell write, waits for the device-sleep disable
 * handshake to complete (sleep was requested disabled by the caller);
 * subsequent iterations skip the wait. Loops until the lists are empty,
 * the thread is asked to stop, or the device leaves the PWRON state.
 */
static void t7xx_do_tx_hw_push(struct dpmaif_ctrl *dpmaif_ctrl)
{
	bool wait_disable_sleep = true;

	do {
		struct dpmaif_tx_queue *txq;
		int drb_send_cnt;

		txq = t7xx_select_tx_queue(dpmaif_ctrl);
		if (!txq)
			return;

		drb_send_cnt = t7xx_txq_burst_send_skb(txq);
		if (drb_send_cnt <= 0) {
			/* Nothing sent (no room or error); back off briefly */
			usleep_range(10, 20);
			cond_resched();
			continue;
		}

		/* Wait for the PCIe resource to unlock */
		if (wait_disable_sleep) {
			if (!t7xx_pci_sleep_disable_complete(dpmaif_ctrl->t7xx_dev))
				return;

			wait_disable_sleep = false;
		}

		/* Ring the doorbell: tell HW how many DRB words were added */
		t7xx_dpmaif_ul_update_hw_drb_cnt(&dpmaif_ctrl->hw_info, txq->index,
						 drb_send_cnt * DPMAIF_UL_DRB_SIZE_WORD);

		cond_resched();
	} while (!t7xx_tx_lists_are_all_empty(dpmaif_ctrl) && !kthread_should_stop() &&
		 (dpmaif_ctrl->state == DPMAIF_STATE_PWRON));
}
445d642b012SHaijun Liu 
/* Kthread body: sleep until there is TX work and the device is powered,
 * then resume the device, disable device sleep, and push queued skbs.
 *
 * Runs until kthread_stop(); returns 0, or a runtime-PM resume error
 * (-EACCES, meaning PM disabled, is tolerated and the push proceeds).
 */
static int t7xx_dpmaif_tx_hw_push_thread(void *arg)
{
	struct dpmaif_ctrl *dpmaif_ctrl = arg;
	int ret;

	while (!kthread_should_stop()) {
		if (t7xx_tx_lists_are_all_empty(dpmaif_ctrl) ||
		    dpmaif_ctrl->state != DPMAIF_STATE_PWRON) {
			/* Interrupted sleep: loop and re-evaluate the condition */
			if (wait_event_interruptible(dpmaif_ctrl->tx_wq,
						     (!t7xx_tx_lists_are_all_empty(dpmaif_ctrl) &&
						     dpmaif_ctrl->state == DPMAIF_STATE_PWRON) ||
						     kthread_should_stop()))
				continue;

			if (kthread_should_stop())
				break;
		}

		ret = pm_runtime_resume_and_get(dpmaif_ctrl->dev);
		if (ret < 0 && ret != -EACCES)
			return ret;

		t7xx_pci_disable_sleep(dpmaif_ctrl->t7xx_dev);
		t7xx_do_tx_hw_push(dpmaif_ctrl);
		t7xx_pci_enable_sleep(dpmaif_ctrl->t7xx_dev);
		pm_runtime_mark_last_busy(dpmaif_ctrl->dev);
		pm_runtime_put_autosuspend(dpmaif_ctrl->dev);
	}

	return 0;
}
477d642b012SHaijun Liu 
t7xx_dpmaif_tx_thread_init(struct dpmaif_ctrl * dpmaif_ctrl)478d642b012SHaijun Liu int t7xx_dpmaif_tx_thread_init(struct dpmaif_ctrl *dpmaif_ctrl)
479d642b012SHaijun Liu {
480d642b012SHaijun Liu 	init_waitqueue_head(&dpmaif_ctrl->tx_wq);
481d642b012SHaijun Liu 	dpmaif_ctrl->tx_thread = kthread_run(t7xx_dpmaif_tx_hw_push_thread,
482d642b012SHaijun Liu 					     dpmaif_ctrl, "dpmaif_tx_hw_push");
483d642b012SHaijun Liu 	return PTR_ERR_OR_ZERO(dpmaif_ctrl->tx_thread);
484d642b012SHaijun Liu }
485d642b012SHaijun Liu 
t7xx_dpmaif_tx_thread_rel(struct dpmaif_ctrl * dpmaif_ctrl)486d642b012SHaijun Liu void t7xx_dpmaif_tx_thread_rel(struct dpmaif_ctrl *dpmaif_ctrl)
487d642b012SHaijun Liu {
488d642b012SHaijun Liu 	if (dpmaif_ctrl->tx_thread)
489d642b012SHaijun Liu 		kthread_stop(dpmaif_ctrl->tx_thread);
490d642b012SHaijun Liu }
491d642b012SHaijun Liu 
492d642b012SHaijun Liu /**
493d642b012SHaijun Liu  * t7xx_dpmaif_tx_send_skb() - Add skb to the transmit queue.
494d642b012SHaijun Liu  * @dpmaif_ctrl: Pointer to struct dpmaif_ctrl.
495d642b012SHaijun Liu  * @txq_number: Queue number to xmit on.
496d642b012SHaijun Liu  * @skb: Pointer to the skb to transmit.
497d642b012SHaijun Liu  *
498d642b012SHaijun Liu  * Add the skb to the queue of the skbs to be transmit.
499d642b012SHaijun Liu  * Wake up the thread that push the skbs from the queue to the HW.
500d642b012SHaijun Liu  *
501d642b012SHaijun Liu  * Return:
502d642b012SHaijun Liu  * * 0		- Success.
503d642b012SHaijun Liu  * * -EBUSY	- Tx budget exhausted.
504d642b012SHaijun Liu  *		  In normal circumstances t7xx_dpmaif_add_skb_to_ring() must report the txq full
505d642b012SHaijun Liu  *		  state to prevent this error condition.
506d642b012SHaijun Liu  */
t7xx_dpmaif_tx_send_skb(struct dpmaif_ctrl * dpmaif_ctrl,unsigned int txq_number,struct sk_buff * skb)507d642b012SHaijun Liu int t7xx_dpmaif_tx_send_skb(struct dpmaif_ctrl *dpmaif_ctrl, unsigned int txq_number,
508d642b012SHaijun Liu 			    struct sk_buff *skb)
509d642b012SHaijun Liu {
510d642b012SHaijun Liu 	struct dpmaif_tx_queue *txq = &dpmaif_ctrl->txq[txq_number];
511d642b012SHaijun Liu 	struct dpmaif_callbacks *cb = dpmaif_ctrl->callbacks;
512d642b012SHaijun Liu 	struct t7xx_skb_cb *skb_cb;
513d642b012SHaijun Liu 
514d642b012SHaijun Liu 	if (atomic_read(&txq->tx_budget) <= t7xx_skb_drb_cnt(skb)) {
515d642b012SHaijun Liu 		cb->state_notify(dpmaif_ctrl->t7xx_dev, DMPAIF_TXQ_STATE_FULL, txq_number);
516d642b012SHaijun Liu 		return -EBUSY;
517d642b012SHaijun Liu 	}
518d642b012SHaijun Liu 
519d642b012SHaijun Liu 	skb_cb = T7XX_SKB_CB(skb);
520d642b012SHaijun Liu 	skb_cb->txq_number = txq_number;
521d642b012SHaijun Liu 	skb_queue_tail(&txq->tx_skb_head, skb);
522d642b012SHaijun Liu 	wake_up(&dpmaif_ctrl->tx_wq);
523d642b012SHaijun Liu 
524d642b012SHaijun Liu 	return 0;
525d642b012SHaijun Liu }
526d642b012SHaijun Liu 
t7xx_dpmaif_irq_tx_done(struct dpmaif_ctrl * dpmaif_ctrl,unsigned int que_mask)527d642b012SHaijun Liu void t7xx_dpmaif_irq_tx_done(struct dpmaif_ctrl *dpmaif_ctrl, unsigned int que_mask)
528d642b012SHaijun Liu {
529d642b012SHaijun Liu 	int i;
530d642b012SHaijun Liu 
531d642b012SHaijun Liu 	for (i = 0; i < DPMAIF_TXQ_NUM; i++) {
532d642b012SHaijun Liu 		if (que_mask & BIT(i))
533d642b012SHaijun Liu 			queue_work(dpmaif_ctrl->txq[i].worker, &dpmaif_ctrl->txq[i].dpmaif_tx_work);
534d642b012SHaijun Liu 	}
535d642b012SHaijun Liu }
536d642b012SHaijun Liu 
t7xx_dpmaif_tx_drb_buf_init(struct dpmaif_tx_queue * txq)537d642b012SHaijun Liu static int t7xx_dpmaif_tx_drb_buf_init(struct dpmaif_tx_queue *txq)
538d642b012SHaijun Liu {
539d642b012SHaijun Liu 	size_t brb_skb_size, brb_pd_size;
540d642b012SHaijun Liu 
541d642b012SHaijun Liu 	brb_pd_size = DPMAIF_DRB_LIST_LEN * sizeof(struct dpmaif_drb);
542d642b012SHaijun Liu 	brb_skb_size = DPMAIF_DRB_LIST_LEN * sizeof(struct dpmaif_drb_skb);
543d642b012SHaijun Liu 
544d642b012SHaijun Liu 	txq->drb_size_cnt = DPMAIF_DRB_LIST_LEN;
545d642b012SHaijun Liu 
546d642b012SHaijun Liu 	/* For HW && AP SW */
547d642b012SHaijun Liu 	txq->drb_base = dma_alloc_coherent(txq->dpmaif_ctrl->dev, brb_pd_size,
548d642b012SHaijun Liu 					   &txq->drb_bus_addr, GFP_KERNEL | __GFP_ZERO);
549d642b012SHaijun Liu 	if (!txq->drb_base)
550d642b012SHaijun Liu 		return -ENOMEM;
551d642b012SHaijun Liu 
552d642b012SHaijun Liu 	/* For AP SW to record the skb information */
553d642b012SHaijun Liu 	txq->drb_skb_base = devm_kzalloc(txq->dpmaif_ctrl->dev, brb_skb_size, GFP_KERNEL);
554d642b012SHaijun Liu 	if (!txq->drb_skb_base) {
555d642b012SHaijun Liu 		dma_free_coherent(txq->dpmaif_ctrl->dev, brb_pd_size,
556d642b012SHaijun Liu 				  txq->drb_base, txq->drb_bus_addr);
557d642b012SHaijun Liu 		return -ENOMEM;
558d642b012SHaijun Liu 	}
559d642b012SHaijun Liu 
560d642b012SHaijun Liu 	return 0;
561d642b012SHaijun Liu }
562d642b012SHaijun Liu 
t7xx_dpmaif_tx_free_drb_skb(struct dpmaif_tx_queue * txq)563d642b012SHaijun Liu static void t7xx_dpmaif_tx_free_drb_skb(struct dpmaif_tx_queue *txq)
564d642b012SHaijun Liu {
565d642b012SHaijun Liu 	struct dpmaif_drb_skb *drb_skb, *drb_skb_base = txq->drb_skb_base;
566d642b012SHaijun Liu 	unsigned int i;
567d642b012SHaijun Liu 
568d642b012SHaijun Liu 	if (!drb_skb_base)
569d642b012SHaijun Liu 		return;
570d642b012SHaijun Liu 
571d642b012SHaijun Liu 	for (i = 0; i < txq->drb_size_cnt; i++) {
572d642b012SHaijun Liu 		drb_skb = drb_skb_base + i;
573d642b012SHaijun Liu 		if (!drb_skb->skb)
574d642b012SHaijun Liu 			continue;
575d642b012SHaijun Liu 
576d642b012SHaijun Liu 		if (!drb_skb->is_msg)
577d642b012SHaijun Liu 			dma_unmap_single(txq->dpmaif_ctrl->dev, drb_skb->bus_addr,
578d642b012SHaijun Liu 					 drb_skb->data_len, DMA_TO_DEVICE);
579d642b012SHaijun Liu 
580d642b012SHaijun Liu 		if (drb_skb->is_last) {
581d642b012SHaijun Liu 			dev_kfree_skb(drb_skb->skb);
582d642b012SHaijun Liu 			drb_skb->skb = NULL;
583d642b012SHaijun Liu 		}
584d642b012SHaijun Liu 	}
585d642b012SHaijun Liu }
586d642b012SHaijun Liu 
t7xx_dpmaif_tx_drb_buf_rel(struct dpmaif_tx_queue * txq)587d642b012SHaijun Liu static void t7xx_dpmaif_tx_drb_buf_rel(struct dpmaif_tx_queue *txq)
588d642b012SHaijun Liu {
589d642b012SHaijun Liu 	if (txq->drb_base)
590d642b012SHaijun Liu 		dma_free_coherent(txq->dpmaif_ctrl->dev,
591d642b012SHaijun Liu 				  txq->drb_size_cnt * sizeof(struct dpmaif_drb),
592d642b012SHaijun Liu 				  txq->drb_base, txq->drb_bus_addr);
593d642b012SHaijun Liu 
594d642b012SHaijun Liu 	t7xx_dpmaif_tx_free_drb_skb(txq);
595d642b012SHaijun Liu }
596d642b012SHaijun Liu 
597d642b012SHaijun Liu /**
598d642b012SHaijun Liu  * t7xx_dpmaif_txq_init() - Initialize TX queue.
599d642b012SHaijun Liu  * @txq: Pointer to struct dpmaif_tx_queue.
600d642b012SHaijun Liu  *
601d642b012SHaijun Liu  * Initialize the TX queue data structure and allocate memory for it to use.
602d642b012SHaijun Liu  *
603d642b012SHaijun Liu  * Return:
604d642b012SHaijun Liu  * * 0		- Success.
605d642b012SHaijun Liu  * * -ERROR	- Error code from a failed sub-initialization.
606d642b012SHaijun Liu  */
t7xx_dpmaif_txq_init(struct dpmaif_tx_queue * txq)607d642b012SHaijun Liu int t7xx_dpmaif_txq_init(struct dpmaif_tx_queue *txq)
608d642b012SHaijun Liu {
609d642b012SHaijun Liu 	int ret;
610d642b012SHaijun Liu 
611d642b012SHaijun Liu 	skb_queue_head_init(&txq->tx_skb_head);
612d642b012SHaijun Liu 	init_waitqueue_head(&txq->req_wq);
613d642b012SHaijun Liu 	atomic_set(&txq->tx_budget, DPMAIF_DRB_LIST_LEN);
614d642b012SHaijun Liu 
615d642b012SHaijun Liu 	ret = t7xx_dpmaif_tx_drb_buf_init(txq);
616d642b012SHaijun Liu 	if (ret) {
617d642b012SHaijun Liu 		dev_err(txq->dpmaif_ctrl->dev, "Failed to initialize DRB buffers: %d\n", ret);
618d642b012SHaijun Liu 		return ret;
619d642b012SHaijun Liu 	}
620d642b012SHaijun Liu 
621*72b1fe6cSTejun Heo 	txq->worker = alloc_ordered_workqueue("md_dpmaif_tx%d_worker",
622*72b1fe6cSTejun Heo 				WQ_MEM_RECLAIM | (txq->index ? 0 : WQ_HIGHPRI),
623*72b1fe6cSTejun Heo 				txq->index);
624d642b012SHaijun Liu 	if (!txq->worker)
625d642b012SHaijun Liu 		return -ENOMEM;
626d642b012SHaijun Liu 
627d642b012SHaijun Liu 	INIT_WORK(&txq->dpmaif_tx_work, t7xx_dpmaif_tx_done);
628d642b012SHaijun Liu 	spin_lock_init(&txq->tx_lock);
629d642b012SHaijun Liu 
630d642b012SHaijun Liu 	return 0;
631d642b012SHaijun Liu }
632d642b012SHaijun Liu 
t7xx_dpmaif_txq_free(struct dpmaif_tx_queue * txq)633d642b012SHaijun Liu void t7xx_dpmaif_txq_free(struct dpmaif_tx_queue *txq)
634d642b012SHaijun Liu {
635d642b012SHaijun Liu 	if (txq->worker)
636d642b012SHaijun Liu 		destroy_workqueue(txq->worker);
637d642b012SHaijun Liu 
638d642b012SHaijun Liu 	skb_queue_purge(&txq->tx_skb_head);
639d642b012SHaijun Liu 	t7xx_dpmaif_tx_drb_buf_rel(txq);
640d642b012SHaijun Liu }
641d642b012SHaijun Liu 
t7xx_dpmaif_tx_stop(struct dpmaif_ctrl * dpmaif_ctrl)642d642b012SHaijun Liu void t7xx_dpmaif_tx_stop(struct dpmaif_ctrl *dpmaif_ctrl)
643d642b012SHaijun Liu {
644d642b012SHaijun Liu 	int i;
645d642b012SHaijun Liu 
646d642b012SHaijun Liu 	for (i = 0; i < DPMAIF_TXQ_NUM; i++) {
647d642b012SHaijun Liu 		struct dpmaif_tx_queue *txq;
648d642b012SHaijun Liu 		int count = 0;
649d642b012SHaijun Liu 
650d642b012SHaijun Liu 		txq = &dpmaif_ctrl->txq[i];
651d642b012SHaijun Liu 		txq->que_started = false;
652d642b012SHaijun Liu 		/* Make sure TXQ is disabled */
653d642b012SHaijun Liu 		smp_mb();
654d642b012SHaijun Liu 
655d642b012SHaijun Liu 		/* Wait for active Tx to be done */
656d642b012SHaijun Liu 		while (atomic_read(&txq->tx_processing)) {
657d642b012SHaijun Liu 			if (++count >= DPMAIF_MAX_CHECK_COUNT) {
658d642b012SHaijun Liu 				dev_err(dpmaif_ctrl->dev, "TX queue stop failed\n");
659d642b012SHaijun Liu 				break;
660d642b012SHaijun Liu 			}
661d642b012SHaijun Liu 		}
662d642b012SHaijun Liu 	}
663d642b012SHaijun Liu }
664d642b012SHaijun Liu 
t7xx_dpmaif_txq_flush_rel(struct dpmaif_tx_queue * txq)665d642b012SHaijun Liu static void t7xx_dpmaif_txq_flush_rel(struct dpmaif_tx_queue *txq)
666d642b012SHaijun Liu {
667d642b012SHaijun Liu 	txq->que_started = false;
668d642b012SHaijun Liu 
669d642b012SHaijun Liu 	cancel_work_sync(&txq->dpmaif_tx_work);
670d642b012SHaijun Liu 	flush_work(&txq->dpmaif_tx_work);
671d642b012SHaijun Liu 	t7xx_dpmaif_tx_free_drb_skb(txq);
672d642b012SHaijun Liu 
673d642b012SHaijun Liu 	txq->drb_rd_idx = 0;
674d642b012SHaijun Liu 	txq->drb_wr_idx = 0;
675d642b012SHaijun Liu 	txq->drb_release_rd_idx = 0;
676d642b012SHaijun Liu }
677d642b012SHaijun Liu 
t7xx_dpmaif_tx_clear(struct dpmaif_ctrl * dpmaif_ctrl)678d642b012SHaijun Liu void t7xx_dpmaif_tx_clear(struct dpmaif_ctrl *dpmaif_ctrl)
679d642b012SHaijun Liu {
680d642b012SHaijun Liu 	int i;
681d642b012SHaijun Liu 
682d642b012SHaijun Liu 	for (i = 0; i < DPMAIF_TXQ_NUM; i++)
683d642b012SHaijun Liu 		t7xx_dpmaif_txq_flush_rel(&dpmaif_ctrl->txq[i]);
684d642b012SHaijun Liu }
685