// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, MediaTek Inc.
 * Copyright (c) 2021-2022, Intel Corporation.
 *
 * Authors:
 *  Amir Hanania <amir.hanania@intel.com>
 *  Haijun Liu <haijun.liu@mediatek.com>
 *  Eliot Lee <eliot.lee@intel.com>
 *  Moises Veleta <moises.veleta@intel.com>
 *  Ricardo Martinez <ricardo.martinez@linux.intel.com>
 *
 * Contributors:
 *  Andy Shevchenko <andriy.shevchenko@linux.intel.com>
 *  Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
 *  Sreehari Kancharla <sreehari.kancharla@intel.com>
 */

#include <linux/atomic.h>
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/err.h>
#include <linux/iopoll.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/minmax.h>
#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/pm_runtime.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

#include "t7xx_dpmaif.h"
#include "t7xx_hif_dpmaif.h"
#include "t7xx_hif_dpmaif_rx.h"
#include "t7xx_pci.h"

#define DPMAIF_BAT_COUNT		8192
#define DPMAIF_FRG_COUNT		4814
#define DPMAIF_PIT_COUNT		(DPMAIF_BAT_COUNT * 2)

#define DPMAIF_BAT_CNT_THRESHOLD	30
#define DPMAIF_PIT_CNT_THRESHOLD	60
#define DPMAIF_RX_PUSH_THRESHOLD_MASK	GENMASK(2, 0)
#define DPMAIF_NOTIFY_RELEASE_COUNT	128
#define DPMAIF_POLL_PIT_TIME_US		20
#define DPMAIF_POLL_PIT_MAX_TIME_US	2000
#define DPMAIF_WQ_TIME_LIMIT_MS		2
#define DPMAIF_CS_RESULT_PASS		0

/* Packet type */
#define DES_PT_PD			0
#define DES_PT_MSG			1
/* Buffer type */
#define PKT_BUF_FRAG			1

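/*
 * The buffer ID of a normal PIT entry is split across two fields: the high
 * bits live in PD_PIT_H_BID in the PIT footer and the low 13 bits in
 * PD_PIT_BUFFER_ID in the PIT header. Reassemble them into a single index.
 */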
static unsigned int t7xx_normal_pit_bid(const struct dpmaif_pit *pit_info)
{
	u32 value;

	value = FIELD_GET(PD_PIT_H_BID, le32_to_cpu(pit_info->pd.footer));
	value <<= 13;
	value += FIELD_GET(PD_PIT_BUFFER_ID, le32_to_cpu(pit_info->header));
	return value;
}

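/*
 * Per-queue push thread: sleeps until packets are queued on skb_list (or the
 * thread is asked to stop), then dequeues them one at a time and hands each
 * skb to the recv_skb() callback registered by the upper layer.
 */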
static int t7xx_dpmaif_net_rx_push_thread(void *arg)
{
	struct dpmaif_rx_queue *q = arg;
	struct dpmaif_ctrl *hif_ctrl;
	struct dpmaif_callbacks *cb;

	hif_ctrl = q->dpmaif_ctrl;
	cb = hif_ctrl->callbacks;

	while (!kthread_should_stop()) {
		struct sk_buff *skb;
		unsigned long flags;

		if (skb_queue_empty(&q->skb_list)) {
			if (wait_event_interruptible(q->rx_wq,
						     !skb_queue_empty(&q->skb_list) ||
						     kthread_should_stop()))
				continue;

			if (kthread_should_stop())
				break;
		}

		spin_lock_irqsave(&q->skb_list.lock, flags);
		skb = __skb_dequeue(&q->skb_list);
		spin_unlock_irqrestore(&q->skb_list.lock, flags);

		if (!skb)
			continue;

		cb->recv_skb(hif_ctrl->t7xx_dev, skb);
		cond_resched();
	}

	return 0;
}

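/*
 * Advance the software BAT write index by bat_cnt entries, checking that the
 * new write index does not run past the release (read) index, with wrap-around
 * at bat_size_cnt.
 */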
static int t7xx_dpmaif_update_bat_wr_idx(struct dpmaif_ctrl *dpmaif_ctrl,
					 const unsigned int q_num, const unsigned int bat_cnt)
{
	struct dpmaif_rx_queue *rxq = &dpmaif_ctrl->rxq[q_num];
	struct dpmaif_bat_request *bat_req = rxq->bat_req;
	unsigned int old_rl_idx, new_wr_idx, old_wr_idx;

	if (!rxq->que_started) {
		dev_err(dpmaif_ctrl->dev, "RX queue %d has not been started\n", rxq->index);
		return -EINVAL;
	}

	old_rl_idx = bat_req->bat_release_rd_idx;
	old_wr_idx = bat_req->bat_wr_idx;
	new_wr_idx = old_wr_idx + bat_cnt;

	if (old_rl_idx > old_wr_idx && new_wr_idx >= old_rl_idx)
		goto err_flow;

	if (new_wr_idx >= bat_req->bat_size_cnt) {
		new_wr_idx -= bat_req->bat_size_cnt;
		if (new_wr_idx >= old_rl_idx)
			goto err_flow;
	}

	bat_req->bat_wr_idx = new_wr_idx;
	return 0;

err_flow:
	dev_err(dpmaif_ctrl->dev, "RX BAT flow check fail\n");
	return -EINVAL;
}

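/* Allocate an skb and DMA-map its data area so the device can write RX data into it. */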
static bool t7xx_alloc_and_map_skb_info(const struct dpmaif_ctrl *dpmaif_ctrl,
					const unsigned int size, struct dpmaif_bat_skb *cur_skb)
{
	dma_addr_t data_bus_addr;
	struct sk_buff *skb;
	size_t data_len;

	skb = __dev_alloc_skb(size, GFP_KERNEL);
	if (!skb)
		return false;

	data_len = skb_data_area_size(skb);
	data_bus_addr = dma_map_single(dpmaif_ctrl->dev, skb->data, data_len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dpmaif_ctrl->dev, data_bus_addr)) {
		dev_err_ratelimited(dpmaif_ctrl->dev, "DMA mapping error\n");
		dev_kfree_skb_any(skb);
		return false;
	}

	cur_skb->skb = skb;
	cur_skb->data_bus_addr = data_bus_addr;
	cur_skb->data_len = data_len;

	return true;
}

static void t7xx_unmap_bat_skb(struct device *dev, struct dpmaif_bat_skb *bat_skb_base,
			       unsigned int index)
{
	struct dpmaif_bat_skb *bat_skb = bat_skb_base + index;

	if (bat_skb->skb) {
		dma_unmap_single(dev, bat_skb->data_bus_addr, bat_skb->data_len, DMA_FROM_DEVICE);
		dev_kfree_skb(bat_skb->skb);
		bat_skb->skb = NULL;
	}
}

/**
 * t7xx_dpmaif_rx_buf_alloc() - Allocate buffers for the BAT ring.
 * @dpmaif_ctrl: Pointer to DPMAIF context structure.
 * @bat_req: Pointer to BAT request structure.
 * @q_num: Queue number.
 * @buf_cnt: Number of buffers to allocate.
 * @initial: Indicates if the ring is being populated for the first time.
 *
 * Allocate skbs and store the start address of each data buffer in the BAT ring.
 * If this is not the initial call, notify the HW about the new entries.
 *
 * Return:
 * * 0		- Success.
 * * -ERROR	- Error code.
 */
int t7xx_dpmaif_rx_buf_alloc(struct dpmaif_ctrl *dpmaif_ctrl,
			     const struct dpmaif_bat_request *bat_req,
			     const unsigned int q_num, const unsigned int buf_cnt,
			     const bool initial)
{
	unsigned int i, bat_cnt, bat_max_cnt, bat_start_idx;
	int ret;

	if (!buf_cnt || buf_cnt > bat_req->bat_size_cnt)
		return -EINVAL;

	/* Check BAT buffer space */
	bat_max_cnt = bat_req->bat_size_cnt;

	bat_cnt = t7xx_ring_buf_rd_wr_count(bat_max_cnt, bat_req->bat_release_rd_idx,
					    bat_req->bat_wr_idx, DPMAIF_WRITE);
	if (buf_cnt > bat_cnt)
		return -ENOMEM;

	bat_start_idx = bat_req->bat_wr_idx;

	for (i = 0; i < buf_cnt; i++) {
		unsigned int cur_bat_idx = bat_start_idx + i;
		struct dpmaif_bat_skb *cur_skb;
		struct dpmaif_bat *cur_bat;

		if (cur_bat_idx >= bat_max_cnt)
			cur_bat_idx -= bat_max_cnt;

		cur_skb = (struct dpmaif_bat_skb *)bat_req->bat_skb + cur_bat_idx;
		if (!cur_skb->skb &&
		    !t7xx_alloc_and_map_skb_info(dpmaif_ctrl, bat_req->pkt_buf_sz, cur_skb))
			break;

		cur_bat = (struct dpmaif_bat *)bat_req->bat_base + cur_bat_idx;
		cur_bat->buffer_addr_ext = upper_32_bits(cur_skb->data_bus_addr);
		cur_bat->p_buffer_addr = lower_32_bits(cur_skb->data_bus_addr);
	}

	if (!i)
		return -ENOMEM;

	ret = t7xx_dpmaif_update_bat_wr_idx(dpmaif_ctrl, q_num, i);
	if (ret)
		goto err_unmap_skbs;

	if (!initial) {
		unsigned int hw_wr_idx;

		ret = t7xx_dpmaif_dl_snd_hw_bat_cnt(&dpmaif_ctrl->hw_info, i);
		if (ret)
			goto err_unmap_skbs;

		hw_wr_idx = t7xx_dpmaif_dl_get_bat_wr_idx(&dpmaif_ctrl->hw_info,
							  DPF_RX_QNO_DFT);
		if (hw_wr_idx != bat_req->bat_wr_idx) {
			ret = -EFAULT;
			dev_err(dpmaif_ctrl->dev, "Write index mismatch in RX ring\n");
			goto err_unmap_skbs;
		}
	}

	return 0;

err_unmap_skbs:
	while (i--)
		t7xx_unmap_bat_skb(dpmaif_ctrl->dev, bat_req->bat_skb, i);

	return ret;
}

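/*
 * Return rel_entry_num PIT entries to the hardware and advance the software
 * PIT release index accordingly, wrapping at pit_size_cnt.
 */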
static int t7xx_dpmaifq_release_pit_entry(struct dpmaif_rx_queue *rxq,
					  const unsigned int rel_entry_num)
{
	struct dpmaif_hw_info *hw_info = &rxq->dpmaif_ctrl->hw_info;
	unsigned int old_rel_idx, new_rel_idx, hw_wr_idx;
	int ret;

	if (!rxq->que_started)
		return 0;

	if (rel_entry_num >= rxq->pit_size_cnt) {
		dev_err(rxq->dpmaif_ctrl->dev, "Invalid PIT release index\n");
		return -EINVAL;
	}

	old_rel_idx = rxq->pit_release_rd_idx;
	new_rel_idx = old_rel_idx + rel_entry_num;
	hw_wr_idx = rxq->pit_wr_idx;
	if (hw_wr_idx < old_rel_idx && new_rel_idx >= rxq->pit_size_cnt)
		new_rel_idx -= rxq->pit_size_cnt;

	ret = t7xx_dpmaif_dlq_add_pit_remain_cnt(hw_info, rxq->index, rel_entry_num);
	if (ret) {
		dev_err(rxq->dpmaif_ctrl->dev, "PIT release failure: %d\n", ret);
		return ret;
	}

	rxq->pit_release_rd_idx = new_rel_idx;
	return 0;
}

static void t7xx_dpmaif_set_bat_mask(struct dpmaif_bat_request *bat_req, unsigned int idx)
{
	unsigned long flags;

	spin_lock_irqsave(&bat_req->mask_lock, flags);
	set_bit(idx, bat_req->bat_bitmap);
	spin_unlock_irqrestore(&bat_req->mask_lock, flags);
}

static int t7xx_frag_bat_cur_bid_check(struct dpmaif_rx_queue *rxq,
				       const unsigned int cur_bid)
{
	struct dpmaif_bat_request *bat_frag = rxq->bat_frag;
	struct dpmaif_bat_page *bat_page;

	if (cur_bid >= DPMAIF_FRG_COUNT)
		return -EINVAL;

	bat_page = bat_frag->bat_skb + cur_bid;
	if (!bat_page->page)
		return -EINVAL;

	return 0;
}

static void t7xx_unmap_bat_page(struct device *dev, struct dpmaif_bat_page *bat_page_base,
				unsigned int index)
{
	struct dpmaif_bat_page *bat_page = bat_page_base + index;

	if (bat_page->page) {
		dma_unmap_page(dev, bat_page->data_bus_addr, bat_page->data_len, DMA_FROM_DEVICE);
		put_page(bat_page->page);
		bat_page->page = NULL;
	}
}

/**
 * t7xx_dpmaif_rx_frag_alloc() - Allocate buffers for the Fragment BAT ring.
 * @dpmaif_ctrl: Pointer to DPMAIF context structure.
 * @bat_req: Pointer to BAT request structure.
 * @buf_cnt: Number of buffers to allocate.
 * @initial: Indicates if the ring is being populated for the first time.
 *
 * Fragment BAT is used when the received packet does not fit in a normal BAT entry.
 * This function allocates a page fragment and stores the start address of the page
 * into the Fragment BAT ring.
 * If this is not the initial call, notify the HW about the new entries.
 *
 * Return:
 * * 0		- Success.
 * * -ERROR	- Error code.
 */
int t7xx_dpmaif_rx_frag_alloc(struct dpmaif_ctrl *dpmaif_ctrl, struct dpmaif_bat_request *bat_req,
			      const unsigned int buf_cnt, const bool initial)
{
	unsigned int buf_space, cur_bat_idx = bat_req->bat_wr_idx;
	struct dpmaif_bat_page *bat_skb = bat_req->bat_skb;
	int ret = 0, i;

	if (!buf_cnt || buf_cnt > bat_req->bat_size_cnt)
		return -EINVAL;

	buf_space = t7xx_ring_buf_rd_wr_count(bat_req->bat_size_cnt,
					      bat_req->bat_release_rd_idx, bat_req->bat_wr_idx,
					      DPMAIF_WRITE);
	if (buf_cnt > buf_space) {
		dev_err(dpmaif_ctrl->dev,
			"Requested more buffers than the space available in RX frag ring\n");
		return -EINVAL;
	}

	for (i = 0; i < buf_cnt; i++) {
		struct dpmaif_bat_page *cur_page = bat_skb + cur_bat_idx;
		struct dpmaif_bat *cur_bat;
		dma_addr_t data_base_addr;

		if (!cur_page->page) {
			unsigned long offset;
			struct page *page;
			void *data;

			data = netdev_alloc_frag(bat_req->pkt_buf_sz);
			if (!data)
				break;

			page = virt_to_head_page(data);
			offset = data - page_address(page);

			data_base_addr = dma_map_page(dpmaif_ctrl->dev, page, offset,
						      bat_req->pkt_buf_sz, DMA_FROM_DEVICE);
			if (dma_mapping_error(dpmaif_ctrl->dev, data_base_addr)) {
				put_page(virt_to_head_page(data));
				dev_err(dpmaif_ctrl->dev, "DMA mapping fail\n");
				break;
			}

			cur_page->page = page;
			cur_page->data_bus_addr = data_base_addr;
			cur_page->offset = offset;
			cur_page->data_len = bat_req->pkt_buf_sz;
		}

		data_base_addr = cur_page->data_bus_addr;
		cur_bat = (struct dpmaif_bat *)bat_req->bat_base + cur_bat_idx;
		cur_bat->buffer_addr_ext = upper_32_bits(data_base_addr);
		cur_bat->p_buffer_addr = lower_32_bits(data_base_addr);
		cur_bat_idx = t7xx_ring_buf_get_next_wr_idx(bat_req->bat_size_cnt, cur_bat_idx);
	}

	bat_req->bat_wr_idx = cur_bat_idx;

	if (!initial)
		t7xx_dpmaif_dl_snd_hw_frg_cnt(&dpmaif_ctrl->hw_info, i);

	if (i < buf_cnt) {
		ret = -ENOMEM;
		if (initial) {
			while (i--)
				t7xx_unmap_bat_page(dpmaif_ctrl->dev, bat_req->bat_skb, i);
		}
	}

	return ret;
}

static int t7xx_dpmaif_set_frag_to_skb(const struct dpmaif_rx_queue *rxq,
				       const struct dpmaif_pit *pkt_info,
				       struct sk_buff *skb)
{
	unsigned long long data_bus_addr, data_base_addr;
	struct device *dev = rxq->dpmaif_ctrl->dev;
	struct dpmaif_bat_page *page_info;
	unsigned int data_len;
	int data_offset;

	page_info = rxq->bat_frag->bat_skb;
	page_info += t7xx_normal_pit_bid(pkt_info);
	dma_unmap_page(dev, page_info->data_bus_addr, page_info->data_len, DMA_FROM_DEVICE);

	if (!page_info->page)
		return -EINVAL;

	data_bus_addr = le32_to_cpu(pkt_info->pd.data_addr_h);
	data_bus_addr = (data_bus_addr << 32) + le32_to_cpu(pkt_info->pd.data_addr_l);
	data_base_addr = page_info->data_bus_addr;
	data_offset = data_bus_addr - data_base_addr;
	data_offset += page_info->offset;
	data_len = FIELD_GET(PD_PIT_DATA_LEN, le32_to_cpu(pkt_info->header));
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page_info->page,
			data_offset, data_len, page_info->data_len);

	page_info->page = NULL;
	page_info->offset = 0;
	page_info->data_len = 0;
	return 0;
}

static int t7xx_dpmaif_get_frag(struct dpmaif_rx_queue *rxq,
				const struct dpmaif_pit *pkt_info,
				const struct dpmaif_cur_rx_skb_info *skb_info)
{
	unsigned int cur_bid = t7xx_normal_pit_bid(pkt_info);
	int ret;

	ret = t7xx_frag_bat_cur_bid_check(rxq, cur_bid);
	if (ret < 0)
		return ret;

	ret = t7xx_dpmaif_set_frag_to_skb(rxq, pkt_info, skb_info->cur_skb);
	if (ret < 0) {
		dev_err(rxq->dpmaif_ctrl->dev, "Failed to set frag data to skb: %d\n", ret);
		return ret;
	}

	t7xx_dpmaif_set_bat_mask(rxq->bat_frag, cur_bid);
	return 0;
}

static int t7xx_bat_cur_bid_check(struct dpmaif_rx_queue *rxq, const unsigned int cur_bid)
{
	struct dpmaif_bat_skb *bat_skb = rxq->bat_req->bat_skb;

	bat_skb += cur_bid;
	if (cur_bid >= DPMAIF_BAT_COUNT || !bat_skb->skb)
		return -EINVAL;

	return 0;
}

static int t7xx_dpmaif_read_pit_seq(const struct dpmaif_pit *pit)
{
	return FIELD_GET(PD_PIT_PIT_SEQ, le32_to_cpu(pit->pd.footer));
}

static int t7xx_dpmaif_check_pit_seq(struct dpmaif_rx_queue *rxq,
				     const struct dpmaif_pit *pit)
{
	unsigned int cur_pit_seq, expect_pit_seq = rxq->expect_pit_seq;

	if (read_poll_timeout_atomic(t7xx_dpmaif_read_pit_seq, cur_pit_seq,
				     cur_pit_seq == expect_pit_seq, DPMAIF_POLL_PIT_TIME_US,
				     DPMAIF_POLL_PIT_MAX_TIME_US, false, pit))
		return -EFAULT;

	rxq->expect_pit_seq++;
	if (rxq->expect_pit_seq >= DPMAIF_DL_PIT_SEQ_VALUE)
		rxq->expect_pit_seq = 0;

	return 0;
}

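/*
 * Count how many consecutive BAT entries, starting at bat_release_rd_idx, have
 * their bitmap bit set (i.e. have been consumed and can be released). The scan
 * wraps around to the beginning of the bitmap if no zero bit is found before
 * the end of the ring.
 */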
static unsigned int t7xx_dpmaif_avail_pkt_bat_cnt(struct dpmaif_bat_request *bat_req)
{
	unsigned int zero_index;
	unsigned long flags;

	spin_lock_irqsave(&bat_req->mask_lock, flags);

	zero_index = find_next_zero_bit(bat_req->bat_bitmap, bat_req->bat_size_cnt,
					bat_req->bat_release_rd_idx);

	if (zero_index < bat_req->bat_size_cnt) {
		spin_unlock_irqrestore(&bat_req->mask_lock, flags);
		return zero_index - bat_req->bat_release_rd_idx;
	}

	/* Limit the search to bat_release_rd_idx */
	zero_index = find_first_zero_bit(bat_req->bat_bitmap, bat_req->bat_release_rd_idx);
	spin_unlock_irqrestore(&bat_req->mask_lock, flags);
	return bat_req->bat_size_cnt - bat_req->bat_release_rd_idx + zero_index;
}

static int t7xx_dpmaif_release_bat_entry(const struct dpmaif_rx_queue *rxq,
					 const unsigned int rel_entry_num,
					 const enum bat_type buf_type)
{
	struct dpmaif_hw_info *hw_info = &rxq->dpmaif_ctrl->hw_info;
	unsigned int old_rel_idx, new_rel_idx, hw_rd_idx, i;
	struct dpmaif_bat_request *bat;
	unsigned long flags;

	if (!rxq->que_started || !rel_entry_num)
		return -EINVAL;

	if (buf_type == BAT_TYPE_FRAG) {
		bat = rxq->bat_frag;
		hw_rd_idx = t7xx_dpmaif_dl_get_frg_rd_idx(hw_info, rxq->index);
	} else {
		bat = rxq->bat_req;
		hw_rd_idx = t7xx_dpmaif_dl_get_bat_rd_idx(hw_info, rxq->index);
	}

	if (rel_entry_num >= bat->bat_size_cnt)
		return -EINVAL;

	old_rel_idx = bat->bat_release_rd_idx;
	new_rel_idx = old_rel_idx + rel_entry_num;

	/* Do not need to release if the queue is empty */
	if (bat->bat_wr_idx == old_rel_idx)
		return 0;

	if (hw_rd_idx >= old_rel_idx) {
		if (new_rel_idx > hw_rd_idx)
			return -EINVAL;
	} else if (new_rel_idx >= bat->bat_size_cnt) {
		new_rel_idx -= bat->bat_size_cnt;
		if (new_rel_idx > hw_rd_idx)
			return -EINVAL;
	}

	spin_lock_irqsave(&bat->mask_lock, flags);
	for (i = 0; i < rel_entry_num; i++) {
		unsigned int index = bat->bat_release_rd_idx + i;

		if (index >= bat->bat_size_cnt)
			index -= bat->bat_size_cnt;

		clear_bit(index, bat->bat_bitmap);
	}
	spin_unlock_irqrestore(&bat->mask_lock, flags);

	bat->bat_release_rd_idx = new_rel_idx;
	return rel_entry_num;
}

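/*
 * Release accumulated PIT entries back to the hardware, but only once at least
 * DPMAIF_PIT_CNT_THRESHOLD of them are pending, to batch the hardware updates.
 */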
static int t7xx_dpmaif_pit_release_and_add(struct dpmaif_rx_queue *rxq)
{
	int ret;

	if (rxq->pit_remain_release_cnt < DPMAIF_PIT_CNT_THRESHOLD)
		return 0;

	ret = t7xx_dpmaifq_release_pit_entry(rxq, rxq->pit_remain_release_cnt);
	if (ret)
		return ret;

	rxq->pit_remain_release_cnt = 0;
	return 0;
}

static int t7xx_dpmaif_bat_release_and_add(const struct dpmaif_rx_queue *rxq)
{
	unsigned int bid_cnt;
	int ret;

	bid_cnt = t7xx_dpmaif_avail_pkt_bat_cnt(rxq->bat_req);
	if (bid_cnt < DPMAIF_BAT_CNT_THRESHOLD)
		return 0;

	ret = t7xx_dpmaif_release_bat_entry(rxq, bid_cnt, BAT_TYPE_NORMAL);
	if (ret <= 0) {
		dev_err(rxq->dpmaif_ctrl->dev, "Release PKT BAT failed: %d\n", ret);
		return ret;
	}

	ret = t7xx_dpmaif_rx_buf_alloc(rxq->dpmaif_ctrl, rxq->bat_req, rxq->index, bid_cnt, false);
	if (ret < 0)
		dev_err(rxq->dpmaif_ctrl->dev, "Allocate new RX buffer failed: %d\n", ret);

	return ret;
}

static int t7xx_dpmaif_frag_bat_release_and_add(const struct dpmaif_rx_queue *rxq)
{
	unsigned int bid_cnt;
	int ret;

	bid_cnt = t7xx_dpmaif_avail_pkt_bat_cnt(rxq->bat_frag);
	if (bid_cnt < DPMAIF_BAT_CNT_THRESHOLD)
		return 0;

	ret = t7xx_dpmaif_release_bat_entry(rxq, bid_cnt, BAT_TYPE_FRAG);
	if (ret <= 0) {
		dev_err(rxq->dpmaif_ctrl->dev, "Release BAT entry failed: %d\n", ret);
		return ret;
	}

	return t7xx_dpmaif_rx_frag_alloc(rxq->dpmaif_ctrl, rxq->bat_frag, bid_cnt, false);
}

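/*
 * A message PIT carries per-packet metadata rather than payload: the channel
 * index, checksum result, drop indication and packet type are extracted here
 * and cached in skb_info for the payload PITs that follow.
 */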
static void t7xx_dpmaif_parse_msg_pit(const struct dpmaif_rx_queue *rxq,
				      const struct dpmaif_pit *msg_pit,
				      struct dpmaif_cur_rx_skb_info *skb_info)
{
	int header = le32_to_cpu(msg_pit->header);

	skb_info->cur_chn_idx = FIELD_GET(MSG_PIT_CHANNEL_ID, header);
	skb_info->check_sum = FIELD_GET(MSG_PIT_CHECKSUM, header);
	skb_info->pit_dp = FIELD_GET(MSG_PIT_DP, header);
	skb_info->pkt_type = FIELD_GET(MSG_PIT_IP, le32_to_cpu(msg_pit->msg.params_3));
}

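/*
 * Build the skb for a normal (non-fragment) payload PIT: unmap the BAT buffer,
 * derive the payload offset from the difference between the DMA address in the
 * PIT and the mapped buffer base, then reserve and fill the skb in place.
 */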
static int t7xx_dpmaif_set_data_to_skb(const struct dpmaif_rx_queue *rxq,
				       const struct dpmaif_pit *pkt_info,
				       struct dpmaif_cur_rx_skb_info *skb_info)
{
	unsigned long long data_bus_addr, data_base_addr;
	struct device *dev = rxq->dpmaif_ctrl->dev;
	struct dpmaif_bat_skb *bat_skb;
	unsigned int data_len;
	struct sk_buff *skb;
	int data_offset;

	bat_skb = rxq->bat_req->bat_skb;
	bat_skb += t7xx_normal_pit_bid(pkt_info);
	dma_unmap_single(dev, bat_skb->data_bus_addr, bat_skb->data_len, DMA_FROM_DEVICE);

	data_bus_addr = le32_to_cpu(pkt_info->pd.data_addr_h);
	data_bus_addr = (data_bus_addr << 32) + le32_to_cpu(pkt_info->pd.data_addr_l);
	data_base_addr = bat_skb->data_bus_addr;
	data_offset = data_bus_addr - data_base_addr;
	data_len = FIELD_GET(PD_PIT_DATA_LEN, le32_to_cpu(pkt_info->header));
	skb = bat_skb->skb;
	skb->len = 0;
	skb_reset_tail_pointer(skb);
	skb_reserve(skb, data_offset);

	if (skb->tail + data_len > skb->end) {
		dev_err(dev, "No buffer space available\n");
		return -ENOBUFS;
	}

	skb_put(skb, data_len);
	skb_info->cur_skb = skb;
	bat_skb->skb = NULL;
	return 0;
}

static int t7xx_dpmaif_get_rx_pkt(struct dpmaif_rx_queue *rxq,
				  const struct dpmaif_pit *pkt_info,
				  struct dpmaif_cur_rx_skb_info *skb_info)
{
	unsigned int cur_bid = t7xx_normal_pit_bid(pkt_info);
	int ret;

	ret = t7xx_bat_cur_bid_check(rxq, cur_bid);
	if (ret < 0)
		return ret;

	ret = t7xx_dpmaif_set_data_to_skb(rxq, pkt_info, skb_info);
	if (ret < 0) {
		dev_err(rxq->dpmaif_ctrl->dev, "RX set data to skb failed: %d\n", ret);
		return ret;
	}

	t7xx_dpmaif_set_bat_mask(rxq->bat_req, cur_bid);
	return 0;
}

static int t7xx_dpmaifq_rx_notify_hw(struct dpmaif_rx_queue *rxq)
{
	struct dpmaif_ctrl *dpmaif_ctrl = rxq->dpmaif_ctrl;
	int ret;

	queue_work(dpmaif_ctrl->bat_release_wq, &dpmaif_ctrl->bat_release_work);

	ret = t7xx_dpmaif_pit_release_and_add(rxq);
	if (ret < 0)
		dev_err(dpmaif_ctrl->dev, "RXQ%u update PIT failed: %d\n", rxq->index, ret);

	return ret;
}

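/* Queue the skb for the push thread, dropping it if the list is already at its limit. */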
static void t7xx_dpmaif_rx_skb_enqueue(struct dpmaif_rx_queue *rxq, struct sk_buff *skb)
{
	unsigned long flags;

	spin_lock_irqsave(&rxq->skb_list.lock, flags);
	if (rxq->skb_list.qlen < rxq->skb_list_max_len)
		__skb_queue_tail(&rxq->skb_list, skb);
	else
		dev_kfree_skb_any(skb);
	spin_unlock_irqrestore(&rxq->skb_list.lock, flags);
}

static void t7xx_dpmaif_rx_skb(struct dpmaif_rx_queue *rxq,
			       struct dpmaif_cur_rx_skb_info *skb_info)
{
	struct sk_buff *skb = skb_info->cur_skb;
	struct t7xx_skb_cb *skb_cb;
	u8 netif_id;

	skb_info->cur_skb = NULL;

	if (skb_info->pit_dp) {
		dev_kfree_skb_any(skb);
		return;
	}

	skb->ip_summed = skb_info->check_sum == DPMAIF_CS_RESULT_PASS ? CHECKSUM_UNNECESSARY :
									CHECKSUM_NONE;
	netif_id = FIELD_GET(NETIF_MASK, skb_info->cur_chn_idx);
	skb_cb = T7XX_SKB_CB(skb);
	skb_cb->netif_idx = netif_id;
	skb_cb->rx_pkt_type = skb_info->pkt_type;
	t7xx_dpmaif_rx_skb_enqueue(rxq, skb);
}

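/*
 * Main PIT processing loop. Each packet starts with a message PIT carrying its
 * metadata, followed by one or more payload PITs; PD_PIT_CONT marks that more
 * payload entries follow. Completed skbs are handed to the push thread, and the
 * hardware is notified about released entries every DPMAIF_NOTIFY_RELEASE_COUNT
 * PITs.
 */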
static int t7xx_dpmaif_rx_start(struct dpmaif_rx_queue *rxq, const unsigned int pit_cnt,
				const unsigned long timeout)
{
	unsigned int cur_pit, pit_len, rx_cnt, recv_skb_cnt = 0;
	struct device *dev = rxq->dpmaif_ctrl->dev;
	struct dpmaif_cur_rx_skb_info *skb_info;
	int ret = 0;

	pit_len = rxq->pit_size_cnt;
	skb_info = &rxq->rx_data_info;
	cur_pit = rxq->pit_rd_idx;

	for (rx_cnt = 0; rx_cnt < pit_cnt; rx_cnt++) {
		struct dpmaif_pit *pkt_info;
		u32 val;

		if (!skb_info->msg_pit_received && time_after_eq(jiffies, timeout))
			break;

		pkt_info = (struct dpmaif_pit *)rxq->pit_base + cur_pit;
		if (t7xx_dpmaif_check_pit_seq(rxq, pkt_info)) {
			dev_err_ratelimited(dev, "RXQ%u PIT sequence check failed\n", rxq->index);
			return -EAGAIN;
		}

		val = FIELD_GET(PD_PIT_PACKET_TYPE, le32_to_cpu(pkt_info->header));
		if (val == DES_PT_MSG) {
			if (skb_info->msg_pit_received)
				dev_err(dev, "RXQ%u received repeated PIT\n", rxq->index);

			skb_info->msg_pit_received = true;
			t7xx_dpmaif_parse_msg_pit(rxq, pkt_info, skb_info);
		} else { /* DES_PT_PD */
			val = FIELD_GET(PD_PIT_BUFFER_TYPE, le32_to_cpu(pkt_info->header));
			if (val != PKT_BUF_FRAG)
				ret = t7xx_dpmaif_get_rx_pkt(rxq, pkt_info, skb_info);
			else if (!skb_info->cur_skb)
				ret = -EINVAL;
			else
				ret = t7xx_dpmaif_get_frag(rxq, pkt_info, skb_info);

			if (ret < 0) {
				skb_info->err_payload = 1;
				dev_err_ratelimited(dev, "RXQ%u error payload\n", rxq->index);
			}

			val = FIELD_GET(PD_PIT_CONT, le32_to_cpu(pkt_info->header));
			if (!val) {
				if (!skb_info->err_payload) {
					t7xx_dpmaif_rx_skb(rxq, skb_info);
				} else if (skb_info->cur_skb) {
					dev_kfree_skb_any(skb_info->cur_skb);
					skb_info->cur_skb = NULL;
				}

				memset(skb_info, 0, sizeof(*skb_info));

				recv_skb_cnt++;
				if (!(recv_skb_cnt & DPMAIF_RX_PUSH_THRESHOLD_MASK)) {
					wake_up_all(&rxq->rx_wq);
					recv_skb_cnt = 0;
				}
			}
		}

		cur_pit = t7xx_ring_buf_get_next_wr_idx(pit_len, cur_pit);
		rxq->pit_rd_idx = cur_pit;
		rxq->pit_remain_release_cnt++;

		if (rx_cnt > 0 && !(rx_cnt % DPMAIF_NOTIFY_RELEASE_COUNT)) {
			ret = t7xx_dpmaifq_rx_notify_hw(rxq);
			if (ret < 0)
				break;
		}
	}

	if (recv_skb_cnt)
		wake_up_all(&rxq->rx_wq);

	if (!ret)
		ret = t7xx_dpmaifq_rx_notify_hw(rxq);

	if (ret)
		return ret;

	return rx_cnt;
}

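/*
 * Read the hardware PIT write index and return how many PIT entries are
 * pending between the software read index and that write index.
 */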
static unsigned int t7xx_dpmaifq_poll_pit(struct dpmaif_rx_queue *rxq)
{
	unsigned int hw_wr_idx, pit_cnt;

	if (!rxq->que_started)
		return 0;

	hw_wr_idx = t7xx_dpmaif_dl_dlq_pit_get_wr_idx(&rxq->dpmaif_ctrl->hw_info, rxq->index);
	pit_cnt = t7xx_ring_buf_rd_wr_count(rxq->pit_size_cnt, rxq->pit_rd_idx, hw_wr_idx,
					    DPMAIF_READ);
	rxq->pit_wr_idx = hw_wr_idx;
	return pit_cnt;
}

static int t7xx_dpmaif_rx_data_collect(struct dpmaif_ctrl *dpmaif_ctrl,
				       const unsigned int q_num, const unsigned int budget)
{
	struct dpmaif_rx_queue *rxq = &dpmaif_ctrl->rxq[q_num];
	unsigned long time_limit;
	unsigned int cnt;

	time_limit = jiffies + msecs_to_jiffies(DPMAIF_WQ_TIME_LIMIT_MS);

	while ((cnt = t7xx_dpmaifq_poll_pit(rxq))) {
		unsigned int rd_cnt;
		int real_cnt;

		rd_cnt = min(cnt, budget);

		real_cnt = t7xx_dpmaif_rx_start(rxq, rd_cnt, time_limit);
		if (real_cnt < 0)
			return real_cnt;

		if (real_cnt < cnt)
			return -EAGAIN;
	}

	return 0;
}

static void t7xx_dpmaif_do_rx(struct dpmaif_ctrl *dpmaif_ctrl, struct dpmaif_rx_queue *rxq)
{
	struct dpmaif_hw_info *hw_info = &dpmaif_ctrl->hw_info;
	int ret;

	ret = t7xx_dpmaif_rx_data_collect(dpmaif_ctrl, rxq->index, rxq->budget);
	if (ret < 0) {
		/* Try one more time */
		queue_work(rxq->worker, &rxq->dpmaif_rxq_work);
		t7xx_dpmaif_clr_ip_busy_sts(hw_info);
	} else {
		t7xx_dpmaif_clr_ip_busy_sts(hw_info);
		t7xx_dpmaif_dlq_unmask_rx_done(hw_info, rxq->index);
	}
}

static void t7xx_dpmaif_rxq_work(struct work_struct *work)
{
	struct dpmaif_rx_queue *rxq = container_of(work, struct dpmaif_rx_queue, dpmaif_rxq_work);
	struct dpmaif_ctrl *dpmaif_ctrl = rxq->dpmaif_ctrl;
	int ret;

	atomic_set(&rxq->rx_processing, 1);
	/* Ensure rx_processing is set to 1 before the RX flow actually begins */
	smp_mb();

	if (!rxq->que_started) {
		atomic_set(&rxq->rx_processing, 0);
		dev_err(dpmaif_ctrl->dev, "Work RXQ: %d has not been started\n", rxq->index);
		return;
	}

	ret = pm_runtime_resume_and_get(dpmaif_ctrl->dev);
	if (ret < 0 && ret != -EACCES)
		return;

	t7xx_pci_disable_sleep(dpmaif_ctrl->t7xx_dev);
	if (t7xx_pci_sleep_disable_complete(dpmaif_ctrl->t7xx_dev))
		t7xx_dpmaif_do_rx(dpmaif_ctrl, rxq);

	t7xx_pci_enable_sleep(dpmaif_ctrl->t7xx_dev);
	pm_runtime_mark_last_busy(dpmaif_ctrl->dev);
	pm_runtime_put_autosuspend(dpmaif_ctrl->dev);
	atomic_set(&rxq->rx_processing, 0);
}

void t7xx_dpmaif_irq_rx_done(struct dpmaif_ctrl *dpmaif_ctrl, const unsigned int que_mask)
{
	struct dpmaif_rx_queue *rxq;
	int qno;

	qno = ffs(que_mask) - 1;
	if (qno < 0 || qno > DPMAIF_RXQ_NUM - 1) {
		dev_err(dpmaif_ctrl->dev, "Invalid RXQ number: %d\n", qno);
		return;
	}

	rxq = &dpmaif_ctrl->rxq[qno];
	queue_work(rxq->worker, &rxq->dpmaif_rxq_work);
}

static void t7xx_dpmaif_base_free(const struct dpmaif_ctrl *dpmaif_ctrl,
				  const struct dpmaif_bat_request *bat_req)
{
	if (bat_req->bat_base)
		dma_free_coherent(dpmaif_ctrl->dev,
				  bat_req->bat_size_cnt * sizeof(struct dpmaif_bat),
				  bat_req->bat_base, bat_req->bat_bus_addr);
}

/**
 * t7xx_dpmaif_bat_alloc() - Allocate the BAT ring buffer.
 * @dpmaif_ctrl: Pointer to DPMAIF context structure.
 * @bat_req: Pointer to BAT request structure.
 * @buf_type: BAT ring type.
 *
 * This function allocates the BAT ring buffer shared with the HW device. It also
 * allocates a buffer used to store information about the BAT skbs for later release.
 *
 * Return:
 * * 0		- Success.
 * * -ERROR	- Error code.
 */
int t7xx_dpmaif_bat_alloc(const struct dpmaif_ctrl *dpmaif_ctrl, struct dpmaif_bat_request *bat_req,
			  const enum bat_type buf_type)
{
	int sw_buf_size;

	if (buf_type == BAT_TYPE_FRAG) {
		sw_buf_size = sizeof(struct dpmaif_bat_page);
		bat_req->bat_size_cnt = DPMAIF_FRG_COUNT;
		bat_req->pkt_buf_sz = DPMAIF_HW_FRG_PKTBUF;
	} else {
		sw_buf_size = sizeof(struct dpmaif_bat_skb);
		bat_req->bat_size_cnt = DPMAIF_BAT_COUNT;
		bat_req->pkt_buf_sz = DPMAIF_HW_BAT_PKTBUF;
	}

	bat_req->type = buf_type;
	bat_req->bat_wr_idx = 0;
	bat_req->bat_release_rd_idx = 0;

	bat_req->bat_base = dma_alloc_coherent(dpmaif_ctrl->dev,
					       bat_req->bat_size_cnt * sizeof(struct dpmaif_bat),
					       &bat_req->bat_bus_addr, GFP_KERNEL | __GFP_ZERO);
	if (!bat_req->bat_base)
		return -ENOMEM;

	/* For AP SW to record skb information */
	bat_req->bat_skb = devm_kzalloc(dpmaif_ctrl->dev, bat_req->bat_size_cnt * sw_buf_size,
					GFP_KERNEL);
	if (!bat_req->bat_skb)
		goto err_free_dma_mem;

	bat_req->bat_bitmap = bitmap_zalloc(bat_req->bat_size_cnt, GFP_KERNEL);
	if (!bat_req->bat_bitmap)
		goto err_free_dma_mem;

	spin_lock_init(&bat_req->mask_lock);
	atomic_set(&bat_req->refcnt, 0);
	return 0;

err_free_dma_mem:
	t7xx_dpmaif_base_free(dpmaif_ctrl, bat_req);

	return -ENOMEM;
}

void t7xx_dpmaif_bat_free(const struct dpmaif_ctrl *dpmaif_ctrl, struct dpmaif_bat_request *bat_req)
{
	if (!bat_req || !atomic_dec_and_test(&bat_req->refcnt))
		return;

	bitmap_free(bat_req->bat_bitmap);
	bat_req->bat_bitmap = NULL;

	if (bat_req->bat_skb) {
		unsigned int i;

		for (i = 0; i < bat_req->bat_size_cnt; i++) {
			if (bat_req->type == BAT_TYPE_FRAG)
				t7xx_unmap_bat_page(dpmaif_ctrl->dev, bat_req->bat_skb, i);
			else
				t7xx_unmap_bat_skb(dpmaif_ctrl->dev, bat_req->bat_skb, i);
		}
	}

	t7xx_dpmaif_base_free(dpmaif_ctrl, bat_req);
}

static int t7xx_dpmaif_rx_alloc(struct dpmaif_rx_queue *rxq)
{
	rxq->pit_size_cnt = DPMAIF_PIT_COUNT;
	rxq->pit_rd_idx = 0;
	rxq->pit_wr_idx = 0;
	rxq->pit_release_rd_idx = 0;
	rxq->expect_pit_seq = 0;
	rxq->pit_remain_release_cnt = 0;
	memset(&rxq->rx_data_info, 0, sizeof(rxq->rx_data_info));

	rxq->pit_base = dma_alloc_coherent(rxq->dpmaif_ctrl->dev,
					   rxq->pit_size_cnt * sizeof(struct dpmaif_pit),
					   &rxq->pit_bus_addr, GFP_KERNEL | __GFP_ZERO);
	if (!rxq->pit_base)
		return -ENOMEM;

	rxq->bat_req = &rxq->dpmaif_ctrl->bat_req;
	atomic_inc(&rxq->bat_req->refcnt);

	rxq->bat_frag = &rxq->dpmaif_ctrl->bat_frag;
	atomic_inc(&rxq->bat_frag->refcnt);
	return 0;
}

static void t7xx_dpmaif_rx_buf_free(const struct dpmaif_rx_queue *rxq)
{
	if (!rxq->dpmaif_ctrl)
		return;

	t7xx_dpmaif_bat_free(rxq->dpmaif_ctrl, rxq->bat_req);
	t7xx_dpmaif_bat_free(rxq->dpmaif_ctrl, rxq->bat_frag);

	if (rxq->pit_base)
		dma_free_coherent(rxq->dpmaif_ctrl->dev,
				  rxq->pit_size_cnt * sizeof(struct dpmaif_pit),
				  rxq->pit_base, rxq->pit_bus_addr);
}

int t7xx_dpmaif_rxq_init(struct dpmaif_rx_queue *queue)
{
	int ret;

	ret = t7xx_dpmaif_rx_alloc(queue);
	if (ret < 0) {
		dev_err(queue->dpmaif_ctrl->dev, "Failed to allocate RX buffers: %d\n", ret);
		return ret;
	}

	INIT_WORK(&queue->dpmaif_rxq_work, t7xx_dpmaif_rxq_work);

	queue->worker = alloc_workqueue("dpmaif_rx%d_worker",
					WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_HIGHPRI, 1, queue->index);
	if (!queue->worker) {
		ret = -ENOMEM;
		goto err_free_rx_buffer;
	}

	init_waitqueue_head(&queue->rx_wq);
	skb_queue_head_init(&queue->skb_list);
	queue->skb_list_max_len = queue->bat_req->pkt_buf_sz;
	queue->rx_thread = kthread_run(t7xx_dpmaif_net_rx_push_thread,
				       queue, "dpmaif_rx%d_push", queue->index);

	ret = PTR_ERR_OR_ZERO(queue->rx_thread);
	if (ret)
		goto err_free_workqueue;

	return 0;

err_free_workqueue:
	destroy_workqueue(queue->worker);

err_free_rx_buffer:
	t7xx_dpmaif_rx_buf_free(queue);

	return ret;
}

void t7xx_dpmaif_rxq_free(struct dpmaif_rx_queue *queue)
{
	if (queue->worker)
		destroy_workqueue(queue->worker);

	if (queue->rx_thread)
		kthread_stop(queue->rx_thread);

	skb_queue_purge(&queue->skb_list);
	t7xx_dpmaif_rx_buf_free(queue);
}

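/*
 * Deferred BAT recycling: release consumed normal and fragment BAT entries
 * back to the hardware and refill them with freshly allocated buffers.
 */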
static void t7xx_dpmaif_bat_release_work(struct work_struct *work)
{
	struct dpmaif_ctrl *dpmaif_ctrl = container_of(work, struct dpmaif_ctrl, bat_release_work);
	struct dpmaif_rx_queue *rxq;
	int ret;

	ret = pm_runtime_resume_and_get(dpmaif_ctrl->dev);
	if (ret < 0 && ret != -EACCES)
		return;

	t7xx_pci_disable_sleep(dpmaif_ctrl->t7xx_dev);

	/* All RX queues share one BAT table, so use the default queue DPF_RX_QNO_DFT */
	rxq = &dpmaif_ctrl->rxq[DPF_RX_QNO_DFT];
	if (t7xx_pci_sleep_disable_complete(dpmaif_ctrl->t7xx_dev)) {
		t7xx_dpmaif_bat_release_and_add(rxq);
		t7xx_dpmaif_frag_bat_release_and_add(rxq);
	}

	t7xx_pci_enable_sleep(dpmaif_ctrl->t7xx_dev);
	pm_runtime_mark_last_busy(dpmaif_ctrl->dev);
	pm_runtime_put_autosuspend(dpmaif_ctrl->dev);
}

int t7xx_dpmaif_bat_rel_wq_alloc(struct dpmaif_ctrl *dpmaif_ctrl)
{
	dpmaif_ctrl->bat_release_wq = alloc_workqueue("dpmaif_bat_release_work_queue",
						      WQ_MEM_RECLAIM, 1);
	if (!dpmaif_ctrl->bat_release_wq)
		return -ENOMEM;

	INIT_WORK(&dpmaif_ctrl->bat_release_work, t7xx_dpmaif_bat_release_work);
	return 0;
}

void t7xx_dpmaif_bat_wq_rel(struct dpmaif_ctrl *dpmaif_ctrl)
{
	flush_work(&dpmaif_ctrl->bat_release_work);

	if (dpmaif_ctrl->bat_release_wq) {
		destroy_workqueue(dpmaif_ctrl->bat_release_wq);
		dpmaif_ctrl->bat_release_wq = NULL;
	}
}

/**
 * t7xx_dpmaif_rx_stop() - Suspend RX flow.
 * @dpmaif_ctrl: Pointer to data path control struct dpmaif_ctrl.
 *
 * Wait for all the RX work to finish executing and mark each RX queue as stopped.
 */
void t7xx_dpmaif_rx_stop(struct dpmaif_ctrl *dpmaif_ctrl)
{
	unsigned int i;

	for (i = 0; i < DPMAIF_RXQ_NUM; i++) {
		struct dpmaif_rx_queue *rxq = &dpmaif_ctrl->rxq[i];
		int timeout, value;

		flush_work(&rxq->dpmaif_rxq_work);

		timeout = readx_poll_timeout_atomic(atomic_read, &rxq->rx_processing, value,
						    !value, 0, DPMAIF_CHECK_INIT_TIMEOUT_US);
		if (timeout)
			dev_err(dpmaif_ctrl->dev, "Stop RX SW failed\n");

		/* Ensure RX processing has stopped before we set rxq->que_started to false */
		smp_mb();
		rxq->que_started = false;
	}
}

static void t7xx_dpmaif_stop_rxq(struct dpmaif_rx_queue *rxq)
{
	int cnt, j = 0;

	flush_work(&rxq->dpmaif_rxq_work);
	rxq->que_started = false;

	do {
		cnt = t7xx_ring_buf_rd_wr_count(rxq->pit_size_cnt, rxq->pit_rd_idx,
						rxq->pit_wr_idx, DPMAIF_READ);

		if (++j >= DPMAIF_MAX_CHECK_COUNT) {
			dev_err(rxq->dpmaif_ctrl->dev, "Stop RX SW failed, %d\n", cnt);
			break;
		}
	} while (cnt);

	memset(rxq->pit_base, 0, rxq->pit_size_cnt * sizeof(struct dpmaif_pit));
	memset(rxq->bat_req->bat_base, 0, rxq->bat_req->bat_size_cnt * sizeof(struct dpmaif_bat));
	bitmap_zero(rxq->bat_req->bat_bitmap, rxq->bat_req->bat_size_cnt);
	memset(&rxq->rx_data_info, 0, sizeof(rxq->rx_data_info));

	rxq->pit_rd_idx = 0;
	rxq->pit_wr_idx = 0;
	rxq->pit_release_rd_idx = 0;
	rxq->expect_pit_seq = 0;
	rxq->pit_remain_release_cnt = 0;
	rxq->bat_req->bat_release_rd_idx = 0;
	rxq->bat_req->bat_wr_idx = 0;
	rxq->bat_frag->bat_release_rd_idx = 0;
	rxq->bat_frag->bat_wr_idx = 0;
}

void t7xx_dpmaif_rx_clear(struct dpmaif_ctrl *dpmaif_ctrl)
{
	int i;

	for (i = 0; i < DPMAIF_RXQ_NUM; i++)
		t7xx_dpmaif_stop_rxq(&dpmaif_ctrl->rxq[i]);
}