1ab6dddd2SSubbaraya Sundeep // SPDX-License-Identifier: GPL-2.0
2ab6dddd2SSubbaraya Sundeep /* Marvell RVU Physical Function ethernet driver
3ab6dddd2SSubbaraya Sundeep *
4ab6dddd2SSubbaraya Sundeep * Copyright (C) 2023 Marvell.
5ab6dddd2SSubbaraya Sundeep *
6ab6dddd2SSubbaraya Sundeep */
7ab6dddd2SSubbaraya Sundeep
8ab6dddd2SSubbaraya Sundeep #include <linux/netdevice.h>
9ab6dddd2SSubbaraya Sundeep #include <net/tso.h>
10ab6dddd2SSubbaraya Sundeep
11ab6dddd2SSubbaraya Sundeep #include "cn10k.h"
12ab6dddd2SSubbaraya Sundeep #include "otx2_reg.h"
13ab6dddd2SSubbaraya Sundeep #include "otx2_common.h"
14ab6dddd2SSubbaraya Sundeep #include "otx2_txrx.h"
15ab6dddd2SSubbaraya Sundeep #include "otx2_struct.h"
16ab6dddd2SSubbaraya Sundeep
17ab6dddd2SSubbaraya Sundeep #define OTX2_QOS_MAX_LEAF_NODES 16
18ab6dddd2SSubbaraya Sundeep
otx2_qos_aura_pool_free(struct otx2_nic * pfvf,int pool_id)19ab6dddd2SSubbaraya Sundeep static void otx2_qos_aura_pool_free(struct otx2_nic *pfvf, int pool_id)
20ab6dddd2SSubbaraya Sundeep {
21ab6dddd2SSubbaraya Sundeep struct otx2_pool *pool;
22ab6dddd2SSubbaraya Sundeep
23ab6dddd2SSubbaraya Sundeep if (!pfvf->qset.pool)
24ab6dddd2SSubbaraya Sundeep return;
25ab6dddd2SSubbaraya Sundeep
26ab6dddd2SSubbaraya Sundeep pool = &pfvf->qset.pool[pool_id];
27ab6dddd2SSubbaraya Sundeep qmem_free(pfvf->dev, pool->stack);
28ab6dddd2SSubbaraya Sundeep qmem_free(pfvf->dev, pool->fc_addr);
29ab6dddd2SSubbaraya Sundeep pool->stack = NULL;
30ab6dddd2SSubbaraya Sundeep pool->fc_addr = NULL;
31ab6dddd2SSubbaraya Sundeep }
32ab6dddd2SSubbaraya Sundeep
/* Allocate and initialize the NPA aura/pool pair backing one QoS send
 * queue, then fill the aura with DMA-mapped SQB (send queue buffer)
 * pointers. Returns 0 on success or a negative errno; on failure every
 * resource acquired here is unwound and the mailbox is reset.
 */
static int otx2_qos_sq_aura_pool_init(struct otx2_nic *pfvf, int qidx)
{
	struct otx2_qset *qset = &pfvf->qset;
	int pool_id, stack_pages, num_sqbs;
	struct otx2_hw *hw = &pfvf->hw;
	struct otx2_snd_queue *sq;
	struct otx2_pool *pool;
	dma_addr_t bufptr;
	int err, ptr;
	u64 iova, pa;

	/* Calculate number of SQBs needed.
	 *
	 * For a 128byte SQE, and 4K size SQB, 31 SQEs will fit in one SQB.
	 * Last SQE is used for pointing to next SQB.
	 */
	num_sqbs = (hw->sqb_size / 128) - 1;
	num_sqbs = (qset->sqe_cnt + num_sqbs) / num_sqbs;

	/* Get no of stack pages needed */
	stack_pages =
		(num_sqbs + hw->stack_pg_ptrs - 1) / hw->stack_pg_ptrs;

	pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);
	pool = &pfvf->qset.pool[pool_id];

	/* Initialize aura context */
	err = otx2_aura_init(pfvf, pool_id, pool_id, num_sqbs);
	if (err)
		return err;

	/* Initialize pool context */
	err = otx2_pool_init(pfvf, pool_id, stack_pages,
			     num_sqbs, hw->sqb_size, AURA_NIX_SQ);
	if (err)
		goto aura_free;

	/* Flush accumulated messages */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	if (err)
		goto pool_free;

	/* Allocate pointers and free them to aura/pool */
	sq = &qset->sq[qidx];
	sq->sqb_count = 0;
	sq->sqb_ptrs = kcalloc(num_sqbs, sizeof(*sq->sqb_ptrs), GFP_KERNEL);
	if (!sq->sqb_ptrs) {
		err = -ENOMEM;
		goto pool_free;
	}

	/* Hand each freshly mapped SQB to the hardware aura and keep the
	 * IOVA so the buffer can be unmapped and freed on teardown.
	 */
	for (ptr = 0; ptr < num_sqbs; ptr++) {
		err = otx2_alloc_rbuf(pfvf, pool, &bufptr);
		if (err)
			goto sqb_free;
		pfvf->hw_ops->aura_freeptr(pfvf, pool_id, bufptr);
		sq->sqb_ptrs[sq->sqb_count++] = (u64)bufptr;
	}

	return 0;

sqb_free:
	/* Undo the partial fill: pull each pointer back from the aura,
	 * unmap it and release the page.
	 */
	while (ptr--) {
		if (!sq->sqb_ptrs[ptr])
			continue;
		iova = sq->sqb_ptrs[ptr];
		pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
		dma_unmap_page_attrs(pfvf->dev, iova, hw->sqb_size,
				     DMA_FROM_DEVICE,
				     DMA_ATTR_SKIP_CPU_SYNC);
		put_page(virt_to_page(phys_to_virt(pa)));
		otx2_aura_allocptr(pfvf, pool_id);
	}
	sq->sqb_count = 0;
	/* NOTE(review): sq->sqb_ptrs is freed but left non-NULL here;
	 * confirm no later path (e.g. otx2_qos_sq_free_sqbs, which keys
	 * off sqb_ptrs) can run after this failure.
	 */
	kfree(sq->sqb_ptrs);
pool_free:
	qmem_free(pfvf->dev, pool->stack);
aura_free:
	qmem_free(pfvf->dev, pool->fc_addr);
	/* NOTE(review): pool->stack/fc_addr are not reset to NULL on this
	 * error path - confirm otx2_qos_aura_pool_free() cannot be called
	 * afterwards with these stale pointers.
	 */
	otx2_mbox_reset(&pfvf->mbox.mbox, 0);
	return err;
}
115ab6dddd2SSubbaraya Sundeep
otx2_qos_sq_free_sqbs(struct otx2_nic * pfvf,int qidx)116ab6dddd2SSubbaraya Sundeep static void otx2_qos_sq_free_sqbs(struct otx2_nic *pfvf, int qidx)
117ab6dddd2SSubbaraya Sundeep {
118ab6dddd2SSubbaraya Sundeep struct otx2_qset *qset = &pfvf->qset;
119ab6dddd2SSubbaraya Sundeep struct otx2_hw *hw = &pfvf->hw;
120ab6dddd2SSubbaraya Sundeep struct otx2_snd_queue *sq;
121ab6dddd2SSubbaraya Sundeep u64 iova, pa;
122ab6dddd2SSubbaraya Sundeep int sqb;
123ab6dddd2SSubbaraya Sundeep
124ab6dddd2SSubbaraya Sundeep sq = &qset->sq[qidx];
125ab6dddd2SSubbaraya Sundeep if (!sq->sqb_ptrs)
126ab6dddd2SSubbaraya Sundeep return;
127ab6dddd2SSubbaraya Sundeep for (sqb = 0; sqb < sq->sqb_count; sqb++) {
128ab6dddd2SSubbaraya Sundeep if (!sq->sqb_ptrs[sqb])
129ab6dddd2SSubbaraya Sundeep continue;
130ab6dddd2SSubbaraya Sundeep iova = sq->sqb_ptrs[sqb];
131ab6dddd2SSubbaraya Sundeep pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
132ab6dddd2SSubbaraya Sundeep dma_unmap_page_attrs(pfvf->dev, iova, hw->sqb_size,
133ab6dddd2SSubbaraya Sundeep DMA_FROM_DEVICE,
134ab6dddd2SSubbaraya Sundeep DMA_ATTR_SKIP_CPU_SYNC);
135ab6dddd2SSubbaraya Sundeep put_page(virt_to_page(phys_to_virt(pa)));
136ab6dddd2SSubbaraya Sundeep }
137ab6dddd2SSubbaraya Sundeep
138ab6dddd2SSubbaraya Sundeep sq->sqb_count = 0;
139ab6dddd2SSubbaraya Sundeep
140ab6dddd2SSubbaraya Sundeep sq = &qset->sq[qidx];
141ab6dddd2SSubbaraya Sundeep qmem_free(pfvf->dev, sq->sqe);
142ab6dddd2SSubbaraya Sundeep qmem_free(pfvf->dev, sq->tso_hdrs);
143ab6dddd2SSubbaraya Sundeep kfree(sq->sg);
144ab6dddd2SSubbaraya Sundeep kfree(sq->sqb_ptrs);
145ab6dddd2SSubbaraya Sundeep qmem_free(pfvf->dev, sq->timestamps);
146ab6dddd2SSubbaraya Sundeep
147ab6dddd2SSubbaraya Sundeep memset((void *)sq, 0, sizeof(*sq));
148ab6dddd2SSubbaraya Sundeep }
149ab6dddd2SSubbaraya Sundeep
150ab6dddd2SSubbaraya Sundeep /* send queue id */
otx2_qos_sqb_flush(struct otx2_nic * pfvf,int qidx)151ab6dddd2SSubbaraya Sundeep static void otx2_qos_sqb_flush(struct otx2_nic *pfvf, int qidx)
152ab6dddd2SSubbaraya Sundeep {
153ab6dddd2SSubbaraya Sundeep int sqe_tail, sqe_head;
154ab6dddd2SSubbaraya Sundeep u64 incr, *ptr, val;
155ab6dddd2SSubbaraya Sundeep
156ab6dddd2SSubbaraya Sundeep ptr = (__force u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_STATUS);
157ab6dddd2SSubbaraya Sundeep incr = (u64)qidx << 32;
158ab6dddd2SSubbaraya Sundeep val = otx2_atomic64_add(incr, ptr);
159ab6dddd2SSubbaraya Sundeep sqe_head = (val >> 20) & 0x3F;
160ab6dddd2SSubbaraya Sundeep sqe_tail = (val >> 28) & 0x3F;
161ab6dddd2SSubbaraya Sundeep if (sqe_head != sqe_tail)
162ab6dddd2SSubbaraya Sundeep usleep_range(50, 60);
163ab6dddd2SSubbaraya Sundeep }
164ab6dddd2SSubbaraya Sundeep
/* Queue mailbox requests that disable the SQ context and its backing
 * NPA aura and pool contexts, then synchronously flush the mailbox.
 * Returns 0 on success or a negative errno; if a later allocation
 * fails, the partially built mailbox is reset.
 */
static int otx2_qos_ctx_disable(struct otx2_nic *pfvf, u16 qidx, int aura_id)
{
	struct npa_aq_enq_req *aura_req, *pool_req;

	/* SQ disable: the AQ request format differs between CN10K and
	 * earlier silicon.
	 */
	if (test_bit(CN10K_LMTST, &pfvf->hw.cap_flag)) {
		struct nix_cn10k_aq_enq_req *sq_req;

		sq_req = otx2_mbox_alloc_msg_nix_cn10k_aq_enq(&pfvf->mbox);
		if (!sq_req)
			return -ENOMEM;
		sq_req->ctype = NIX_AQ_CTYPE_SQ;
		sq_req->op = NIX_AQ_INSTOP_WRITE;
		sq_req->qidx = qidx;
		sq_req->sq.ena = 0;
		sq_req->sq_mask.ena = 1;
	} else {
		struct nix_aq_enq_req *sq_req;

		sq_req = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox);
		if (!sq_req)
			return -ENOMEM;
		sq_req->ctype = NIX_AQ_CTYPE_SQ;
		sq_req->op = NIX_AQ_INSTOP_WRITE;
		sq_req->qidx = qidx;
		sq_req->sq.ena = 0;
		sq_req->sq_mask.ena = 1;
	}

	/* Aura disable */
	aura_req = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
	if (!aura_req) {
		otx2_mbox_reset(&pfvf->mbox.mbox, 0);
		return -ENOMEM;
	}
	aura_req->ctype = NPA_AQ_CTYPE_AURA;
	aura_req->op = NPA_AQ_INSTOP_WRITE;
	aura_req->aura_id = aura_id;
	aura_req->aura.ena = 0;
	aura_req->aura_mask.ena = 1;

	/* Pool disable */
	pool_req = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
	if (!pool_req) {
		otx2_mbox_reset(&pfvf->mbox.mbox, 0);
		return -ENOMEM;
	}
	pool_req->ctype = NPA_AQ_CTYPE_POOL;
	pool_req->op = NPA_AQ_INSTOP_WRITE;
	pool_req->aura_id = aura_id;
	pool_req->pool.ena = 0;
	pool_req->pool_mask.ena = 1;

	return otx2_sync_mbox_msg(&pfvf->mbox);
}
219ab6dddd2SSubbaraya Sundeep
otx2_qos_get_qid(struct otx2_nic * pfvf)2205e6808b4SNaveen Mamindlapalli int otx2_qos_get_qid(struct otx2_nic *pfvf)
2215e6808b4SNaveen Mamindlapalli {
2225e6808b4SNaveen Mamindlapalli int qidx;
2235e6808b4SNaveen Mamindlapalli
2245e6808b4SNaveen Mamindlapalli qidx = find_first_zero_bit(pfvf->qos.qos_sq_bmap,
2255e6808b4SNaveen Mamindlapalli pfvf->hw.tc_tx_queues);
2265e6808b4SNaveen Mamindlapalli
2275e6808b4SNaveen Mamindlapalli return qidx == pfvf->hw.tc_tx_queues ? -ENOSPC : qidx;
2285e6808b4SNaveen Mamindlapalli }
2295e6808b4SNaveen Mamindlapalli
/* Return a QoS SQ index to the free pool by clearing its bit in the
 * qos_sq_bmap allocation bitmap.
 */
void otx2_qos_free_qid(struct otx2_nic *pfvf, int qidx)
{
	clear_bit(qidx, pfvf->qos.qos_sq_bmap);
}
2345e6808b4SNaveen Mamindlapalli
otx2_qos_enable_sq(struct otx2_nic * pfvf,int qidx)2355e6808b4SNaveen Mamindlapalli int otx2_qos_enable_sq(struct otx2_nic *pfvf, int qidx)
236ab6dddd2SSubbaraya Sundeep {
237ab6dddd2SSubbaraya Sundeep struct otx2_hw *hw = &pfvf->hw;
238ab6dddd2SSubbaraya Sundeep int pool_id, sq_idx, err;
239ab6dddd2SSubbaraya Sundeep
240ab6dddd2SSubbaraya Sundeep if (pfvf->flags & OTX2_FLAG_INTF_DOWN)
241ab6dddd2SSubbaraya Sundeep return -EPERM;
242ab6dddd2SSubbaraya Sundeep
243ab6dddd2SSubbaraya Sundeep sq_idx = hw->non_qos_queues + qidx;
244ab6dddd2SSubbaraya Sundeep
245ab6dddd2SSubbaraya Sundeep mutex_lock(&pfvf->mbox.lock);
246ab6dddd2SSubbaraya Sundeep err = otx2_qos_sq_aura_pool_init(pfvf, sq_idx);
247ab6dddd2SSubbaraya Sundeep if (err)
248ab6dddd2SSubbaraya Sundeep goto out;
249ab6dddd2SSubbaraya Sundeep
250ab6dddd2SSubbaraya Sundeep pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, sq_idx);
251ab6dddd2SSubbaraya Sundeep err = otx2_sq_init(pfvf, sq_idx, pool_id);
252ab6dddd2SSubbaraya Sundeep if (err)
253ab6dddd2SSubbaraya Sundeep goto out;
254ab6dddd2SSubbaraya Sundeep out:
255ab6dddd2SSubbaraya Sundeep mutex_unlock(&pfvf->mbox.lock);
256ab6dddd2SSubbaraya Sundeep return err;
257ab6dddd2SSubbaraya Sundeep }
258ab6dddd2SSubbaraya Sundeep
/* Tear down one QoS send queue: quiesce and flush the hardware SQ,
 * reap completed TX CQEs, disable the SQ/aura/pool contexts via the
 * mailbox, then free the SQBs and the aura/pool backing memory.
 */
void otx2_qos_disable_sq(struct otx2_nic *pfvf, int qidx)
{
	struct otx2_qset *qset = &pfvf->qset;
	struct otx2_hw *hw = &pfvf->hw;
	struct otx2_snd_queue *sq;
	struct otx2_cq_queue *cq;
	int pool_id, sq_idx;

	/* QoS queues follow the regular TX queues in the SQ array */
	sq_idx = hw->non_qos_queues + qidx;

	/* If the DOWN flag is set SQs are already freed */
	if (pfvf->flags & OTX2_FLAG_INTF_DOWN)
		return;

	/* Nothing to do if this SQ was never brought up */
	sq = &pfvf->qset.sq[sq_idx];
	if (!sq->sqb_ptrs)
		return;

	/* Defensive range check. NOTE(review): with qidx >= 0 the lower
	 * bound can never trip given sq_idx = non_qos_queues + qidx;
	 * only the upper bound is live.
	 */
	if (sq_idx < hw->non_qos_queues ||
	    sq_idx >= otx2_get_total_tx_queues(pfvf)) {
		netdev_err(pfvf->netdev, "Send Queue is not a QoS queue\n");
		return;
	}

	/* TX CQs are laid out after the RX CQs - TODO confirm this
	 * indexing against the CQ allocation in the common code.
	 */
	cq = &qset->cq[pfvf->hw.rx_queues + sq_idx];
	pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, sq_idx);

	/* Wait for in-flight SQEs, flush the SMQ, then reap completions */
	otx2_qos_sqb_flush(pfvf, sq_idx);
	otx2_smq_flush(pfvf, otx2_get_smq_idx(pfvf, sq_idx));
	otx2_cleanup_tx_cqes(pfvf, cq);

	mutex_lock(&pfvf->mbox.lock);
	otx2_qos_ctx_disable(pfvf, sq_idx, pool_id);
	mutex_unlock(&pfvf->mbox.lock);

	/* Release SQBs and the backing aura/pool memory */
	otx2_qos_sq_free_sqbs(pfvf, sq_idx);
	otx2_qos_aura_pool_free(pfvf, pool_id);
}
297