// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Ethernet driver
 *
 * Copyright (C) 2023 Marvell.
 *
 */
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/inetdevice.h>
#include <linux/bitfield.h>

#include "otx2_common.h"
#include "cn10k.h"
#include "qos.h"

#define OTX2_QOS_QID_INNER		0xFFFFU
#define OTX2_QOS_QID_NONE		0xFFFEU
#define OTX2_QOS_ROOT_CLASSID		0xFFFFFFFF
#define OTX2_QOS_CLASS_NONE		0
#define OTX2_QOS_DEFAULT_PRIO		0xF
#define OTX2_QOS_INVALID_SQ		0xFFFF
#define OTX2_QOS_INVALID_TXSCHQ_IDX	0xFFFF
#define CN10K_MAX_RR_WEIGHT		GENMASK_ULL(13, 0)
#define OTX2_MAX_RR_QUANTUM		GENMASK_ULL(23, 0)

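/* The number of real netdev Tx queues is the sum of the statically
 * configured queues (hw->tx_queues) and every QoS send queue currently set
 * in qos_sq_bmap; recompute and apply it whenever a QoS SQ is added or
 * removed.
 */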
static void otx2_qos_update_tx_netdev_queues(struct otx2_nic *pfvf)
{
	struct otx2_hw *hw = &pfvf->hw;
	int tx_queues, qos_txqs, err;

	qos_txqs = bitmap_weight(pfvf->qos.qos_sq_bmap,
				 OTX2_QOS_MAX_LEAF_NODES);

	tx_queues = hw->tx_queues + qos_txqs;

	err = netif_set_real_num_tx_queues(pfvf->netdev, tx_queues);
	if (err) {
		netdev_err(pfvf->netdev,
			   "Failed to set number of Tx queues: %d\n", tx_queues);
		return;
	}
}

static void otx2_qos_get_regaddr(struct otx2_qos_node *node,
				 struct nix_txschq_config *cfg,
				 int index)
{
	if (node->level == NIX_TXSCH_LVL_SMQ) {
		cfg->reg[index++] = NIX_AF_MDQX_PARENT(node->schq);
		cfg->reg[index++] = NIX_AF_MDQX_SCHEDULE(node->schq);
		cfg->reg[index++] = NIX_AF_MDQX_PIR(node->schq);
		cfg->reg[index] = NIX_AF_MDQX_CIR(node->schq);
	} else if (node->level == NIX_TXSCH_LVL_TL4) {
		cfg->reg[index++] = NIX_AF_TL4X_PARENT(node->schq);
		cfg->reg[index++] = NIX_AF_TL4X_SCHEDULE(node->schq);
		cfg->reg[index++] = NIX_AF_TL4X_PIR(node->schq);
		cfg->reg[index] = NIX_AF_TL4X_CIR(node->schq);
	} else if (node->level == NIX_TXSCH_LVL_TL3) {
		cfg->reg[index++] = NIX_AF_TL3X_PARENT(node->schq);
		cfg->reg[index++] = NIX_AF_TL3X_SCHEDULE(node->schq);
		cfg->reg[index++] = NIX_AF_TL3X_PIR(node->schq);
		cfg->reg[index] = NIX_AF_TL3X_CIR(node->schq);
	} else if (node->level == NIX_TXSCH_LVL_TL2) {
		cfg->reg[index++] = NIX_AF_TL2X_PARENT(node->schq);
		cfg->reg[index++] = NIX_AF_TL2X_SCHEDULE(node->schq);
		cfg->reg[index++] = NIX_AF_TL2X_PIR(node->schq);
		cfg->reg[index] = NIX_AF_TL2X_CIR(node->schq);
	}
}

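/* Convert an HTB quantum (bytes) to a hardware DWRR weight by dividing by
 * the device DWRR MTU and rounding up. For example, with a DWRR MTU of
 * 1500 bytes (illustrative only; the real value is device-specific and
 * held in hw.dwrr_mtu), quantum 3000 maps to weight 2 and quantum 3001 to
 * weight 3.
 */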
static int otx2_qos_quantum_to_dwrr_weight(struct otx2_nic *pfvf, u32 quantum)
{
	u32 weight;

	weight = quantum / pfvf->hw.dwrr_mtu;
	if (quantum % pfvf->hw.dwrr_mtu)
		weight += 1;

	return weight;
}

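/* Fill the PARENT/SCHEDULE/PIR/CIR register values for one node, in the
 * same order that otx2_qos_get_regaddr() emits the register addresses.
 * The SCHEDULE value packs the strict priority at bit 24 and the DWRR
 * weight/quantum in bits [23:0] (see the GENMASK limits above); PIR is
 * programmed from max(rate, ceil) and CIR from rate.
 */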
static void otx2_config_sched_shaping(struct otx2_nic *pfvf,
				      struct otx2_qos_node *node,
				      struct nix_txschq_config *cfg,
				      int *num_regs)
{
	u32 rr_weight;
	u32 quantum;
	u64 maxrate;

	otx2_qos_get_regaddr(node, cfg, *num_regs);

	/* configure parent txschq */
	cfg->regval[*num_regs] = node->parent->schq << 16;
	(*num_regs)++;

	/* configure prio/quantum */
	if (node->qid == OTX2_QOS_QID_NONE) {
		cfg->regval[*num_regs] = node->prio << 24 |
					 mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen);
		(*num_regs)++;
		return;
	}

	/* configure priority/quantum */
	if (node->is_static) {
		cfg->regval[*num_regs] =
			(node->schq - node->parent->prio_anchor) << 24;
	} else {
		quantum = node->quantum ?
			  node->quantum : pfvf->tx_max_pktlen;
		rr_weight = otx2_qos_quantum_to_dwrr_weight(pfvf, quantum);
		cfg->regval[*num_regs] = node->parent->child_dwrr_prio << 24 |
					 rr_weight;
	}
	(*num_regs)++;

	/* configure PIR */
	maxrate = (node->rate > node->ceil) ? node->rate : node->ceil;

	cfg->regval[*num_regs] =
		otx2_get_txschq_rate_regval(pfvf, maxrate, 65536);
	(*num_regs)++;

	/* Don't configure CIR when CIR+PIR mode is not supported;
	 * on 96xx, CIR + PIR + RED_ALGO=STALL causes a deadlock.
	 */
	if (!test_bit(QOS_CIR_PIR_SUPPORT, &pfvf->hw.cap_flag))
		return;

	cfg->regval[*num_regs] =
		otx2_get_txschq_rate_regval(pfvf, node->rate, 65536);
	(*num_regs)++;
}

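/* Program the per-level txschq registers for a node. At the SMQ level the
 * CFG value packs the maximum packet length at bit 8 and OTX2_MIN_MTU in
 * the low bits; the extra constants ORed in below set further SMQ fields
 * whose exact meaning is hardware-specific. TL3/TL2 nodes also receive the
 * link configuration when they sit at the link-config level reported by
 * the AF, and a parent-less TL2 is treated as the root of the hierarchy.
 */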
static void __otx2_qos_txschq_cfg(struct otx2_nic *pfvf,
				  struct otx2_qos_node *node,
				  struct nix_txschq_config *cfg)
{
	struct otx2_hw *hw = &pfvf->hw;
	int num_regs = 0;
	u8 level;

	level = node->level;

	/* program txschq registers */
	if (level == NIX_TXSCH_LVL_SMQ) {
		cfg->reg[num_regs] = NIX_AF_SMQX_CFG(node->schq);
		cfg->regval[num_regs] = ((u64)pfvf->tx_max_pktlen << 8) |
					OTX2_MIN_MTU;
		cfg->regval[num_regs] |= (0x20ULL << 51) | (0x80ULL << 39) |
					 (0x2ULL << 36);
		num_regs++;

		otx2_config_sched_shaping(pfvf, node, cfg, &num_regs);
	} else if (level == NIX_TXSCH_LVL_TL4) {
		otx2_config_sched_shaping(pfvf, node, cfg, &num_regs);
	} else if (level == NIX_TXSCH_LVL_TL3) {
		/* configure link cfg */
		if (level == pfvf->qos.link_cfg_lvl) {
			cfg->reg[num_regs] = NIX_AF_TL3_TL2X_LINKX_CFG(node->schq, hw->tx_link);
			cfg->regval[num_regs] = BIT_ULL(13) | BIT_ULL(12);
			num_regs++;
		}

		otx2_config_sched_shaping(pfvf, node, cfg, &num_regs);
	} else if (level == NIX_TXSCH_LVL_TL2) {
		/* configure parent txschq */
		cfg->reg[num_regs] = NIX_AF_TL2X_PARENT(node->schq);
		cfg->regval[num_regs] = (u64)hw->tx_link << 16;
		num_regs++;

		/* configure link cfg */
		if (level == pfvf->qos.link_cfg_lvl) {
			cfg->reg[num_regs] = NIX_AF_TL3_TL2X_LINKX_CFG(node->schq, hw->tx_link);
			cfg->regval[num_regs] = BIT_ULL(13) | BIT_ULL(12);
			num_regs++;
		}

		/* check if node is root */
		if (node->qid == OTX2_QOS_QID_INNER && !node->parent) {
			cfg->reg[num_regs] = NIX_AF_TL2X_SCHEDULE(node->schq);
			cfg->regval[num_regs] = (u64)hw->txschq_aggr_lvl_rr_prio << 24 |
						mtu_to_dwrr_weight(pfvf,
								   pfvf->tx_max_pktlen);
			num_regs++;
			goto txschq_cfg_out;
		}

		otx2_config_sched_shaping(pfvf, node, cfg, &num_regs);
	}

txschq_cfg_out:
	cfg->num_regs = num_regs;
}

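/* Update a parent's TOPOLOGY register: PRIO_ANCHOR (the first child schq)
 * lives at bit 32 and the round-robin priority at bit 1. MDQ is the lowest
 * level, so an MDQ parent has no topology to program.
 */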
static int otx2_qos_txschq_set_parent_topology(struct otx2_nic *pfvf,
					       struct otx2_qos_node *parent)
{
	struct mbox *mbox = &pfvf->mbox;
	struct nix_txschq_config *cfg;
	int rc;

	if (parent->level == NIX_TXSCH_LVL_MDQ)
		return 0;

	mutex_lock(&mbox->lock);

	cfg = otx2_mbox_alloc_msg_nix_txschq_cfg(&pfvf->mbox);
	if (!cfg) {
		mutex_unlock(&mbox->lock);
		return -ENOMEM;
	}

	cfg->lvl = parent->level;

	if (parent->level == NIX_TXSCH_LVL_TL4)
		cfg->reg[0] = NIX_AF_TL4X_TOPOLOGY(parent->schq);
	else if (parent->level == NIX_TXSCH_LVL_TL3)
		cfg->reg[0] = NIX_AF_TL3X_TOPOLOGY(parent->schq);
	else if (parent->level == NIX_TXSCH_LVL_TL2)
		cfg->reg[0] = NIX_AF_TL2X_TOPOLOGY(parent->schq);
	else if (parent->level == NIX_TXSCH_LVL_TL1)
		cfg->reg[0] = NIX_AF_TL1X_TOPOLOGY(parent->schq);

	cfg->regval[0] = (u64)parent->prio_anchor << 32;
	cfg->regval[0] |= ((parent->child_dwrr_prio != OTX2_QOS_DEFAULT_PRIO) ?
			   parent->child_dwrr_prio : 0) << 1;
	cfg->num_regs++;

	rc = otx2_sync_mbox_msg(&pfvf->mbox);

	mutex_unlock(&mbox->lock);

	return rc;
}

static void otx2_qos_free_hw_node_schq(struct otx2_nic *pfvf,
				       struct otx2_qos_node *parent)
{
	struct otx2_qos_node *node;

	list_for_each_entry_reverse(node, &parent->child_schq_list, list)
		otx2_txschq_free_one(pfvf, node->level, node->schq);
}

static void otx2_qos_free_hw_node(struct otx2_nic *pfvf,
				  struct otx2_qos_node *parent)
{
	struct otx2_qos_node *node, *tmp;

	list_for_each_entry_safe(node, tmp, &parent->child_list, list) {
		otx2_qos_free_hw_node(pfvf, node);
		otx2_qos_free_hw_node_schq(pfvf, node);
		otx2_txschq_free_one(pfvf, node->level, node->schq);
	}
}

static void otx2_qos_free_hw_cfg(struct otx2_nic *pfvf,
				 struct otx2_qos_node *node)
{
	mutex_lock(&pfvf->qos.qos_lock);

	/* free child node hw mappings */
	otx2_qos_free_hw_node(pfvf, node);
	otx2_qos_free_hw_node_schq(pfvf, node);

	/* free node hw mappings */
	otx2_txschq_free_one(pfvf, node->level, node->schq);

	mutex_unlock(&pfvf->qos.qos_lock);
}

static void otx2_qos_sw_node_delete(struct otx2_nic *pfvf,
				    struct otx2_qos_node *node)
{
	hash_del_rcu(&node->hlist);

	if (node->qid != OTX2_QOS_QID_INNER && node->qid != OTX2_QOS_QID_NONE) {
		__clear_bit(node->qid, pfvf->qos.qos_sq_bmap);
		otx2_qos_update_tx_netdev_queues(pfvf);
	}

	list_del(&node->list);
	kfree(node);
}

static void otx2_qos_free_sw_node_schq(struct otx2_nic *pfvf,
				       struct otx2_qos_node *parent)
{
	struct otx2_qos_node *node, *tmp;

	list_for_each_entry_safe(node, tmp, &parent->child_schq_list, list) {
		list_del(&node->list);
		kfree(node);
	}
}

static void __otx2_qos_free_sw_node(struct otx2_nic *pfvf,
				    struct otx2_qos_node *parent)
{
	struct otx2_qos_node *node, *tmp;

	list_for_each_entry_safe(node, tmp, &parent->child_list, list) {
		__otx2_qos_free_sw_node(pfvf, node);
		otx2_qos_free_sw_node_schq(pfvf, node);
		otx2_qos_sw_node_delete(pfvf, node);
	}
}

static void otx2_qos_free_sw_node(struct otx2_nic *pfvf,
				  struct otx2_qos_node *node)
{
	mutex_lock(&pfvf->qos.qos_lock);

	__otx2_qos_free_sw_node(pfvf, node);
	otx2_qos_free_sw_node_schq(pfvf, node);
	otx2_qos_sw_node_delete(pfvf, node);

	mutex_unlock(&pfvf->qos.qos_lock);
}

static void otx2_qos_destroy_node(struct otx2_nic *pfvf,
				  struct otx2_qos_node *node)
{
	otx2_qos_free_hw_cfg(pfvf, node);
	otx2_qos_free_sw_node(pfvf, node);
}

static void otx2_qos_fill_cfg_schq(struct otx2_qos_node *parent,
				   struct otx2_qos_cfg *cfg)
{
	struct otx2_qos_node *node;

	list_for_each_entry(node, &parent->child_schq_list, list)
		cfg->schq[node->level]++;
}

static void otx2_qos_fill_cfg_tl(struct otx2_qos_node *parent,
				 struct otx2_qos_cfg *cfg)
{
	struct otx2_qos_node *node;

	list_for_each_entry(node, &parent->child_list, list) {
		otx2_qos_fill_cfg_tl(node, cfg);
		otx2_qos_fill_cfg_schq(node, cfg);
	}

	/* Assign the required number of transmit scheduler queues under the
	 * given class
	 */
	cfg->schq_contig[parent->level - 1] += parent->child_dwrr_cnt +
					       parent->max_static_prio + 1;
}

static void otx2_qos_prepare_txschq_cfg(struct otx2_nic *pfvf,
					struct otx2_qos_node *parent,
					struct otx2_qos_cfg *cfg)
{
	mutex_lock(&pfvf->qos.qos_lock);
	otx2_qos_fill_cfg_tl(parent, cfg);
	mutex_unlock(&pfvf->qos.qos_lock);
}

static void otx2_qos_read_txschq_cfg_schq(struct otx2_qos_node *parent,
					  struct otx2_qos_cfg *cfg)
{
	struct otx2_qos_node *node;
	int cnt;

	list_for_each_entry(node, &parent->child_schq_list, list) {
		cnt = cfg->dwrr_node_pos[node->level];
		cfg->schq_list[node->level][cnt] = node->schq;
		cfg->schq[node->level]++;
		cfg->dwrr_node_pos[node->level]++;
	}
}

static void otx2_qos_read_txschq_cfg_tl(struct otx2_qos_node *parent,
					struct otx2_qos_cfg *cfg)
{
	struct otx2_qos_node *node;
	int cnt;

	list_for_each_entry(node, &parent->child_list, list) {
		otx2_qos_read_txschq_cfg_tl(node, cfg);
		cnt = cfg->static_node_pos[node->level];
		cfg->schq_contig_list[node->level][cnt] = node->schq;
		cfg->schq_index_used[node->level][cnt] = true;
		cfg->schq_contig[node->level]++;
		cfg->static_node_pos[node->level]++;
		otx2_qos_read_txschq_cfg_schq(node, cfg);
	}
}

static void otx2_qos_read_txschq_cfg(struct otx2_nic *pfvf,
				     struct otx2_qos_node *node,
				     struct otx2_qos_cfg *cfg)
{
	mutex_lock(&pfvf->qos.qos_lock);
	otx2_qos_read_txschq_cfg_tl(node, cfg);
	mutex_unlock(&pfvf->qos.qos_lock);
}

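/* The root of the offloaded HTB tree maps to TL1 on a PF and to TL2 on a
 * VF, since VFs do not own the TL1 level.
 */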
static struct otx2_qos_node *
otx2_qos_alloc_root(struct otx2_nic *pfvf)
{
	struct otx2_qos_node *node;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return ERR_PTR(-ENOMEM);

	node->parent = NULL;
	if (!is_otx2_vf(pfvf->pcifunc)) {
		node->level = NIX_TXSCH_LVL_TL1;
	} else {
		node->level = NIX_TXSCH_LVL_TL2;
		node->child_dwrr_prio = OTX2_QOS_DEFAULT_PRIO;
	}

	WRITE_ONCE(node->qid, OTX2_QOS_QID_INNER);
	node->classid = OTX2_QOS_ROOT_CLASSID;

	hash_add_rcu(pfvf->qos.qos_hlist, &node->hlist, node->classid);
	list_add_tail(&node->list, &pfvf->qos.qos_tree);
	INIT_LIST_HEAD(&node->child_list);
	INIT_LIST_HEAD(&node->child_schq_list);

	return node;
}

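/* Insert a node into the parent's child list, kept sorted by ascending
 * priority; two static children with the same priority are rejected.
 * For example, inserting prio 2 into a list holding prios 1 and 3 places
 * it between them.
 */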
static int otx2_qos_add_child_node(struct otx2_qos_node *parent,
				   struct otx2_qos_node *node)
{
	struct list_head *head = &parent->child_list;
	struct otx2_qos_node *tmp_node;
	struct list_head *tmp;

	if (node->prio > parent->max_static_prio)
		parent->max_static_prio = node->prio;

	for (tmp = head->next; tmp != head; tmp = tmp->next) {
		tmp_node = list_entry(tmp, struct otx2_qos_node, list);
		if (tmp_node->prio == node->prio &&
		    tmp_node->is_static)
			return -EEXIST;
		if (tmp_node->prio > node->prio) {
			list_add_tail(&node->list, tmp);
			return 0;
		}
	}

	list_add_tail(&node->list, head);
	return 0;
}

static int otx2_qos_alloc_txschq_node(struct otx2_nic *pfvf,
				      struct otx2_qos_node *node)
{
	struct otx2_qos_node *txschq_node, *parent, *tmp;
	int lvl;

	parent = node;
	for (lvl = node->level - 1; lvl >= NIX_TXSCH_LVL_MDQ; lvl--) {
		txschq_node = kzalloc(sizeof(*txschq_node), GFP_KERNEL);
		if (!txschq_node)
			goto err_out;

		txschq_node->parent = parent;
		txschq_node->level = lvl;
		txschq_node->classid = OTX2_QOS_CLASS_NONE;
		WRITE_ONCE(txschq_node->qid, OTX2_QOS_QID_NONE);
		txschq_node->rate = 0;
		txschq_node->ceil = 0;
		txschq_node->prio = 0;
		txschq_node->quantum = 0;
		txschq_node->is_static = true;
		txschq_node->child_dwrr_prio = OTX2_QOS_DEFAULT_PRIO;
		txschq_node->txschq_idx = OTX2_QOS_INVALID_TXSCHQ_IDX;

		mutex_lock(&pfvf->qos.qos_lock);
		list_add_tail(&txschq_node->list, &node->child_schq_list);
		mutex_unlock(&pfvf->qos.qos_lock);

		INIT_LIST_HEAD(&txschq_node->child_list);
		INIT_LIST_HEAD(&txschq_node->child_schq_list);
		parent = txschq_node;
	}

	return 0;

err_out:
	list_for_each_entry_safe(txschq_node, tmp, &node->child_schq_list,
				 list) {
		list_del(&txschq_node->list);
		kfree(txschq_node);
	}
	return -ENOMEM;
}

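/* A leaf class gets one software node per remaining hardware level,
 * chained down to the MDQ; these per-level nodes carry no shaping
 * parameters of their own and simply connect the class to its send queue.
 */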
static struct otx2_qos_node *
otx2_qos_sw_create_leaf_node(struct otx2_nic *pfvf,
			     struct otx2_qos_node *parent,
			     u16 classid, u32 prio, u64 rate, u64 ceil,
			     u32 quantum, u16 qid, bool static_cfg)
{
	struct otx2_qos_node *node;
	int err;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return ERR_PTR(-ENOMEM);

	node->parent = parent;
	node->level = parent->level - 1;
	node->classid = classid;
	WRITE_ONCE(node->qid, qid);

	node->rate = otx2_convert_rate(rate);
	node->ceil = otx2_convert_rate(ceil);
	node->prio = prio;
	node->quantum = quantum;
	node->is_static = static_cfg;
	node->child_dwrr_prio = OTX2_QOS_DEFAULT_PRIO;
	node->txschq_idx = OTX2_QOS_INVALID_TXSCHQ_IDX;

	__set_bit(qid, pfvf->qos.qos_sq_bmap);

	hash_add_rcu(pfvf->qos.qos_hlist, &node->hlist, classid);

	mutex_lock(&pfvf->qos.qos_lock);
	err = otx2_qos_add_child_node(parent, node);
	if (err) {
		mutex_unlock(&pfvf->qos.qos_lock);
		return ERR_PTR(err);
	}
	mutex_unlock(&pfvf->qos.qos_lock);

	INIT_LIST_HEAD(&node->child_list);
	INIT_LIST_HEAD(&node->child_schq_list);

	err = otx2_qos_alloc_txschq_node(pfvf, node);
	if (err) {
		otx2_qos_sw_node_delete(pfvf, node);
		return ERR_PTR(-ENOMEM);
	}

	return node;
}

static struct otx2_qos_node *
otx2_sw_node_find(struct otx2_nic *pfvf, u32 classid)
{
	struct otx2_qos_node *node = NULL;

	hash_for_each_possible(pfvf->qos.qos_hlist, node, hlist, classid) {
		if (node->classid == classid)
			break;
	}

	return node;
}

static struct otx2_qos_node *
otx2_sw_node_find_rcu(struct otx2_nic *pfvf, u32 classid)
{
	struct otx2_qos_node *node = NULL;

	hash_for_each_possible_rcu(pfvf->qos.qos_hlist, node, hlist, classid) {
		if (node->classid == classid)
			break;
	}

	return node;
}

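/* Resolve a classid to a netdev Tx queue index. QoS send queues are
 * appended after the regular queues, so the returned index is
 * hw.tx_queues + qid; e.g. with 8 regular Tx queues, QoS qid 0 is netdev
 * queue 8.
 */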
int otx2_get_txq_by_classid(struct otx2_nic *pfvf, u16 classid)
{
	struct otx2_qos_node *node;
	u16 qid;
	int res;

	node = otx2_sw_node_find_rcu(pfvf, classid);
	if (!node) {
		res = -ENOENT;
		goto out;
	}
	qid = READ_ONCE(node->qid);
	if (qid == OTX2_QOS_QID_INNER) {
		res = -EINVAL;
		goto out;
	}
	res = pfvf->hw.tx_queues + qid;
out:
	return res;
}

static int
otx2_qos_txschq_config(struct otx2_nic *pfvf, struct otx2_qos_node *node)
{
	struct mbox *mbox = &pfvf->mbox;
	struct nix_txschq_config *req;
	int rc;

	mutex_lock(&mbox->lock);

	req = otx2_mbox_alloc_msg_nix_txschq_cfg(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&mbox->lock);
		return -ENOMEM;
	}

	req->lvl = node->level;
	__otx2_qos_txschq_cfg(pfvf, node, req);

	rc = otx2_sync_mbox_msg(&pfvf->mbox);

	mutex_unlock(&mbox->lock);

	return rc;
}

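/* Ask the AF for the schedule queues described in @cfg. schq_contig
 * requests contiguous hardware queues, which the prio-anchor based
 * topology relies on (children are addressed relative to the anchor);
 * schq holds the per-level non-contiguous requests.
 */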
static int otx2_qos_txschq_alloc(struct otx2_nic *pfvf,
				 struct otx2_qos_cfg *cfg)
{
	struct nix_txsch_alloc_req *req;
	struct nix_txsch_alloc_rsp *rsp;
	struct mbox *mbox = &pfvf->mbox;
	int lvl, rc, schq;

	mutex_lock(&mbox->lock);
	req = otx2_mbox_alloc_msg_nix_txsch_alloc(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&mbox->lock);
		return -ENOMEM;
	}

	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		req->schq[lvl] = cfg->schq[lvl];
		req->schq_contig[lvl] = cfg->schq_contig[lvl];
	}

	rc = otx2_sync_mbox_msg(&pfvf->mbox);
	if (rc) {
		mutex_unlock(&mbox->lock);
		return rc;
	}

	rsp = (struct nix_txsch_alloc_rsp *)
	      otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);

	if (IS_ERR(rsp)) {
		rc = PTR_ERR(rsp);
		goto out;
	}

	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		for (schq = 0; schq < rsp->schq_contig[lvl]; schq++) {
			cfg->schq_contig_list[lvl][schq] =
				rsp->schq_contig_list[lvl][schq];
		}
	}

	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		for (schq = 0; schq < rsp->schq[lvl]; schq++) {
			cfg->schq_list[lvl][schq] =
				rsp->schq_list[lvl][schq];
		}
	}

	pfvf->qos.link_cfg_lvl = rsp->link_cfg_lvl;
	pfvf->hw.txschq_aggr_lvl_rr_prio = rsp->aggr_lvl_rr_prio;

out:
	mutex_unlock(&mbox->lock);
	return rc;
}

static void otx2_qos_free_unused_txschq(struct otx2_nic *pfvf,
					struct otx2_qos_cfg *cfg)
{
	int lvl, idx, schq;

	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		for (idx = 0; idx < cfg->schq_contig[lvl]; idx++) {
			if (!cfg->schq_index_used[lvl][idx]) {
				schq = cfg->schq_contig_list[lvl][idx];
				otx2_txschq_free_one(pfvf, lvl, schq);
			}
		}
	}
}

static void otx2_qos_txschq_fill_cfg_schq(struct otx2_nic *pfvf,
					  struct otx2_qos_node *node,
					  struct otx2_qos_cfg *cfg)
{
	struct otx2_qos_node *tmp;
	int cnt;

	list_for_each_entry(tmp, &node->child_schq_list, list) {
		cnt = cfg->dwrr_node_pos[tmp->level];
		tmp->schq = cfg->schq_list[tmp->level][cnt];
		cfg->dwrr_node_pos[tmp->level]++;
	}
}

static void otx2_qos_txschq_fill_cfg_tl(struct otx2_nic *pfvf,
					struct otx2_qos_node *node,
					struct otx2_qos_cfg *cfg)
{
	struct otx2_qos_node *tmp;
	int cnt;

	list_for_each_entry(tmp, &node->child_list, list) {
		otx2_qos_txschq_fill_cfg_tl(pfvf, tmp, cfg);
		cnt = cfg->static_node_pos[tmp->level];
		tmp->schq = cfg->schq_contig_list[tmp->level][tmp->txschq_idx];
		cfg->schq_index_used[tmp->level][tmp->txschq_idx] = true;
		if (cnt == 0)
			node->prio_anchor =
				cfg->schq_contig_list[tmp->level][0];
		cfg->static_node_pos[tmp->level]++;
		otx2_qos_txschq_fill_cfg_schq(pfvf, tmp, cfg);
	}
}

static void otx2_qos_txschq_fill_cfg(struct otx2_nic *pfvf,
				     struct otx2_qos_node *node,
				     struct otx2_qos_cfg *cfg)
{
	mutex_lock(&pfvf->qos.qos_lock);
	otx2_qos_txschq_fill_cfg_tl(pfvf, node, cfg);
	otx2_qos_txschq_fill_cfg_schq(pfvf, node, cfg);
	otx2_qos_free_unused_txschq(pfvf, cfg);
	mutex_unlock(&pfvf->qos.qos_lock);
}

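/* Pick a slot in the parent's child index bitmap for one child. Static
 * children claim the slot matching their priority 1:1; DWRR children take
 * the first free slot at or above their group priority. For example, with
 * static prios 0 and 2 and two DWRR children at prio 1, slots 0-3 become
 * static0, dwrr, static2, dwrr; any unused contiguous queues are released
 * later in otx2_qos_free_unused_txschq().
 */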
static void __otx2_qos_assign_base_idx_tl(struct otx2_nic *pfvf,
					  struct otx2_qos_node *tmp,
					  unsigned long *child_idx_bmap,
					  int child_cnt)
{
	int idx;

	if (tmp->txschq_idx != OTX2_QOS_INVALID_TXSCHQ_IDX)
		return;

	/* assign static nodes 1:1 prio mapping first, then remaining nodes */
	for (idx = 0; idx < child_cnt; idx++) {
		if (tmp->is_static && tmp->prio == idx &&
		    !test_bit(idx, child_idx_bmap)) {
			tmp->txschq_idx = idx;
			set_bit(idx, child_idx_bmap);
			return;
		} else if (!tmp->is_static && idx >= tmp->prio &&
			   !test_bit(idx, child_idx_bmap)) {
			tmp->txschq_idx = idx;
			set_bit(idx, child_idx_bmap);
			return;
		}
	}
}

static int otx2_qos_assign_base_idx_tl(struct otx2_nic *pfvf,
				       struct otx2_qos_node *node)
{
	unsigned long *child_idx_bmap;
	struct otx2_qos_node *tmp;
	int child_cnt;

	list_for_each_entry(tmp, &node->child_list, list)
		tmp->txschq_idx = OTX2_QOS_INVALID_TXSCHQ_IDX;

	/* allocate child index array */
	child_cnt = node->child_dwrr_cnt + node->max_static_prio + 1;
	child_idx_bmap = kcalloc(BITS_TO_LONGS(child_cnt),
				 sizeof(unsigned long),
				 GFP_KERNEL);
	if (!child_idx_bmap)
		return -ENOMEM;

	list_for_each_entry(tmp, &node->child_list, list)
		otx2_qos_assign_base_idx_tl(pfvf, tmp);

	/* assign base index of static priority children first */
	list_for_each_entry(tmp, &node->child_list, list) {
		if (!tmp->is_static)
			continue;
		__otx2_qos_assign_base_idx_tl(pfvf, tmp, child_idx_bmap,
					      child_cnt);
	}

	/* assign base index of dwrr priority children */
	list_for_each_entry(tmp, &node->child_list, list)
		__otx2_qos_assign_base_idx_tl(pfvf, tmp, child_idx_bmap,
					      child_cnt);

	kfree(child_idx_bmap);

	return 0;
}

static int otx2_qos_assign_base_idx(struct otx2_nic *pfvf,
				    struct otx2_qos_node *node)
{
	int ret = 0;

	mutex_lock(&pfvf->qos.qos_lock);
	ret = otx2_qos_assign_base_idx_tl(pfvf, node);
	mutex_unlock(&pfvf->qos.qos_lock);

	return ret;
}

static int otx2_qos_txschq_push_cfg_schq(struct otx2_nic *pfvf,
					 struct otx2_qos_node *node,
					 struct otx2_qos_cfg *cfg)
{
	struct otx2_qos_node *tmp;
	int ret;

	list_for_each_entry(tmp, &node->child_schq_list, list) {
		ret = otx2_qos_txschq_config(pfvf, tmp);
		if (ret)
			return -EIO;
		ret = otx2_qos_txschq_set_parent_topology(pfvf, tmp->parent);
		if (ret)
			return -EIO;
	}

	return 0;
}

static int otx2_qos_txschq_push_cfg_tl(struct otx2_nic *pfvf,
				       struct otx2_qos_node *node,
				       struct otx2_qos_cfg *cfg)
{
	struct otx2_qos_node *tmp;
	int ret;

	list_for_each_entry(tmp, &node->child_list, list) {
		ret = otx2_qos_txschq_push_cfg_tl(pfvf, tmp, cfg);
		if (ret)
			return -EIO;
		ret = otx2_qos_txschq_config(pfvf, tmp);
		if (ret)
			return -EIO;
		ret = otx2_qos_txschq_push_cfg_schq(pfvf, tmp, cfg);
		if (ret)
			return -EIO;
	}

	ret = otx2_qos_txschq_set_parent_topology(pfvf, node);
	if (ret)
		return -EIO;

	return 0;
}

static int otx2_qos_txschq_push_cfg(struct otx2_nic *pfvf,
				    struct otx2_qos_node *node,
				    struct otx2_qos_cfg *cfg)
{
	int ret;

	mutex_lock(&pfvf->qos.qos_lock);
	ret = otx2_qos_txschq_push_cfg_tl(pfvf, node, cfg);
	if (ret)
		goto out;
	ret = otx2_qos_txschq_push_cfg_schq(pfvf, node, cfg);
out:
	mutex_unlock(&pfvf->qos.qos_lock);
	return ret;
}

static int otx2_qos_txschq_update_config(struct otx2_nic *pfvf,
					 struct otx2_qos_node *node,
					 struct otx2_qos_cfg *cfg)
{
	otx2_qos_txschq_fill_cfg(pfvf, node, cfg);

	return otx2_qos_txschq_push_cfg(pfvf, node, cfg);
}

static int otx2_qos_txschq_update_root_cfg(struct otx2_nic *pfvf,
					   struct otx2_qos_node *root,
					   struct otx2_qos_cfg *cfg)
{
	root->schq = cfg->schq_list[root->level][0];
	return otx2_qos_txschq_config(pfvf, root);
}

static void otx2_qos_free_cfg(struct otx2_nic *pfvf, struct otx2_qos_cfg *cfg)
{
	int lvl, idx, schq;

	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		for (idx = 0; idx < cfg->schq[lvl]; idx++) {
			schq = cfg->schq_list[lvl][idx];
			otx2_txschq_free_one(pfvf, lvl, schq);
		}
	}

	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		for (idx = 0; idx < cfg->schq_contig[lvl]; idx++) {
			if (cfg->schq_index_used[lvl][idx]) {
				schq = cfg->schq_contig_list[lvl][idx];
				otx2_txschq_free_one(pfvf, lvl, schq);
			}
		}
	}
}

static void otx2_qos_enadis_sq(struct otx2_nic *pfvf,
			       struct otx2_qos_node *node,
			       u16 qid)
{
	if (pfvf->qos.qid_to_sqmap[qid] != OTX2_QOS_INVALID_SQ)
		otx2_qos_disable_sq(pfvf, qid);

	pfvf->qos.qid_to_sqmap[qid] = node->schq;
	otx2_qos_enable_sq(pfvf, qid);
}

static void otx2_qos_update_smq_schq(struct otx2_nic *pfvf,
				     struct otx2_qos_node *node,
				     bool action)
{
	struct otx2_qos_node *tmp;

	if (node->qid == OTX2_QOS_QID_INNER)
		return;

	list_for_each_entry(tmp, &node->child_schq_list, list) {
		if (tmp->level == NIX_TXSCH_LVL_MDQ) {
			if (action == QOS_SMQ_FLUSH)
				otx2_smq_flush(pfvf, tmp->schq);
			else
				otx2_qos_enadis_sq(pfvf, tmp, node->qid);
		}
	}
}

static void __otx2_qos_update_smq(struct otx2_nic *pfvf,
				  struct otx2_qos_node *node,
				  bool action)
{
	struct otx2_qos_node *tmp;

	list_for_each_entry(tmp, &node->child_list, list) {
		__otx2_qos_update_smq(pfvf, tmp, action);
		if (tmp->qid == OTX2_QOS_QID_INNER)
			continue;
		if (tmp->level == NIX_TXSCH_LVL_MDQ) {
			if (action == QOS_SMQ_FLUSH)
				otx2_smq_flush(pfvf, tmp->schq);
			else
				otx2_qos_enadis_sq(pfvf, tmp, tmp->qid);
		} else {
			otx2_qos_update_smq_schq(pfvf, tmp, action);
		}
	}
}

static void otx2_qos_update_smq(struct otx2_nic *pfvf,
				struct otx2_qos_node *node,
				bool action)
{
	mutex_lock(&pfvf->qos.qos_lock);
	__otx2_qos_update_smq(pfvf, node, action);
	otx2_qos_update_smq_schq(pfvf, node, action);
	mutex_unlock(&pfvf->qos.qos_lock);
}

static int otx2_qos_push_txschq_cfg(struct otx2_nic *pfvf,
				    struct otx2_qos_node *node,
				    struct otx2_qos_cfg *cfg)
{
	int ret;

	ret = otx2_qos_txschq_alloc(pfvf, cfg);
	if (ret)
		return -ENOSPC;

	ret = otx2_qos_assign_base_idx(pfvf, node);
	if (ret)
		return -ENOMEM;

	if (!(pfvf->netdev->flags & IFF_UP)) {
		otx2_qos_txschq_fill_cfg(pfvf, node, cfg);
		return 0;
	}

	ret = otx2_qos_txschq_update_config(pfvf, node, cfg);
	if (ret) {
		otx2_qos_free_cfg(pfvf, cfg);
		return -EIO;
	}

	otx2_qos_update_smq(pfvf, node, QOS_CFG_SQ);

	return 0;
}

static int otx2_qos_update_tree(struct otx2_nic *pfvf,
				struct otx2_qos_node *node,
				struct otx2_qos_cfg *cfg)
{
	otx2_qos_prepare_txschq_cfg(pfvf, node->parent, cfg);
	return otx2_qos_push_txschq_cfg(pfvf, node->parent, cfg);
}

static int otx2_qos_root_add(struct otx2_nic *pfvf, u16 htb_maj_id, u16 htb_defcls,
			     struct netlink_ext_ack *extack)
{
	struct otx2_qos_cfg *new_cfg;
	struct otx2_qos_node *root;
	int err;

	netdev_dbg(pfvf->netdev,
		   "TC_HTB_CREATE: handle=0x%x defcls=0x%x\n",
		   htb_maj_id, htb_defcls);

	root = otx2_qos_alloc_root(pfvf);
	if (IS_ERR(root)) {
		err = PTR_ERR(root);
		return err;
	}

	/* allocate txschq queue */
	new_cfg = kzalloc(sizeof(*new_cfg), GFP_KERNEL);
	if (!new_cfg) {
		NL_SET_ERR_MSG_MOD(extack, "Memory allocation error");
		err = -ENOMEM;
		goto free_root_node;
	}
	/* allocate htb root node */
	new_cfg->schq[root->level] = 1;
	err = otx2_qos_txschq_alloc(pfvf, new_cfg);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Error allocating txschq");
		goto free_root_node;
	}

	/* Update TL1 RR PRIO */
	if (root->level == NIX_TXSCH_LVL_TL1) {
		root->child_dwrr_prio = pfvf->hw.txschq_aggr_lvl_rr_prio;
		netdev_dbg(pfvf->netdev,
			   "TL1 DWRR Priority %d\n", root->child_dwrr_prio);
	}

	if (!(pfvf->netdev->flags & IFF_UP) ||
	    root->level == NIX_TXSCH_LVL_TL1) {
		root->schq = new_cfg->schq_list[root->level][0];
		goto out;
	}

	/* update the txschq configuration in hw */
	err = otx2_qos_txschq_update_root_cfg(pfvf, root, new_cfg);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Error updating txschq configuration");
		goto txschq_free;
	}

out:
	WRITE_ONCE(pfvf->qos.defcls, htb_defcls);
	/* Pairs with smp_load_acquire() in ndo_select_queue */
	smp_store_release(&pfvf->qos.maj_id, htb_maj_id);
	kfree(new_cfg);
	return 0;

txschq_free:
	otx2_qos_free_cfg(pfvf, new_cfg);
free_root_node:
	kfree(new_cfg);
	otx2_qos_sw_node_delete(pfvf, root);
	return err;
}

static int otx2_qos_root_destroy(struct otx2_nic *pfvf)
{
	struct otx2_qos_node *root;

	netdev_dbg(pfvf->netdev, "TC_HTB_DESTROY\n");

	/* find root node */
	root = otx2_sw_node_find(pfvf, OTX2_QOS_ROOT_CLASSID);
	if (!root)
		return -ENOENT;

	/* free the hw mappings */
	otx2_qos_destroy_node(pfvf, root);

	return 0;
}

static int otx2_qos_validate_quantum(struct otx2_nic *pfvf, u32 quantum)
{
	u32 rr_weight = otx2_qos_quantum_to_dwrr_weight(pfvf, quantum);
	int err = 0;

	/* Max Round robin weight supported by octeontx2 and CN10K
	 * is different. Validate accordingly
	 */
	if (is_dev_otx2(pfvf->pdev))
		err = (rr_weight > OTX2_MAX_RR_QUANTUM) ? -EINVAL : 0;
	else if (rr_weight > CN10K_MAX_RR_WEIGHT)
		err = -EINVAL;

	return err;
}

static int otx2_qos_validate_dwrr_cfg(struct otx2_qos_node *parent,
				      struct netlink_ext_ack *extack,
				      struct otx2_nic *pfvf,
				      u64 prio, u64 quantum)
{
	int err;

	err = otx2_qos_validate_quantum(pfvf, quantum);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported quantum value");
		return err;
	}

	if (parent->child_dwrr_prio == OTX2_QOS_DEFAULT_PRIO) {
		parent->child_dwrr_prio = prio;
	} else if (prio != parent->child_dwrr_prio) {
		NL_SET_ERR_MSG_MOD(extack, "Only one DWRR group is allowed");
		return -EOPNOTSUPP;
	}

	return 0;
}

static int otx2_qos_validate_configuration(struct otx2_qos_node *parent,
					   struct netlink_ext_ack *extack,
					   struct otx2_nic *pfvf,
					   u64 prio, bool static_cfg)
{
	if (prio == parent->child_dwrr_prio && static_cfg) {
		NL_SET_ERR_MSG_MOD(extack, "DWRR child group with same priority exists");
		return -EEXIST;
	}

	if (static_cfg && test_bit(prio, parent->prio_bmap)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Static priority child with same priority exists");
		return -EEXIST;
	}

	return 0;
}

static void otx2_reset_dwrr_prio(struct otx2_qos_node *parent, u64 prio)
{
	/* For PF, root node dwrr priority is static */
	if (parent->level == NIX_TXSCH_LVL_TL1)
		return;

	if (parent->child_dwrr_prio != OTX2_QOS_DEFAULT_PRIO) {
		parent->child_dwrr_prio = OTX2_QOS_DEFAULT_PRIO;
		clear_bit(prio, parent->prio_bmap);
	}
}

static bool is_qos_node_dwrr(struct otx2_qos_node *parent,
			     struct otx2_nic *pfvf,
			     u64 prio)
{
	struct otx2_qos_node *node;
	bool ret = false;

	if (parent->child_dwrr_prio == prio)
		return true;

	mutex_lock(&pfvf->qos.qos_lock);
	list_for_each_entry(node, &parent->child_list, list) {
		if (prio == node->prio) {
			if (parent->child_dwrr_prio != OTX2_QOS_DEFAULT_PRIO &&
			    parent->child_dwrr_prio != prio)
				continue;

			if (otx2_qos_validate_quantum(pfvf, node->quantum)) {
				netdev_err(pfvf->netdev,
					   "Unsupported quantum value for existing classid=0x%x quantum=%d prio=%d",
					   node->classid, node->quantum,
					   node->prio);
				break;
			}
			/* mark old node as dwrr */
			node->is_static = false;
			parent->child_dwrr_cnt++;
			parent->child_static_cnt--;
			ret = true;
			break;
		}
	}
	mutex_unlock(&pfvf->qos.qos_lock);

	return ret;
}

static int otx2_qos_leaf_alloc_queue(struct otx2_nic *pfvf, u16 classid,
				     u32 parent_classid, u64 rate, u64 ceil,
				     u64 prio, u32 quantum,
				     struct netlink_ext_ack *extack)
{
	struct otx2_qos_cfg *old_cfg, *new_cfg;
	struct otx2_qos_node *node, *parent;
	int qid, ret, err;
	bool static_cfg;

	netdev_dbg(pfvf->netdev,
		   "TC_HTB_LEAF_ALLOC_QUEUE: classid=0x%x parent_classid=0x%x rate=%lld ceil=%lld prio=%lld quantum=%d\n",
		   classid, parent_classid, rate, ceil, prio, quantum);

	if (prio > OTX2_QOS_MAX_PRIO) {
		NL_SET_ERR_MSG_MOD(extack, "Valid priority range 0 to 7");
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (!quantum || quantum > INT_MAX) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid quantum, range 1 - 2147483647 bytes");
		ret = -EOPNOTSUPP;
		goto out;
	}

	/* get parent node */
	parent = otx2_sw_node_find(pfvf, parent_classid);
	if (!parent) {
		NL_SET_ERR_MSG_MOD(extack, "parent node not found");
		ret = -ENOENT;
		goto out;
	}
	if (parent->level == NIX_TXSCH_LVL_MDQ) {
		NL_SET_ERR_MSG_MOD(extack, "HTB qos max levels reached");
		ret = -EOPNOTSUPP;
		goto out;
	}

	static_cfg = !is_qos_node_dwrr(parent, pfvf, prio);
	ret = otx2_qos_validate_configuration(parent, extack, pfvf, prio,
					      static_cfg);
	if (ret)
		goto out;

	if (!static_cfg) {
		ret = otx2_qos_validate_dwrr_cfg(parent, extack, pfvf, prio,
						 quantum);
		if (ret)
			goto out;
	}

	if (static_cfg)
		parent->child_static_cnt++;
	else
		parent->child_dwrr_cnt++;

	set_bit(prio, parent->prio_bmap);

	/* read current txschq configuration */
	old_cfg = kzalloc(sizeof(*old_cfg), GFP_KERNEL);
	if (!old_cfg) {
		NL_SET_ERR_MSG_MOD(extack, "Memory allocation error");
		ret = -ENOMEM;
		goto reset_prio;
	}
	otx2_qos_read_txschq_cfg(pfvf, parent, old_cfg);

	/* allocate a new sq */
	qid = otx2_qos_get_qid(pfvf);
	if (qid < 0) {
		NL_SET_ERR_MSG_MOD(extack, "Reached max supported QoS SQs");
		ret = -ENOMEM;
		goto free_old_cfg;
	}

	/* Actual SQ mapping will be updated after SMQ alloc */
	pfvf->qos.qid_to_sqmap[qid] = OTX2_QOS_INVALID_SQ;

	/* allocate and initialize a new child node */
	node = otx2_qos_sw_create_leaf_node(pfvf, parent, classid, prio, rate,
					    ceil, quantum, qid, static_cfg);
	if (IS_ERR(node)) {
		NL_SET_ERR_MSG_MOD(extack, "Unable to allocate leaf node");
		ret = PTR_ERR(node);
		goto free_old_cfg;
	}

	/* push new txschq config to hw */
	new_cfg = kzalloc(sizeof(*new_cfg), GFP_KERNEL);
	if (!new_cfg) {
		NL_SET_ERR_MSG_MOD(extack, "Memory allocation error");
		ret = -ENOMEM;
		goto free_node;
	}
	ret = otx2_qos_update_tree(pfvf, node, new_cfg);
	if (ret) {
		NL_SET_ERR_MSG_MOD(extack, "HTB HW configuration error");
		kfree(new_cfg);
		otx2_qos_sw_node_delete(pfvf, node);
		/* restore the old qos tree */
		err = otx2_qos_txschq_update_config(pfvf, parent, old_cfg);
		if (err) {
			netdev_err(pfvf->netdev,
				   "Failed to restore txschq configuration");
			goto free_old_cfg;
		}

		otx2_qos_update_smq(pfvf, parent, QOS_CFG_SQ);
		goto free_old_cfg;
	}

	/* update tx_real_queues */
	otx2_qos_update_tx_netdev_queues(pfvf);

	/* free new txschq config */
	kfree(new_cfg);

	/* free old txschq config */
	otx2_qos_free_cfg(pfvf, old_cfg);
	kfree(old_cfg);

	return pfvf->hw.tx_queues + qid;

free_node:
	otx2_qos_sw_node_delete(pfvf, node);
free_old_cfg:
	kfree(old_cfg);
reset_prio:
	if (static_cfg)
		parent->child_static_cnt--;
	else
		parent->child_dwrr_cnt--;

	clear_bit(prio, parent->prio_bmap);
out:
	return ret;
}

static int otx2_qos_leaf_to_inner(struct otx2_nic *pfvf, u16 classid,
				  u16 child_classid, u64 rate, u64 ceil, u64 prio,
				  u32 quantum, struct netlink_ext_ack *extack)
{
	struct otx2_qos_cfg *old_cfg, *new_cfg;
	struct otx2_qos_node *node, *child;
	bool static_cfg;
	int ret, err;
	u16 qid;

	netdev_dbg(pfvf->netdev,
		   "TC_HTB_LEAF_TO_INNER classid %04x, child %04x, rate %llu, ceil %llu\n",
		   classid, child_classid, rate, ceil);

	if (prio > OTX2_QOS_MAX_PRIO) {
		NL_SET_ERR_MSG_MOD(extack, "Valid priority range 0 to 7");
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (!quantum || quantum > INT_MAX) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid quantum, range 1 - 2147483647 bytes");
		ret = -EOPNOTSUPP;
		goto out;
	}

	/* find node related to classid */
	node = otx2_sw_node_find(pfvf, classid);
	if (!node) {
		NL_SET_ERR_MSG_MOD(extack, "HTB node not found");
		ret = -ENOENT;
		goto out;
	}
	/* check max qos txschq level */
	if (node->level == NIX_TXSCH_LVL_MDQ) {
		NL_SET_ERR_MSG_MOD(extack, "HTB qos level not supported");
		ret = -EOPNOTSUPP;
		goto out;
	}

	static_cfg = !is_qos_node_dwrr(node, pfvf, prio);
	if (!static_cfg) {
		ret = otx2_qos_validate_dwrr_cfg(node, extack, pfvf, prio,
						 quantum);
		if (ret)
			goto out;
	}

	if (static_cfg)
		node->child_static_cnt++;
	else
		node->child_dwrr_cnt++;

	set_bit(prio, node->prio_bmap);

	/* store the qid to assign to leaf node */
	qid = node->qid;

	/* read current txschq configuration */
	old_cfg = kzalloc(sizeof(*old_cfg), GFP_KERNEL);
	if (!old_cfg) {
		NL_SET_ERR_MSG_MOD(extack, "Memory allocation error");
		ret = -ENOMEM;
		goto reset_prio;
	}
	otx2_qos_read_txschq_cfg(pfvf, node, old_cfg);

	/* delete the txschq nodes allocated for this node */
	otx2_qos_disable_sq(pfvf, qid);
	otx2_qos_free_hw_node_schq(pfvf, node);
	otx2_qos_free_sw_node_schq(pfvf, node);
	pfvf->qos.qid_to_sqmap[qid] = OTX2_QOS_INVALID_SQ;

	/* mark this node as htb inner node */
	WRITE_ONCE(node->qid, OTX2_QOS_QID_INNER);

	/* allocate and initialize a new child node */
	child = otx2_qos_sw_create_leaf_node(pfvf, node, child_classid,
					     prio, rate, ceil, quantum,
					     qid, static_cfg);
	if (IS_ERR(child)) {
		NL_SET_ERR_MSG_MOD(extack, "Unable to allocate leaf node");
		ret = PTR_ERR(child);
		goto free_old_cfg;
	}

	/* push new txschq config to hw */
	new_cfg = kzalloc(sizeof(*new_cfg), GFP_KERNEL);
	if (!new_cfg) {
		NL_SET_ERR_MSG_MOD(extack, "Memory allocation error");
		ret = -ENOMEM;
		goto free_node;
	}
	ret = otx2_qos_update_tree(pfvf, child, new_cfg);
	if (ret) {
		NL_SET_ERR_MSG_MOD(extack, "HTB HW configuration error");
		kfree(new_cfg);
		otx2_qos_sw_node_delete(pfvf, child);
		/* restore the old qos tree */
		WRITE_ONCE(node->qid, qid);
		err = otx2_qos_alloc_txschq_node(pfvf, node);
		if (err) {
			netdev_err(pfvf->netdev,
				   "Failed to restore old leaf node");
			goto free_old_cfg;
		}
		err = otx2_qos_txschq_update_config(pfvf, node, old_cfg);
		if (err) {
			netdev_err(pfvf->netdev,
				   "Failed to restore txschq configuration");
			goto free_old_cfg;
		}
		otx2_qos_update_smq(pfvf, node, QOS_CFG_SQ);
		goto free_old_cfg;
	}

	/* free new txschq config */
	kfree(new_cfg);

	/* free old txschq config */
	otx2_qos_free_cfg(pfvf, old_cfg);
	kfree(old_cfg);

	return 0;

free_node:
	otx2_qos_sw_node_delete(pfvf, child);
free_old_cfg:
	kfree(old_cfg);
reset_prio:
	if (static_cfg)
		node->child_static_cnt--;
	else
		node->child_dwrr_cnt--;
	clear_bit(prio, node->prio_bmap);
out:
	return ret;
}

static int otx2_qos_leaf_del(struct otx2_nic *pfvf, u16 *classid,
			     struct netlink_ext_ack *extack)
{
	struct otx2_qos_node *node, *parent;
	int dwrr_del_node = false;
	u64 prio;
	u16 qid;

	netdev_dbg(pfvf->netdev, "TC_HTB_LEAF_DEL classid %04x\n", *classid);

	/* find node related to classid */
	node = otx2_sw_node_find(pfvf, *classid);
	if (!node) {
		NL_SET_ERR_MSG_MOD(extack, "HTB node not found");
		return -ENOENT;
	}
	parent = node->parent;
	prio = node->prio;
	qid = node->qid;

	if (!node->is_static)
		dwrr_del_node = true;

	otx2_qos_disable_sq(pfvf, node->qid);

	otx2_qos_destroy_node(pfvf, node);
	pfvf->qos.qid_to_sqmap[qid] = OTX2_QOS_INVALID_SQ;

	if (dwrr_del_node) {
		parent->child_dwrr_cnt--;
	} else {
		parent->child_static_cnt--;
		clear_bit(prio, parent->prio_bmap);
	}

	/* Reset DWRR priority if all dwrr nodes are deleted */
	if (!parent->child_dwrr_cnt)
		otx2_reset_dwrr_prio(parent, prio);

	if (!parent->child_static_cnt)
		parent->max_static_prio = 0;

	return 0;
}

static int otx2_qos_leaf_del_last(struct otx2_nic *pfvf, u16 classid, bool force,
				  struct netlink_ext_ack *extack)
{
	struct otx2_qos_node *node, *parent;
	struct otx2_qos_cfg *new_cfg;
	int dwrr_del_node = false;
	u64 prio;
	int err;
	u16 qid;

	netdev_dbg(pfvf->netdev,
		   "TC_HTB_LEAF_DEL_LAST classid %04x\n", classid);

	/* find node related to classid */
	node = otx2_sw_node_find(pfvf, classid);
	if (!node) {
		NL_SET_ERR_MSG_MOD(extack, "HTB node not found");
		return -ENOENT;
	}

	/* save qid for use by parent */
	qid = node->qid;
	prio = node->prio;

	parent = otx2_sw_node_find(pfvf, node->parent->classid);
	if (!parent) {
		NL_SET_ERR_MSG_MOD(extack, "parent node not found");
		return -ENOENT;
	}

	if (!node->is_static)
		dwrr_del_node = true;

	WRITE_ONCE(node->qid, OTX2_QOS_QID_INNER);
	/* destroy the leaf node */
	otx2_qos_disable_sq(pfvf, qid);
	otx2_qos_destroy_node(pfvf, node);
	pfvf->qos.qid_to_sqmap[qid] = OTX2_QOS_INVALID_SQ;

	if (dwrr_del_node) {
		parent->child_dwrr_cnt--;
	} else {
		parent->child_static_cnt--;
		clear_bit(prio, parent->prio_bmap);
	}

	/* Reset DWRR priority if all dwrr nodes are deleted */
	if (!parent->child_dwrr_cnt)
		otx2_reset_dwrr_prio(parent, prio);

	if (!parent->child_static_cnt)
		parent->max_static_prio = 0;

	/* create downstream txschq entries to parent */
	err = otx2_qos_alloc_txschq_node(pfvf, parent);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "HTB failed to create txsch configuration");
		return err;
	}
	WRITE_ONCE(parent->qid, qid);
	__set_bit(qid, pfvf->qos.qos_sq_bmap);

	/* push new txschq config to hw */
	new_cfg = kzalloc(sizeof(*new_cfg), GFP_KERNEL);
	if (!new_cfg) {
		NL_SET_ERR_MSG_MOD(extack, "Memory allocation error");
		return -ENOMEM;
	}
	/* fill txschq cfg and push txschq cfg to hw */
	otx2_qos_fill_cfg_schq(parent, new_cfg);
	err = otx2_qos_push_txschq_cfg(pfvf, parent, new_cfg);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "HTB HW configuration error");
		kfree(new_cfg);
		return err;
	}
	kfree(new_cfg);

	return 0;
}

void otx2_clean_qos_queues(struct otx2_nic *pfvf)
{
	struct otx2_qos_node *root;

	root = otx2_sw_node_find(pfvf, OTX2_QOS_ROOT_CLASSID);
	if (!root)
		return;

	otx2_qos_update_smq(pfvf, root, QOS_SMQ_FLUSH);
}

void otx2_qos_config_txschq(struct otx2_nic *pfvf)
{
	struct otx2_qos_node *root;
	int err;

	root = otx2_sw_node_find(pfvf, OTX2_QOS_ROOT_CLASSID);
	if (!root)
		return;

	if (root->level != NIX_TXSCH_LVL_TL1) {
		err = otx2_qos_txschq_config(pfvf, root);
		if (err) {
			netdev_err(pfvf->netdev, "Error updating txschq configuration\n");
			goto root_destroy;
		}
	}

	err = otx2_qos_txschq_push_cfg_tl(pfvf, root, NULL);
	if (err) {
		netdev_err(pfvf->netdev, "Error updating txschq configuration\n");
		goto root_destroy;
	}

	otx2_qos_update_smq(pfvf, root, QOS_CFG_SQ);
	return;

root_destroy:
	netdev_err(pfvf->netdev, "Failed to update Scheduler/Shaping config in Hardware\n");
	/* Free resources allocated */
	otx2_qos_root_destroy(pfvf);
}

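/* Entry point for HTB offload (TC_SETUP_QDISC_HTB). An illustrative user
 * space sequence (device name and values are examples only):
 *
 *   tc qdisc replace dev eth0 root handle 1: htb offload
 *   tc class add dev eth0 parent 1: classid 1:1 htb rate 1gbit ceil 2gbit
 *
 * The qdisc create maps to TC_HTB_CREATE and each leaf class to
 * TC_HTB_LEAF_ALLOC_QUEUE below.
 */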
int otx2_setup_tc_htb(struct net_device *ndev, struct tc_htb_qopt_offload *htb)
{
	struct otx2_nic *pfvf = netdev_priv(ndev);
	int res;

	switch (htb->command) {
	case TC_HTB_CREATE:
		return otx2_qos_root_add(pfvf, htb->parent_classid,
					 htb->classid, htb->extack);
	case TC_HTB_DESTROY:
		return otx2_qos_root_destroy(pfvf);
	case TC_HTB_LEAF_ALLOC_QUEUE:
		res = otx2_qos_leaf_alloc_queue(pfvf, htb->classid,
						htb->parent_classid,
						htb->rate, htb->ceil,
						htb->prio, htb->quantum,
						htb->extack);
		if (res < 0)
			return res;
		htb->qid = res;
		return 0;
	case TC_HTB_LEAF_TO_INNER:
		return otx2_qos_leaf_to_inner(pfvf, htb->parent_classid,
					      htb->classid, htb->rate,
					      htb->ceil, htb->prio,
					      htb->quantum, htb->extack);
	case TC_HTB_LEAF_DEL:
		return otx2_qos_leaf_del(pfvf, &htb->classid, htb->extack);
	case TC_HTB_LEAF_DEL_LAST:
	case TC_HTB_LEAF_DEL_LAST_FORCE:
		return otx2_qos_leaf_del_last(pfvf, htb->classid,
					      htb->command == TC_HTB_LEAF_DEL_LAST_FORCE,
					      htb->extack);
	case TC_HTB_LEAF_QUERY_QUEUE:
		res = otx2_get_txq_by_classid(pfvf, htb->classid);
		htb->qid = res;
		return 0;
	case TC_HTB_NODE_MODIFY:
		fallthrough;
	default:
		return -EOPNOTSUPP;
	}
}