1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018, Intel Corporation. */
3 
4 #include "ice_sched.h"
5 
6 /**
7  * ice_sched_add_root_node - Insert the Tx scheduler root node in SW DB
8  * @pi: port information structure
9  * @info: Scheduler element information from firmware
10  *
11  * This function inserts the root node of the scheduling tree topology
12  * to the SW DB.
13  */
14 static enum ice_status
15 ice_sched_add_root_node(struct ice_port_info *pi,
16 			struct ice_aqc_txsched_elem_data *info)
17 {
18 	struct ice_sched_node *root;
19 	struct ice_hw *hw;
20 
21 	if (!pi)
22 		return ICE_ERR_PARAM;
23 
24 	hw = pi->hw;
25 
26 	root = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*root), GFP_KERNEL);
27 	if (!root)
28 		return ICE_ERR_NO_MEMORY;
29 
30 	/* coverity[suspicious_sizeof] */
31 	root->children = devm_kcalloc(ice_hw_to_dev(hw), hw->max_children[0],
32 				      sizeof(*root), GFP_KERNEL);
33 	if (!root->children) {
34 		devm_kfree(ice_hw_to_dev(hw), root);
35 		return ICE_ERR_NO_MEMORY;
36 	}
37 
38 	memcpy(&root->info, info, sizeof(*info));
39 	pi->root = root;
40 	return 0;
41 }
42 
43 /**
44  * ice_sched_find_node_by_teid - Find the Tx scheduler node in SW DB
45  * @start_node: pointer to the starting ice_sched_node struct in a sub-tree
46  * @teid: node TEID to search
47  *
48  * This function searches for a node matching the TEID in the scheduling tree
49  * from the SW DB. The search is recursive and is restricted by the number of
50  * layers it has searched through; stopping at the max supported layer.
51  *
52  * This function needs to be called when holding the port_info->sched_lock
53  */
54 struct ice_sched_node *
55 ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid)
56 {
57 	u16 i;
58 
59 	/* The TEID is same as that of the start_node */
60 	if (ICE_TXSCHED_GET_NODE_TEID(start_node) == teid)
61 		return start_node;
62 
63 	/* The node has no children or is at the max layer */
64 	if (!start_node->num_children ||
65 	    start_node->tx_sched_layer >= ICE_AQC_TOPO_MAX_LEVEL_NUM ||
66 	    start_node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF)
67 		return NULL;
68 
69 	/* Check if TEID matches to any of the children nodes */
70 	for (i = 0; i < start_node->num_children; i++)
71 		if (ICE_TXSCHED_GET_NODE_TEID(start_node->children[i]) == teid)
72 			return start_node->children[i];
73 
74 	/* Search within each child's sub-tree */
75 	for (i = 0; i < start_node->num_children; i++) {
76 		struct ice_sched_node *tmp;
77 
78 		tmp = ice_sched_find_node_by_teid(start_node->children[i],
79 						  teid);
80 		if (tmp)
81 			return tmp;
82 	}
83 
84 	return NULL;
85 }
86 
87 /**
88  * ice_aqc_send_sched_elem_cmd - send scheduling elements cmd
89  * @hw: pointer to the HW struct
90  * @cmd_opc: cmd opcode
91  * @elems_req: number of elements to request
92  * @buf: pointer to buffer
93  * @buf_size: buffer size in bytes
94  * @elems_resp: returns total number of elements response
95  * @cd: pointer to command details structure or NULL
96  *
97  * This function sends a scheduling elements cmd (cmd_opc)
98  */
99 static enum ice_status
100 ice_aqc_send_sched_elem_cmd(struct ice_hw *hw, enum ice_adminq_opc cmd_opc,
101 			    u16 elems_req, void *buf, u16 buf_size,
102 			    u16 *elems_resp, struct ice_sq_cd *cd)
103 {
104 	struct ice_aqc_sched_elem_cmd *cmd;
105 	struct ice_aq_desc desc;
106 	enum ice_status status;
107 
108 	cmd = &desc.params.sched_elem_cmd;
109 	ice_fill_dflt_direct_cmd_desc(&desc, cmd_opc);
110 	cmd->num_elem_req = cpu_to_le16(elems_req);
111 	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
112 	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
113 	if (!status && elems_resp)
114 		*elems_resp = le16_to_cpu(cmd->num_elem_resp);
115 
116 	return status;
117 }
118 
119 /**
120  * ice_aq_query_sched_elems - query scheduler elements
121  * @hw: pointer to the HW struct
122  * @elems_req: number of elements to query
123  * @buf: pointer to buffer
124  * @buf_size: buffer size in bytes
125  * @elems_ret: returns total number of elements returned
126  * @cd: pointer to command details structure or NULL
127  *
128  * Query scheduling elements (0x0404)
129  */
130 enum ice_status
131 ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,
132 			 struct ice_aqc_get_elem *buf, u16 buf_size,
133 			 u16 *elems_ret, struct ice_sq_cd *cd)
134 {
135 	return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_get_sched_elems,
136 					   elems_req, (void *)buf, buf_size,
137 					   elems_ret, cd);
138 }
139 
140 /**
141  * ice_sched_add_node - Insert the Tx scheduler node in SW DB
142  * @pi: port information structure
143  * @layer: Scheduler layer of the node
144  * @info: Scheduler element information from firmware
145  *
146  * This function inserts a scheduler node to the SW DB.
147  */
148 enum ice_status
149 ice_sched_add_node(struct ice_port_info *pi, u8 layer,
150 		   struct ice_aqc_txsched_elem_data *info)
151 {
152 	struct ice_sched_node *parent;
153 	struct ice_aqc_get_elem elem;
154 	struct ice_sched_node *node;
155 	enum ice_status status;
156 	struct ice_hw *hw;
157 
158 	if (!pi)
159 		return ICE_ERR_PARAM;
160 
161 	hw = pi->hw;
162 
163 	/* A valid parent node should be there */
164 	parent = ice_sched_find_node_by_teid(pi->root,
165 					     le32_to_cpu(info->parent_teid));
166 	if (!parent) {
167 		ice_debug(hw, ICE_DBG_SCHED,
168 			  "Parent Node not found for parent_teid=0x%x\n",
169 			  le32_to_cpu(info->parent_teid));
170 		return ICE_ERR_PARAM;
171 	}
172 
173 	/* query the current node information from FW  before additing it
174 	 * to the SW DB
175 	 */
176 	status = ice_sched_query_elem(hw, le32_to_cpu(info->node_teid), &elem);
177 	if (status)
178 		return status;
179 
180 	node = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*node), GFP_KERNEL);
181 	if (!node)
182 		return ICE_ERR_NO_MEMORY;
183 	if (hw->max_children[layer]) {
184 		/* coverity[suspicious_sizeof] */
185 		node->children = devm_kcalloc(ice_hw_to_dev(hw),
186 					      hw->max_children[layer],
187 					      sizeof(*node), GFP_KERNEL);
188 		if (!node->children) {
189 			devm_kfree(ice_hw_to_dev(hw), node);
190 			return ICE_ERR_NO_MEMORY;
191 		}
192 	}
193 
194 	node->in_use = true;
195 	node->parent = parent;
196 	node->tx_sched_layer = layer;
197 	parent->children[parent->num_children++] = node;
198 	memcpy(&node->info, &elem.generic[0], sizeof(node->info));
199 	return 0;
200 }
201 
202 /**
203  * ice_aq_delete_sched_elems - delete scheduler elements
204  * @hw: pointer to the HW struct
205  * @grps_req: number of groups to delete
206  * @buf: pointer to buffer
207  * @buf_size: buffer size in bytes
208  * @grps_del: returns total number of elements deleted
209  * @cd: pointer to command details structure or NULL
210  *
211  * Delete scheduling elements (0x040F)
212  */
213 static enum ice_status
214 ice_aq_delete_sched_elems(struct ice_hw *hw, u16 grps_req,
215 			  struct ice_aqc_delete_elem *buf, u16 buf_size,
216 			  u16 *grps_del, struct ice_sq_cd *cd)
217 {
218 	return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_delete_sched_elems,
219 					   grps_req, (void *)buf, buf_size,
220 					   grps_del, cd);
221 }
222 
223 /**
224  * ice_sched_remove_elems - remove nodes from HW
225  * @hw: pointer to the HW struct
226  * @parent: pointer to the parent node
227  * @num_nodes: number of nodes
228  * @node_teids: array of node teids to be deleted
229  *
230  * This function remove nodes from HW
231  */
232 static enum ice_status
233 ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent,
234 		       u16 num_nodes, u32 *node_teids)
235 {
236 	struct ice_aqc_delete_elem *buf;
237 	u16 i, num_groups_removed = 0;
238 	enum ice_status status;
239 	u16 buf_size;
240 
241 	buf_size = sizeof(*buf) + sizeof(u32) * (num_nodes - 1);
242 	buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL);
243 	if (!buf)
244 		return ICE_ERR_NO_MEMORY;
245 
246 	buf->hdr.parent_teid = parent->info.node_teid;
247 	buf->hdr.num_elems = cpu_to_le16(num_nodes);
248 	for (i = 0; i < num_nodes; i++)
249 		buf->teid[i] = cpu_to_le32(node_teids[i]);
250 
251 	status = ice_aq_delete_sched_elems(hw, 1, buf, buf_size,
252 					   &num_groups_removed, NULL);
253 	if (status || num_groups_removed != 1)
254 		ice_debug(hw, ICE_DBG_SCHED, "remove node failed FW error %d\n",
255 			  hw->adminq.sq_last_status);
256 
257 	devm_kfree(ice_hw_to_dev(hw), buf);
258 	return status;
259 }
260 
261 /**
262  * ice_sched_get_first_node - get the first node of the given layer
263  * @pi: port information structure
264  * @parent: pointer the base node of the subtree
265  * @layer: layer number
266  *
267  * This function retrieves the first node of the given layer from the subtree
268  */
269 static struct ice_sched_node *
270 ice_sched_get_first_node(struct ice_port_info *pi,
271 			 struct ice_sched_node *parent, u8 layer)
272 {
273 	return pi->sib_head[parent->tc_num][layer];
274 }
275 
276 /**
277  * ice_sched_get_tc_node - get pointer to TC node
278  * @pi: port information structure
279  * @tc: TC number
280  *
281  * This function returns the TC node pointer
282  */
283 struct ice_sched_node *ice_sched_get_tc_node(struct ice_port_info *pi, u8 tc)
284 {
285 	u8 i;
286 
287 	if (!pi || !pi->root)
288 		return NULL;
289 	for (i = 0; i < pi->root->num_children; i++)
290 		if (pi->root->children[i]->tc_num == tc)
291 			return pi->root->children[i];
292 	return NULL;
293 }
294 
295 /**
296  * ice_free_sched_node - Free a Tx scheduler node from SW DB
297  * @pi: port information structure
298  * @node: pointer to the ice_sched_node struct
299  *
300  * This function frees up a node from SW DB as well as from HW
301  *
302  * This function needs to be called with the port_info->sched_lock held
303  */
304 void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node)
305 {
306 	struct ice_sched_node *parent;
307 	struct ice_hw *hw = pi->hw;
308 	u8 i, j;
309 
310 	/* Free the children before freeing up the parent node
311 	 * The parent array is updated below and that shifts the nodes
312 	 * in the array. So always pick the first child if num children > 0
313 	 */
314 	while (node->num_children)
315 		ice_free_sched_node(pi, node->children[0]);
316 
317 	/* Leaf, TC and root nodes can't be deleted by SW */
318 	if (node->tx_sched_layer >= hw->sw_entry_point_layer &&
319 	    node->info.data.elem_type != ICE_AQC_ELEM_TYPE_TC &&
320 	    node->info.data.elem_type != ICE_AQC_ELEM_TYPE_ROOT_PORT &&
321 	    node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF) {
322 		u32 teid = le32_to_cpu(node->info.node_teid);
323 
324 		ice_sched_remove_elems(hw, node->parent, 1, &teid);
325 	}
326 	parent = node->parent;
327 	/* root has no parent */
328 	if (parent) {
329 		struct ice_sched_node *p;
330 
331 		/* update the parent */
332 		for (i = 0; i < parent->num_children; i++)
333 			if (parent->children[i] == node) {
334 				for (j = i + 1; j < parent->num_children; j++)
335 					parent->children[j - 1] =
336 						parent->children[j];
337 				parent->num_children--;
338 				break;
339 			}
340 
341 		p = ice_sched_get_first_node(pi, node, node->tx_sched_layer);
342 		while (p) {
343 			if (p->sibling == node) {
344 				p->sibling = node->sibling;
345 				break;
346 			}
347 			p = p->sibling;
348 		}
349 
350 		/* update the sibling head if head is getting removed */
351 		if (pi->sib_head[node->tc_num][node->tx_sched_layer] == node)
352 			pi->sib_head[node->tc_num][node->tx_sched_layer] =
353 				node->sibling;
354 	}
355 
356 	/* leaf nodes have no children */
357 	if (node->children)
358 		devm_kfree(ice_hw_to_dev(hw), node->children);
359 	devm_kfree(ice_hw_to_dev(hw), node);
360 }
361 
362 /**
363  * ice_aq_get_dflt_topo - gets default scheduler topology
364  * @hw: pointer to the HW struct
365  * @lport: logical port number
366  * @buf: pointer to buffer
367  * @buf_size: buffer size in bytes
368  * @num_branches: returns total number of queue to port branches
369  * @cd: pointer to command details structure or NULL
370  *
371  * Get default scheduler topology (0x400)
372  */
373 static enum ice_status
374 ice_aq_get_dflt_topo(struct ice_hw *hw, u8 lport,
375 		     struct ice_aqc_get_topo_elem *buf, u16 buf_size,
376 		     u8 *num_branches, struct ice_sq_cd *cd)
377 {
378 	struct ice_aqc_get_topo *cmd;
379 	struct ice_aq_desc desc;
380 	enum ice_status status;
381 
382 	cmd = &desc.params.get_topo;
383 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_dflt_topo);
384 	cmd->port_num = lport;
385 	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
386 	if (!status && num_branches)
387 		*num_branches = cmd->num_branches;
388 
389 	return status;
390 }
391 
392 /**
393  * ice_aq_add_sched_elems - adds scheduling element
394  * @hw: pointer to the HW struct
395  * @grps_req: the number of groups that are requested to be added
396  * @buf: pointer to buffer
397  * @buf_size: buffer size in bytes
398  * @grps_added: returns total number of groups added
399  * @cd: pointer to command details structure or NULL
400  *
401  * Add scheduling elements (0x0401)
402  */
403 static enum ice_status
404 ice_aq_add_sched_elems(struct ice_hw *hw, u16 grps_req,
405 		       struct ice_aqc_add_elem *buf, u16 buf_size,
406 		       u16 *grps_added, struct ice_sq_cd *cd)
407 {
408 	return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_add_sched_elems,
409 					   grps_req, (void *)buf, buf_size,
410 					   grps_added, cd);
411 }
412 
413 /**
414  * ice_aq_cfg_sched_elems - configures scheduler elements
415  * @hw: pointer to the HW struct
416  * @elems_req: number of elements to configure
417  * @buf: pointer to buffer
418  * @buf_size: buffer size in bytes
419  * @elems_cfgd: returns total number of elements configured
420  * @cd: pointer to command details structure or NULL
421  *
422  * Configure scheduling elements (0x0403)
423  */
424 static enum ice_status
425 ice_aq_cfg_sched_elems(struct ice_hw *hw, u16 elems_req,
426 		       struct ice_aqc_conf_elem *buf, u16 buf_size,
427 		       u16 *elems_cfgd, struct ice_sq_cd *cd)
428 {
429 	return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_cfg_sched_elems,
430 					   elems_req, (void *)buf, buf_size,
431 					   elems_cfgd, cd);
432 }
433 
434 /**
435  * ice_aq_suspend_sched_elems - suspend scheduler elements
436  * @hw: pointer to the HW struct
437  * @elems_req: number of elements to suspend
438  * @buf: pointer to buffer
439  * @buf_size: buffer size in bytes
440  * @elems_ret: returns total number of elements suspended
441  * @cd: pointer to command details structure or NULL
442  *
443  * Suspend scheduling elements (0x0409)
444  */
445 static enum ice_status
446 ice_aq_suspend_sched_elems(struct ice_hw *hw, u16 elems_req,
447 			   struct ice_aqc_suspend_resume_elem *buf,
448 			   u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd)
449 {
450 	return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_suspend_sched_elems,
451 					   elems_req, (void *)buf, buf_size,
452 					   elems_ret, cd);
453 }
454 
455 /**
456  * ice_aq_resume_sched_elems - resume scheduler elements
457  * @hw: pointer to the HW struct
458  * @elems_req: number of elements to resume
459  * @buf: pointer to buffer
460  * @buf_size: buffer size in bytes
461  * @elems_ret: returns total number of elements resumed
462  * @cd: pointer to command details structure or NULL
463  *
464  * resume scheduling elements (0x040A)
465  */
466 static enum ice_status
467 ice_aq_resume_sched_elems(struct ice_hw *hw, u16 elems_req,
468 			  struct ice_aqc_suspend_resume_elem *buf,
469 			  u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd)
470 {
471 	return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_resume_sched_elems,
472 					   elems_req, (void *)buf, buf_size,
473 					   elems_ret, cd);
474 }
475 
476 /**
477  * ice_aq_query_sched_res - query scheduler resource
478  * @hw: pointer to the HW struct
479  * @buf_size: buffer size in bytes
480  * @buf: pointer to buffer
481  * @cd: pointer to command details structure or NULL
482  *
483  * Query scheduler resource allocation (0x0412)
484  */
485 static enum ice_status
486 ice_aq_query_sched_res(struct ice_hw *hw, u16 buf_size,
487 		       struct ice_aqc_query_txsched_res_resp *buf,
488 		       struct ice_sq_cd *cd)
489 {
490 	struct ice_aq_desc desc;
491 
492 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_query_sched_res);
493 	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
494 }
495 
496 /**
497  * ice_sched_suspend_resume_elems - suspend or resume HW nodes
498  * @hw: pointer to the HW struct
499  * @num_nodes: number of nodes
500  * @node_teids: array of node teids to be suspended or resumed
501  * @suspend: true means suspend / false means resume
502  *
503  * This function suspends or resumes HW nodes
504  */
505 static enum ice_status
506 ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids,
507 			       bool suspend)
508 {
509 	struct ice_aqc_suspend_resume_elem *buf;
510 	u16 i, buf_size, num_elem_ret = 0;
511 	enum ice_status status;
512 
513 	buf_size = sizeof(*buf) * num_nodes;
514 	buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL);
515 	if (!buf)
516 		return ICE_ERR_NO_MEMORY;
517 
518 	for (i = 0; i < num_nodes; i++)
519 		buf->teid[i] = cpu_to_le32(node_teids[i]);
520 
521 	if (suspend)
522 		status = ice_aq_suspend_sched_elems(hw, num_nodes, buf,
523 						    buf_size, &num_elem_ret,
524 						    NULL);
525 	else
526 		status = ice_aq_resume_sched_elems(hw, num_nodes, buf,
527 						   buf_size, &num_elem_ret,
528 						   NULL);
529 	if (status || num_elem_ret != num_nodes)
530 		ice_debug(hw, ICE_DBG_SCHED, "suspend/resume failed\n");
531 
532 	devm_kfree(ice_hw_to_dev(hw), buf);
533 	return status;
534 }
535 
536 /**
537  * ice_alloc_lan_q_ctx - allocate LAN queue contexts for the given VSI and TC
538  * @hw: pointer to the HW struct
539  * @vsi_handle: VSI handle
540  * @tc: TC number
541  * @new_numqs: number of queues
542  */
543 static enum ice_status
544 ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
545 {
546 	struct ice_vsi_ctx *vsi_ctx;
547 	struct ice_q_ctx *q_ctx;
548 
549 	vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
550 	if (!vsi_ctx)
551 		return ICE_ERR_PARAM;
552 	/* allocate LAN queue contexts */
553 	if (!vsi_ctx->lan_q_ctx[tc]) {
554 		vsi_ctx->lan_q_ctx[tc] = devm_kcalloc(ice_hw_to_dev(hw),
555 						      new_numqs,
556 						      sizeof(*q_ctx),
557 						      GFP_KERNEL);
558 		if (!vsi_ctx->lan_q_ctx[tc])
559 			return ICE_ERR_NO_MEMORY;
560 		vsi_ctx->num_lan_q_entries[tc] = new_numqs;
561 		return 0;
562 	}
563 	/* num queues are increased, update the queue contexts */
564 	if (new_numqs > vsi_ctx->num_lan_q_entries[tc]) {
565 		u16 prev_num = vsi_ctx->num_lan_q_entries[tc];
566 
567 		q_ctx = devm_kcalloc(ice_hw_to_dev(hw), new_numqs,
568 				     sizeof(*q_ctx), GFP_KERNEL);
569 		if (!q_ctx)
570 			return ICE_ERR_NO_MEMORY;
571 		memcpy(q_ctx, vsi_ctx->lan_q_ctx[tc],
572 		       prev_num * sizeof(*q_ctx));
573 		devm_kfree(ice_hw_to_dev(hw), vsi_ctx->lan_q_ctx[tc]);
574 		vsi_ctx->lan_q_ctx[tc] = q_ctx;
575 		vsi_ctx->num_lan_q_entries[tc] = new_numqs;
576 	}
577 	return 0;
578 }
579 
580 /**
581  * ice_aq_rl_profile - performs a rate limiting task
582  * @hw: pointer to the HW struct
583  * @opcode:opcode for add, query, or remove profile(s)
584  * @num_profiles: the number of profiles
585  * @buf: pointer to buffer
586  * @buf_size: buffer size in bytes
587  * @num_processed: number of processed add or remove profile(s) to return
588  * @cd: pointer to command details structure
589  *
590  * RL profile function to add, query, or remove profile(s)
591  */
592 static enum ice_status
593 ice_aq_rl_profile(struct ice_hw *hw, enum ice_adminq_opc opcode,
594 		  u16 num_profiles, struct ice_aqc_rl_profile_generic_elem *buf,
595 		  u16 buf_size, u16 *num_processed, struct ice_sq_cd *cd)
596 {
597 	struct ice_aqc_rl_profile *cmd;
598 	struct ice_aq_desc desc;
599 	enum ice_status status;
600 
601 	cmd = &desc.params.rl_profile;
602 
603 	ice_fill_dflt_direct_cmd_desc(&desc, opcode);
604 	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
605 	cmd->num_profiles = cpu_to_le16(num_profiles);
606 	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
607 	if (!status && num_processed)
608 		*num_processed = le16_to_cpu(cmd->num_processed);
609 	return status;
610 }
611 
612 /**
613  * ice_aq_add_rl_profile - adds rate limiting profile(s)
614  * @hw: pointer to the HW struct
615  * @num_profiles: the number of profile(s) to be add
616  * @buf: pointer to buffer
617  * @buf_size: buffer size in bytes
618  * @num_profiles_added: total number of profiles added to return
619  * @cd: pointer to command details structure
620  *
621  * Add RL profile (0x0410)
622  */
623 static enum ice_status
624 ice_aq_add_rl_profile(struct ice_hw *hw, u16 num_profiles,
625 		      struct ice_aqc_rl_profile_generic_elem *buf,
626 		      u16 buf_size, u16 *num_profiles_added,
627 		      struct ice_sq_cd *cd)
628 {
629 	return ice_aq_rl_profile(hw, ice_aqc_opc_add_rl_profiles,
630 				 num_profiles, buf,
631 				 buf_size, num_profiles_added, cd);
632 }
633 
634 /**
635  * ice_aq_remove_rl_profile - removes RL profile(s)
636  * @hw: pointer to the HW struct
637  * @num_profiles: the number of profile(s) to remove
638  * @buf: pointer to buffer
639  * @buf_size: buffer size in bytes
640  * @num_profiles_removed: total number of profiles removed to return
641  * @cd: pointer to command details structure or NULL
642  *
643  * Remove RL profile (0x0415)
644  */
645 static enum ice_status
646 ice_aq_remove_rl_profile(struct ice_hw *hw, u16 num_profiles,
647 			 struct ice_aqc_rl_profile_generic_elem *buf,
648 			 u16 buf_size, u16 *num_profiles_removed,
649 			 struct ice_sq_cd *cd)
650 {
651 	return ice_aq_rl_profile(hw, ice_aqc_opc_remove_rl_profiles,
652 				 num_profiles, buf,
653 				 buf_size, num_profiles_removed, cd);
654 }
655 
656 /**
657  * ice_sched_del_rl_profile - remove RL profile
658  * @hw: pointer to the HW struct
659  * @rl_info: rate limit profile information
660  *
661  * If the profile ID is not referenced anymore, it removes profile ID with
662  * its associated parameters from HW DB,and locally. The caller needs to
663  * hold scheduler lock.
664  */
665 static enum ice_status
666 ice_sched_del_rl_profile(struct ice_hw *hw,
667 			 struct ice_aqc_rl_profile_info *rl_info)
668 {
669 	struct ice_aqc_rl_profile_generic_elem *buf;
670 	u16 num_profiles_removed;
671 	enum ice_status status;
672 	u16 num_profiles = 1;
673 
674 	if (rl_info->prof_id_ref != 0)
675 		return ICE_ERR_IN_USE;
676 
677 	/* Safe to remove profile ID */
678 	buf = (struct ice_aqc_rl_profile_generic_elem *)
679 		&rl_info->profile;
680 	status = ice_aq_remove_rl_profile(hw, num_profiles, buf, sizeof(*buf),
681 					  &num_profiles_removed, NULL);
682 	if (status || num_profiles_removed != num_profiles)
683 		return ICE_ERR_CFG;
684 
685 	/* Delete stale entry now */
686 	list_del(&rl_info->list_entry);
687 	devm_kfree(ice_hw_to_dev(hw), rl_info);
688 	return status;
689 }
690 
691 /**
692  * ice_sched_clear_rl_prof - clears RL prof entries
693  * @pi: port information structure
694  *
695  * This function removes all RL profile from HW as well as from SW DB.
696  */
697 static void ice_sched_clear_rl_prof(struct ice_port_info *pi)
698 {
699 	u16 ln;
700 
701 	for (ln = 0; ln < pi->hw->num_tx_sched_layers; ln++) {
702 		struct ice_aqc_rl_profile_info *rl_prof_elem;
703 		struct ice_aqc_rl_profile_info *rl_prof_tmp;
704 
705 		list_for_each_entry_safe(rl_prof_elem, rl_prof_tmp,
706 					 &pi->rl_prof_list[ln], list_entry) {
707 			struct ice_hw *hw = pi->hw;
708 			enum ice_status status;
709 
710 			rl_prof_elem->prof_id_ref = 0;
711 			status = ice_sched_del_rl_profile(hw, rl_prof_elem);
712 			if (status) {
713 				ice_debug(hw, ICE_DBG_SCHED,
714 					  "Remove rl profile failed\n");
715 				/* On error, free mem required */
716 				list_del(&rl_prof_elem->list_entry);
717 				devm_kfree(ice_hw_to_dev(hw), rl_prof_elem);
718 			}
719 		}
720 	}
721 }
722 
723 /**
724  * ice_sched_clear_agg - clears the aggregator related information
725  * @hw: pointer to the hardware structure
726  *
727  * This function removes aggregator list and free up aggregator related memory
728  * previously allocated.
729  */
730 void ice_sched_clear_agg(struct ice_hw *hw)
731 {
732 	struct ice_sched_agg_info *agg_info;
733 	struct ice_sched_agg_info *atmp;
734 
735 	list_for_each_entry_safe(agg_info, atmp, &hw->agg_list, list_entry) {
736 		struct ice_sched_agg_vsi_info *agg_vsi_info;
737 		struct ice_sched_agg_vsi_info *vtmp;
738 
739 		list_for_each_entry_safe(agg_vsi_info, vtmp,
740 					 &agg_info->agg_vsi_list, list_entry) {
741 			list_del(&agg_vsi_info->list_entry);
742 			devm_kfree(ice_hw_to_dev(hw), agg_vsi_info);
743 		}
744 		list_del(&agg_info->list_entry);
745 		devm_kfree(ice_hw_to_dev(hw), agg_info);
746 	}
747 }
748 
749 /**
750  * ice_sched_clear_tx_topo - clears the scheduler tree nodes
751  * @pi: port information structure
752  *
753  * This function removes all the nodes from HW as well as from SW DB.
754  */
755 static void ice_sched_clear_tx_topo(struct ice_port_info *pi)
756 {
757 	if (!pi)
758 		return;
759 	/* remove RL profiles related lists */
760 	ice_sched_clear_rl_prof(pi);
761 	if (pi->root) {
762 		ice_free_sched_node(pi, pi->root);
763 		pi->root = NULL;
764 	}
765 }
766 
767 /**
768  * ice_sched_clear_port - clear the scheduler elements from SW DB for a port
769  * @pi: port information structure
770  *
771  * Cleanup scheduling elements from SW DB
772  */
773 void ice_sched_clear_port(struct ice_port_info *pi)
774 {
775 	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
776 		return;
777 
778 	pi->port_state = ICE_SCHED_PORT_STATE_INIT;
779 	mutex_lock(&pi->sched_lock);
780 	ice_sched_clear_tx_topo(pi);
781 	mutex_unlock(&pi->sched_lock);
782 	mutex_destroy(&pi->sched_lock);
783 }
784 
785 /**
786  * ice_sched_cleanup_all - cleanup scheduler elements from SW DB for all ports
787  * @hw: pointer to the HW struct
788  *
789  * Cleanup scheduling elements from SW DB for all the ports
790  */
791 void ice_sched_cleanup_all(struct ice_hw *hw)
792 {
793 	if (!hw)
794 		return;
795 
796 	if (hw->layer_info) {
797 		devm_kfree(ice_hw_to_dev(hw), hw->layer_info);
798 		hw->layer_info = NULL;
799 	}
800 
801 	ice_sched_clear_port(hw->port_info);
802 
803 	hw->num_tx_sched_layers = 0;
804 	hw->num_tx_sched_phys_layers = 0;
805 	hw->flattened_layers = 0;
806 	hw->max_cgds = 0;
807 }
808 
809 /**
810  * ice_sched_add_elems - add nodes to HW and SW DB
811  * @pi: port information structure
812  * @tc_node: pointer to the branch node
813  * @parent: pointer to the parent node
814  * @layer: layer number to add nodes
815  * @num_nodes: number of nodes
816  * @num_nodes_added: pointer to num nodes added
817  * @first_node_teid: if new nodes are added then return the TEID of first node
818  *
819  * This function add nodes to HW as well as to SW DB for a given layer
820  */
821 static enum ice_status
822 ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
823 		    struct ice_sched_node *parent, u8 layer, u16 num_nodes,
824 		    u16 *num_nodes_added, u32 *first_node_teid)
825 {
826 	struct ice_sched_node *prev, *new_node;
827 	struct ice_aqc_add_elem *buf;
828 	u16 i, num_groups_added = 0;
829 	enum ice_status status = 0;
830 	struct ice_hw *hw = pi->hw;
831 	size_t buf_size;
832 	u32 teid;
833 
834 	buf_size = struct_size(buf, generic, num_nodes - 1);
835 	buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL);
836 	if (!buf)
837 		return ICE_ERR_NO_MEMORY;
838 
839 	buf->hdr.parent_teid = parent->info.node_teid;
840 	buf->hdr.num_elems = cpu_to_le16(num_nodes);
841 	for (i = 0; i < num_nodes; i++) {
842 		buf->generic[i].parent_teid = parent->info.node_teid;
843 		buf->generic[i].data.elem_type = ICE_AQC_ELEM_TYPE_SE_GENERIC;
844 		buf->generic[i].data.valid_sections =
845 			ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
846 			ICE_AQC_ELEM_VALID_EIR;
847 		buf->generic[i].data.generic = 0;
848 		buf->generic[i].data.cir_bw.bw_profile_idx =
849 			cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
850 		buf->generic[i].data.cir_bw.bw_alloc =
851 			cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
852 		buf->generic[i].data.eir_bw.bw_profile_idx =
853 			cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
854 		buf->generic[i].data.eir_bw.bw_alloc =
855 			cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
856 	}
857 
858 	status = ice_aq_add_sched_elems(hw, 1, buf, buf_size,
859 					&num_groups_added, NULL);
860 	if (status || num_groups_added != 1) {
861 		ice_debug(hw, ICE_DBG_SCHED, "add node failed FW Error %d\n",
862 			  hw->adminq.sq_last_status);
863 		devm_kfree(ice_hw_to_dev(hw), buf);
864 		return ICE_ERR_CFG;
865 	}
866 
867 	*num_nodes_added = num_nodes;
868 	/* add nodes to the SW DB */
869 	for (i = 0; i < num_nodes; i++) {
870 		status = ice_sched_add_node(pi, layer, &buf->generic[i]);
871 		if (status) {
872 			ice_debug(hw, ICE_DBG_SCHED,
873 				  "add nodes in SW DB failed status =%d\n",
874 				  status);
875 			break;
876 		}
877 
878 		teid = le32_to_cpu(buf->generic[i].node_teid);
879 		new_node = ice_sched_find_node_by_teid(parent, teid);
880 		if (!new_node) {
881 			ice_debug(hw, ICE_DBG_SCHED,
882 				  "Node is missing for teid =%d\n", teid);
883 			break;
884 		}
885 
886 		new_node->sibling = NULL;
887 		new_node->tc_num = tc_node->tc_num;
888 
889 		/* add it to previous node sibling pointer */
890 		/* Note: siblings are not linked across branches */
891 		prev = ice_sched_get_first_node(pi, tc_node, layer);
892 		if (prev && prev != new_node) {
893 			while (prev->sibling)
894 				prev = prev->sibling;
895 			prev->sibling = new_node;
896 		}
897 
898 		/* initialize the sibling head */
899 		if (!pi->sib_head[tc_node->tc_num][layer])
900 			pi->sib_head[tc_node->tc_num][layer] = new_node;
901 
902 		if (i == 0)
903 			*first_node_teid = teid;
904 	}
905 
906 	devm_kfree(ice_hw_to_dev(hw), buf);
907 	return status;
908 }
909 
910 /**
911  * ice_sched_add_nodes_to_layer - Add nodes to a given layer
912  * @pi: port information structure
913  * @tc_node: pointer to TC node
914  * @parent: pointer to parent node
915  * @layer: layer number to add nodes
916  * @num_nodes: number of nodes to be added
917  * @first_node_teid: pointer to the first node TEID
918  * @num_nodes_added: pointer to number of nodes added
919  *
920  * This function add nodes to a given layer.
921  */
922 static enum ice_status
923 ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
924 			     struct ice_sched_node *tc_node,
925 			     struct ice_sched_node *parent, u8 layer,
926 			     u16 num_nodes, u32 *first_node_teid,
927 			     u16 *num_nodes_added)
928 {
929 	u32 *first_teid_ptr = first_node_teid;
930 	u16 new_num_nodes, max_child_nodes;
931 	enum ice_status status = 0;
932 	struct ice_hw *hw = pi->hw;
933 	u16 num_added = 0;
934 	u32 temp;
935 
936 	*num_nodes_added = 0;
937 
938 	if (!num_nodes)
939 		return status;
940 
941 	if (!parent || layer < hw->sw_entry_point_layer)
942 		return ICE_ERR_PARAM;
943 
944 	/* max children per node per layer */
945 	max_child_nodes = hw->max_children[parent->tx_sched_layer];
946 
947 	/* current number of children + required nodes exceed max children ? */
948 	if ((parent->num_children + num_nodes) > max_child_nodes) {
949 		/* Fail if the parent is a TC node */
950 		if (parent == tc_node)
951 			return ICE_ERR_CFG;
952 
953 		/* utilize all the spaces if the parent is not full */
954 		if (parent->num_children < max_child_nodes) {
955 			new_num_nodes = max_child_nodes - parent->num_children;
956 			/* this recursion is intentional, and wouldn't
957 			 * go more than 2 calls
958 			 */
959 			status = ice_sched_add_nodes_to_layer(pi, tc_node,
960 							      parent, layer,
961 							      new_num_nodes,
962 							      first_node_teid,
963 							      &num_added);
964 			if (status)
965 				return status;
966 
967 			*num_nodes_added += num_added;
968 		}
969 		/* Don't modify the first node TEID memory if the first node was
970 		 * added already in the above call. Instead send some temp
971 		 * memory for all other recursive calls.
972 		 */
973 		if (num_added)
974 			first_teid_ptr = &temp;
975 
976 		new_num_nodes = num_nodes - num_added;
977 
978 		/* This parent is full, try the next sibling */
979 		parent = parent->sibling;
980 
981 		/* this recursion is intentional, for 1024 queues
982 		 * per VSI, it goes max of 16 iterations.
983 		 * 1024 / 8 = 128 layer 8 nodes
984 		 * 128 /8 = 16 (add 8 nodes per iteration)
985 		 */
986 		status = ice_sched_add_nodes_to_layer(pi, tc_node, parent,
987 						      layer, new_num_nodes,
988 						      first_teid_ptr,
989 						      &num_added);
990 		*num_nodes_added += num_added;
991 		return status;
992 	}
993 
994 	status = ice_sched_add_elems(pi, tc_node, parent, layer, num_nodes,
995 				     num_nodes_added, first_node_teid);
996 	return status;
997 }
998 
999 /**
1000  * ice_sched_get_qgrp_layer - get the current queue group layer number
1001  * @hw: pointer to the HW struct
1002  *
1003  * This function returns the current queue group layer number
1004  */
1005 static u8 ice_sched_get_qgrp_layer(struct ice_hw *hw)
1006 {
1007 	/* It's always total layers - 1, the array is 0 relative so -2 */
1008 	return hw->num_tx_sched_layers - ICE_QGRP_LAYER_OFFSET;
1009 }
1010 
1011 /**
1012  * ice_sched_get_vsi_layer - get the current VSI layer number
1013  * @hw: pointer to the HW struct
1014  *
1015  * This function returns the current VSI layer number
1016  */
1017 static u8 ice_sched_get_vsi_layer(struct ice_hw *hw)
1018 {
1019 	/* Num Layers       VSI layer
1020 	 *     9               6
1021 	 *     7               4
1022 	 *     5 or less       sw_entry_point_layer
1023 	 */
1024 	/* calculate the VSI layer based on number of layers. */
1025 	if (hw->num_tx_sched_layers > ICE_VSI_LAYER_OFFSET + 1) {
1026 		u8 layer = hw->num_tx_sched_layers - ICE_VSI_LAYER_OFFSET;
1027 
1028 		if (layer > hw->sw_entry_point_layer)
1029 			return layer;
1030 	}
1031 	return hw->sw_entry_point_layer;
1032 }
1033 
1034 /**
1035  * ice_rm_dflt_leaf_node - remove the default leaf node in the tree
1036  * @pi: port information structure
1037  *
1038  * This function removes the leaf node that was created by the FW
1039  * during initialization
1040  */
1041 static void ice_rm_dflt_leaf_node(struct ice_port_info *pi)
1042 {
1043 	struct ice_sched_node *node;
1044 
1045 	node = pi->root;
1046 	while (node) {
1047 		if (!node->num_children)
1048 			break;
1049 		node = node->children[0];
1050 	}
1051 	if (node && node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF) {
1052 		u32 teid = le32_to_cpu(node->info.node_teid);
1053 		enum ice_status status;
1054 
1055 		/* remove the default leaf node */
1056 		status = ice_sched_remove_elems(pi->hw, node->parent, 1, &teid);
1057 		if (!status)
1058 			ice_free_sched_node(pi, node);
1059 	}
1060 }
1061 
1062 /**
1063  * ice_sched_rm_dflt_nodes - free the default nodes in the tree
1064  * @pi: port information structure
1065  *
1066  * This function frees all the nodes except root and TC that were created by
1067  * the FW during initialization
1068  */
1069 static void ice_sched_rm_dflt_nodes(struct ice_port_info *pi)
1070 {
1071 	struct ice_sched_node *node;
1072 
1073 	ice_rm_dflt_leaf_node(pi);
1074 
1075 	/* remove the default nodes except TC and root nodes */
1076 	node = pi->root;
1077 	while (node) {
1078 		if (node->tx_sched_layer >= pi->hw->sw_entry_point_layer &&
1079 		    node->info.data.elem_type != ICE_AQC_ELEM_TYPE_TC &&
1080 		    node->info.data.elem_type != ICE_AQC_ELEM_TYPE_ROOT_PORT) {
1081 			ice_free_sched_node(pi, node);
1082 			break;
1083 		}
1084 
1085 		if (!node->num_children)
1086 			break;
1087 		node = node->children[0];
1088 	}
1089 }
1090 
1091 /**
1092  * ice_sched_init_port - Initialize scheduler by querying information from FW
1093  * @pi: port info structure for the tree to cleanup
1094  *
1095  * This function is the initial call to find the total number of Tx scheduler
1096  * resources, default topology created by firmware and storing the information
1097  * in SW DB.
1098  */
1099 enum ice_status ice_sched_init_port(struct ice_port_info *pi)
1100 {
1101 	struct ice_aqc_get_topo_elem *buf;
1102 	enum ice_status status;
1103 	struct ice_hw *hw;
1104 	u8 num_branches;
1105 	u16 num_elems;
1106 	u8 i, j;
1107 
1108 	if (!pi)
1109 		return ICE_ERR_PARAM;
1110 	hw = pi->hw;
1111 
1112 	/* Query the Default Topology from FW */
1113 	buf = devm_kzalloc(ice_hw_to_dev(hw), ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
1114 	if (!buf)
1115 		return ICE_ERR_NO_MEMORY;
1116 
1117 	/* Query default scheduling tree topology */
1118 	status = ice_aq_get_dflt_topo(hw, pi->lport, buf, ICE_AQ_MAX_BUF_LEN,
1119 				      &num_branches, NULL);
1120 	if (status)
1121 		goto err_init_port;
1122 
1123 	/* num_branches should be between 1-8 */
1124 	if (num_branches < 1 || num_branches > ICE_TXSCHED_MAX_BRANCHES) {
1125 		ice_debug(hw, ICE_DBG_SCHED, "num_branches unexpected %d\n",
1126 			  num_branches);
1127 		status = ICE_ERR_PARAM;
1128 		goto err_init_port;
1129 	}
1130 
1131 	/* get the number of elements on the default/first branch */
1132 	num_elems = le16_to_cpu(buf[0].hdr.num_elems);
1133 
1134 	/* num_elems should always be between 1-9 */
1135 	if (num_elems < 1 || num_elems > ICE_AQC_TOPO_MAX_LEVEL_NUM) {
1136 		ice_debug(hw, ICE_DBG_SCHED, "num_elems unexpected %d\n",
1137 			  num_elems);
1138 		status = ICE_ERR_PARAM;
1139 		goto err_init_port;
1140 	}
1141 
1142 	/* If the last node is a leaf node then the index of the queue group
1143 	 * layer is two less than the number of elements.
1144 	 */
1145 	if (num_elems > 2 && buf[0].generic[num_elems - 1].data.elem_type ==
1146 	    ICE_AQC_ELEM_TYPE_LEAF)
1147 		pi->last_node_teid =
1148 			le32_to_cpu(buf[0].generic[num_elems - 2].node_teid);
1149 	else
1150 		pi->last_node_teid =
1151 			le32_to_cpu(buf[0].generic[num_elems - 1].node_teid);
1152 
1153 	/* Insert the Tx Sched root node */
1154 	status = ice_sched_add_root_node(pi, &buf[0].generic[0]);
1155 	if (status)
1156 		goto err_init_port;
1157 
1158 	/* Parse the default tree and cache the information */
1159 	for (i = 0; i < num_branches; i++) {
1160 		num_elems = le16_to_cpu(buf[i].hdr.num_elems);
1161 
1162 		/* Skip root element as already inserted */
1163 		for (j = 1; j < num_elems; j++) {
1164 			/* update the sw entry point */
1165 			if (buf[0].generic[j].data.elem_type ==
1166 			    ICE_AQC_ELEM_TYPE_ENTRY_POINT)
1167 				hw->sw_entry_point_layer = j;
1168 
1169 			status = ice_sched_add_node(pi, j, &buf[i].generic[j]);
1170 			if (status)
1171 				goto err_init_port;
1172 		}
1173 	}
1174 
1175 	/* Remove the default nodes. */
1176 	if (pi->root)
1177 		ice_sched_rm_dflt_nodes(pi);
1178 
1179 	/* initialize the port for handling the scheduler tree */
1180 	pi->port_state = ICE_SCHED_PORT_STATE_READY;
1181 	mutex_init(&pi->sched_lock);
1182 	for (i = 0; i < ICE_AQC_TOPO_MAX_LEVEL_NUM; i++)
1183 		INIT_LIST_HEAD(&pi->rl_prof_list[i]);
1184 
1185 err_init_port:
1186 	if (status && pi->root) {
1187 		ice_free_sched_node(pi, pi->root);
1188 		pi->root = NULL;
1189 	}
1190 
1191 	devm_kfree(ice_hw_to_dev(hw), buf);
1192 	return status;
1193 }
1194 
1195 /**
1196  * ice_sched_query_res_alloc - query the FW for num of logical sched layers
1197  * @hw: pointer to the HW struct
1198  *
1199  * query FW for allocated scheduler resources and store in HW struct
1200  */
1201 enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw)
1202 {
1203 	struct ice_aqc_query_txsched_res_resp *buf;
1204 	enum ice_status status = 0;
1205 	__le16 max_sibl;
1206 	u16 i;
1207 
1208 	if (hw->layer_info)
1209 		return status;
1210 
1211 	buf = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*buf), GFP_KERNEL);
1212 	if (!buf)
1213 		return ICE_ERR_NO_MEMORY;
1214 
1215 	status = ice_aq_query_sched_res(hw, sizeof(*buf), buf, NULL);
1216 	if (status)
1217 		goto sched_query_out;
1218 
1219 	hw->num_tx_sched_layers = le16_to_cpu(buf->sched_props.logical_levels);
1220 	hw->num_tx_sched_phys_layers =
1221 		le16_to_cpu(buf->sched_props.phys_levels);
1222 	hw->flattened_layers = buf->sched_props.flattening_bitmap;
1223 	hw->max_cgds = buf->sched_props.max_pf_cgds;
1224 
1225 	/* max sibling group size of current layer refers to the max children
1226 	 * of the below layer node.
1227 	 * layer 1 node max children will be layer 2 max sibling group size
1228 	 * layer 2 node max children will be layer 3 max sibling group size
1229 	 * and so on. This array will be populated from root (index 0) to
1230 	 * qgroup layer 7. Leaf node has no children.
1231 	 */
1232 	for (i = 0; i < hw->num_tx_sched_layers - 1; i++) {
1233 		max_sibl = buf->layer_props[i + 1].max_sibl_grp_sz;
1234 		hw->max_children[i] = le16_to_cpu(max_sibl);
1235 	}
1236 
1237 	hw->layer_info = devm_kmemdup(ice_hw_to_dev(hw), buf->layer_props,
1238 				      (hw->num_tx_sched_layers *
1239 				       sizeof(*hw->layer_info)),
1240 				      GFP_KERNEL);
1241 	if (!hw->layer_info) {
1242 		status = ICE_ERR_NO_MEMORY;
1243 		goto sched_query_out;
1244 	}
1245 
1246 sched_query_out:
1247 	devm_kfree(ice_hw_to_dev(hw), buf);
1248 	return status;
1249 }
1250 
1251 /**
1252  * ice_sched_find_node_in_subtree - Find node in part of base node subtree
1253  * @hw: pointer to the HW struct
1254  * @base: pointer to the base node
1255  * @node: pointer to the node to search
1256  *
1257  * This function checks whether a given node is part of the base node
1258  * subtree or not
1259  */
1260 static bool
1261 ice_sched_find_node_in_subtree(struct ice_hw *hw, struct ice_sched_node *base,
1262 			       struct ice_sched_node *node)
1263 {
1264 	u8 i;
1265 
1266 	for (i = 0; i < base->num_children; i++) {
1267 		struct ice_sched_node *child = base->children[i];
1268 
1269 		if (node == child)
1270 			return true;
1271 
1272 		if (child->tx_sched_layer > node->tx_sched_layer)
1273 			return false;
1274 
1275 		/* this recursion is intentional, and wouldn't
1276 		 * go more than 8 calls
1277 		 */
1278 		if (ice_sched_find_node_in_subtree(hw, child, node))
1279 			return true;
1280 	}
1281 	return false;
1282 }
1283 
1284 /**
1285  * ice_sched_get_free_qparent - Get a free LAN or RDMA queue group node
1286  * @pi: port information structure
1287  * @vsi_handle: software VSI handle
1288  * @tc: branch number
1289  * @owner: LAN or RDMA
1290  *
1291  * This function retrieves a free LAN or RDMA queue group node
1292  */
1293 struct ice_sched_node *
1294 ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
1295 			   u8 owner)
1296 {
1297 	struct ice_sched_node *vsi_node, *qgrp_node = NULL;
1298 	struct ice_vsi_ctx *vsi_ctx;
1299 	u16 max_children;
1300 	u8 qgrp_layer;
1301 
1302 	qgrp_layer = ice_sched_get_qgrp_layer(pi->hw);
1303 	max_children = pi->hw->max_children[qgrp_layer];
1304 
1305 	vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
1306 	if (!vsi_ctx)
1307 		return NULL;
1308 	vsi_node = vsi_ctx->sched.vsi_node[tc];
1309 	/* validate invalid VSI ID */
1310 	if (!vsi_node)
1311 		goto lan_q_exit;
1312 
1313 	/* get the first queue group node from VSI sub-tree */
1314 	qgrp_node = ice_sched_get_first_node(pi, vsi_node, qgrp_layer);
1315 	while (qgrp_node) {
1316 		/* make sure the qgroup node is part of the VSI subtree */
1317 		if (ice_sched_find_node_in_subtree(pi->hw, vsi_node, qgrp_node))
1318 			if (qgrp_node->num_children < max_children &&
1319 			    qgrp_node->owner == owner)
1320 				break;
1321 		qgrp_node = qgrp_node->sibling;
1322 	}
1323 
1324 lan_q_exit:
1325 	return qgrp_node;
1326 }
1327 
1328 /**
1329  * ice_sched_get_vsi_node - Get a VSI node based on VSI ID
1330  * @hw: pointer to the HW struct
1331  * @tc_node: pointer to the TC node
1332  * @vsi_handle: software VSI handle
1333  *
1334  * This function retrieves a VSI node for a given VSI ID from a given
1335  * TC branch
1336  */
1337 static struct ice_sched_node *
1338 ice_sched_get_vsi_node(struct ice_hw *hw, struct ice_sched_node *tc_node,
1339 		       u16 vsi_handle)
1340 {
1341 	struct ice_sched_node *node;
1342 	u8 vsi_layer;
1343 
1344 	vsi_layer = ice_sched_get_vsi_layer(hw);
1345 	node = ice_sched_get_first_node(hw->port_info, tc_node, vsi_layer);
1346 
1347 	/* Check whether it already exists */
1348 	while (node) {
1349 		if (node->vsi_handle == vsi_handle)
1350 			return node;
1351 		node = node->sibling;
1352 	}
1353 
1354 	return node;
1355 }
1356 
1357 /**
1358  * ice_sched_calc_vsi_child_nodes - calculate number of VSI child nodes
1359  * @hw: pointer to the HW struct
1360  * @num_qs: number of queues
1361  * @num_nodes: num nodes array
1362  *
1363  * This function calculates the number of VSI child nodes based on the
1364  * number of queues.
1365  */
1366 static void
1367 ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_qs, u16 *num_nodes)
1368 {
1369 	u16 num = num_qs;
1370 	u8 i, qgl, vsil;
1371 
1372 	qgl = ice_sched_get_qgrp_layer(hw);
1373 	vsil = ice_sched_get_vsi_layer(hw);
1374 
1375 	/* calculate num nodes from queue group to VSI layer */
1376 	for (i = qgl; i > vsil; i--) {
1377 		/* round to the next integer if there is a remainder */
1378 		num = DIV_ROUND_UP(num, hw->max_children[i]);
1379 
1380 		/* need at least one node */
1381 		num_nodes[i] = num ? num : 1;
1382 	}
1383 }
1384 
1385 /**
1386  * ice_sched_add_vsi_child_nodes - add VSI child nodes to tree
1387  * @pi: port information structure
1388  * @vsi_handle: software VSI handle
1389  * @tc_node: pointer to the TC node
1390  * @num_nodes: pointer to the num nodes that needs to be added per layer
1391  * @owner: node owner (LAN or RDMA)
1392  *
1393  * This function adds the VSI child nodes to tree. It gets called for
1394  * LAN and RDMA separately.
1395  */
1396 static enum ice_status
1397 ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
1398 			      struct ice_sched_node *tc_node, u16 *num_nodes,
1399 			      u8 owner)
1400 {
1401 	struct ice_sched_node *parent, *node;
1402 	struct ice_hw *hw = pi->hw;
1403 	enum ice_status status;
1404 	u32 first_node_teid;
1405 	u16 num_added = 0;
1406 	u8 i, qgl, vsil;
1407 
1408 	qgl = ice_sched_get_qgrp_layer(hw);
1409 	vsil = ice_sched_get_vsi_layer(hw);
1410 	parent = ice_sched_get_vsi_node(hw, tc_node, vsi_handle);
1411 	for (i = vsil + 1; i <= qgl; i++) {
1412 		if (!parent)
1413 			return ICE_ERR_CFG;
1414 
1415 		status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i,
1416 						      num_nodes[i],
1417 						      &first_node_teid,
1418 						      &num_added);
1419 		if (status || num_nodes[i] != num_added)
1420 			return ICE_ERR_CFG;
1421 
1422 		/* The newly added node can be a new parent for the next
1423 		 * layer nodes
1424 		 */
1425 		if (num_added) {
1426 			parent = ice_sched_find_node_by_teid(tc_node,
1427 							     first_node_teid);
1428 			node = parent;
1429 			while (node) {
1430 				node->owner = owner;
1431 				node = node->sibling;
1432 			}
1433 		} else {
1434 			parent = parent->children[0];
1435 		}
1436 	}
1437 
1438 	return 0;
1439 }
1440 
1441 /**
1442  * ice_sched_calc_vsi_support_nodes - calculate number of VSI support nodes
1443  * @hw: pointer to the HW struct
1444  * @tc_node: pointer to TC node
1445  * @num_nodes: pointer to num nodes array
1446  *
1447  * This function calculates the number of supported nodes needed to add this
1448  * VSI into Tx tree including the VSI, parent and intermediate nodes in below
1449  * layers
1450  */
1451 static void
1452 ice_sched_calc_vsi_support_nodes(struct ice_hw *hw,
1453 				 struct ice_sched_node *tc_node, u16 *num_nodes)
1454 {
1455 	struct ice_sched_node *node;
1456 	u8 vsil;
1457 	int i;
1458 
1459 	vsil = ice_sched_get_vsi_layer(hw);
1460 	for (i = vsil; i >= hw->sw_entry_point_layer; i--)
1461 		/* Add intermediate nodes if TC has no children and
1462 		 * need at least one node for VSI
1463 		 */
1464 		if (!tc_node->num_children || i == vsil) {
1465 			num_nodes[i]++;
1466 		} else {
1467 			/* If intermediate nodes are reached max children
1468 			 * then add a new one.
1469 			 */
1470 			node = ice_sched_get_first_node(hw->port_info, tc_node,
1471 							(u8)i);
1472 			/* scan all the siblings */
1473 			while (node) {
1474 				if (node->num_children < hw->max_children[i])
1475 					break;
1476 				node = node->sibling;
1477 			}
1478 
1479 			/* tree has one intermediate node to add this new VSI.
1480 			 * So no need to calculate supported nodes for below
1481 			 * layers.
1482 			 */
1483 			if (node)
1484 				break;
1485 			/* all the nodes are full, allocate a new one */
1486 			num_nodes[i]++;
1487 		}
1488 }
1489 
1490 /**
1491  * ice_sched_add_vsi_support_nodes - add VSI supported nodes into Tx tree
1492  * @pi: port information structure
1493  * @vsi_handle: software VSI handle
1494  * @tc_node: pointer to TC node
1495  * @num_nodes: pointer to num nodes array
1496  *
1497  * This function adds the VSI supported nodes into Tx tree including the
1498  * VSI, its parent and intermediate nodes in below layers
1499  */
1500 static enum ice_status
1501 ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle,
1502 				struct ice_sched_node *tc_node, u16 *num_nodes)
1503 {
1504 	struct ice_sched_node *parent = tc_node;
1505 	enum ice_status status;
1506 	u32 first_node_teid;
1507 	u16 num_added = 0;
1508 	u8 i, vsil;
1509 
1510 	if (!pi)
1511 		return ICE_ERR_PARAM;
1512 
1513 	vsil = ice_sched_get_vsi_layer(pi->hw);
1514 	for (i = pi->hw->sw_entry_point_layer; i <= vsil; i++) {
1515 		status = ice_sched_add_nodes_to_layer(pi, tc_node, parent,
1516 						      i, num_nodes[i],
1517 						      &first_node_teid,
1518 						      &num_added);
1519 		if (status || num_nodes[i] != num_added)
1520 			return ICE_ERR_CFG;
1521 
1522 		/* The newly added node can be a new parent for the next
1523 		 * layer nodes
1524 		 */
1525 		if (num_added)
1526 			parent = ice_sched_find_node_by_teid(tc_node,
1527 							     first_node_teid);
1528 		else
1529 			parent = parent->children[0];
1530 
1531 		if (!parent)
1532 			return ICE_ERR_CFG;
1533 
1534 		if (i == vsil)
1535 			parent->vsi_handle = vsi_handle;
1536 	}
1537 
1538 	return 0;
1539 }
1540 
1541 /**
1542  * ice_sched_add_vsi_to_topo - add a new VSI into tree
1543  * @pi: port information structure
1544  * @vsi_handle: software VSI handle
1545  * @tc: TC number
1546  *
1547  * This function adds a new VSI into scheduler tree
1548  */
1549 static enum ice_status
1550 ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_handle, u8 tc)
1551 {
1552 	u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
1553 	struct ice_sched_node *tc_node;
1554 	struct ice_hw *hw = pi->hw;
1555 
1556 	tc_node = ice_sched_get_tc_node(pi, tc);
1557 	if (!tc_node)
1558 		return ICE_ERR_PARAM;
1559 
1560 	/* calculate number of supported nodes needed for this VSI */
1561 	ice_sched_calc_vsi_support_nodes(hw, tc_node, num_nodes);
1562 
1563 	/* add VSI supported nodes to TC subtree */
1564 	return ice_sched_add_vsi_support_nodes(pi, vsi_handle, tc_node,
1565 					       num_nodes);
1566 }
1567 
1568 /**
1569  * ice_sched_update_vsi_child_nodes - update VSI child nodes
1570  * @pi: port information structure
1571  * @vsi_handle: software VSI handle
1572  * @tc: TC number
1573  * @new_numqs: new number of max queues
1574  * @owner: owner of this subtree
1575  *
1576  * This function updates the VSI child nodes based on the number of queues
1577  */
1578 static enum ice_status
1579 ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
1580 				 u8 tc, u16 new_numqs, u8 owner)
1581 {
1582 	u16 new_num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
1583 	struct ice_sched_node *vsi_node;
1584 	struct ice_sched_node *tc_node;
1585 	struct ice_vsi_ctx *vsi_ctx;
1586 	enum ice_status status = 0;
1587 	struct ice_hw *hw = pi->hw;
1588 	u16 prev_numqs;
1589 
1590 	tc_node = ice_sched_get_tc_node(pi, tc);
1591 	if (!tc_node)
1592 		return ICE_ERR_CFG;
1593 
1594 	vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_handle);
1595 	if (!vsi_node)
1596 		return ICE_ERR_CFG;
1597 
1598 	vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
1599 	if (!vsi_ctx)
1600 		return ICE_ERR_PARAM;
1601 
1602 	prev_numqs = vsi_ctx->sched.max_lanq[tc];
1603 	/* num queues are not changed or less than the previous number */
1604 	if (new_numqs <= prev_numqs)
1605 		return status;
1606 	status = ice_alloc_lan_q_ctx(hw, vsi_handle, tc, new_numqs);
1607 	if (status)
1608 		return status;
1609 
1610 	if (new_numqs)
1611 		ice_sched_calc_vsi_child_nodes(hw, new_numqs, new_num_nodes);
1612 	/* Keep the max number of queue configuration all the time. Update the
1613 	 * tree only if number of queues > previous number of queues. This may
1614 	 * leave some extra nodes in the tree if number of queues < previous
1615 	 * number but that wouldn't harm anything. Removing those extra nodes
1616 	 * may complicate the code if those nodes are part of SRL or
1617 	 * individually rate limited.
1618 	 */
1619 	status = ice_sched_add_vsi_child_nodes(pi, vsi_handle, tc_node,
1620 					       new_num_nodes, owner);
1621 	if (status)
1622 		return status;
1623 	vsi_ctx->sched.max_lanq[tc] = new_numqs;
1624 
1625 	return 0;
1626 }
1627 
1628 /**
1629  * ice_sched_cfg_vsi - configure the new/existing VSI
1630  * @pi: port information structure
1631  * @vsi_handle: software VSI handle
1632  * @tc: TC number
1633  * @maxqs: max number of queues
1634  * @owner: LAN or RDMA
1635  * @enable: TC enabled or disabled
1636  *
1637  * This function adds/updates VSI nodes based on the number of queues. If TC is
1638  * enabled and VSI is in suspended state then resume the VSI back. If TC is
1639  * disabled then suspend the VSI if it is not already.
1640  */
1641 enum ice_status
1642 ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
1643 		  u8 owner, bool enable)
1644 {
1645 	struct ice_sched_node *vsi_node, *tc_node;
1646 	struct ice_vsi_ctx *vsi_ctx;
1647 	enum ice_status status = 0;
1648 	struct ice_hw *hw = pi->hw;
1649 
1650 	ice_debug(pi->hw, ICE_DBG_SCHED, "add/config VSI %d\n", vsi_handle);
1651 	tc_node = ice_sched_get_tc_node(pi, tc);
1652 	if (!tc_node)
1653 		return ICE_ERR_PARAM;
1654 	vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
1655 	if (!vsi_ctx)
1656 		return ICE_ERR_PARAM;
1657 	vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_handle);
1658 
1659 	/* suspend the VSI if TC is not enabled */
1660 	if (!enable) {
1661 		if (vsi_node && vsi_node->in_use) {
1662 			u32 teid = le32_to_cpu(vsi_node->info.node_teid);
1663 
1664 			status = ice_sched_suspend_resume_elems(hw, 1, &teid,
1665 								true);
1666 			if (!status)
1667 				vsi_node->in_use = false;
1668 		}
1669 		return status;
1670 	}
1671 
1672 	/* TC is enabled, if it is a new VSI then add it to the tree */
1673 	if (!vsi_node) {
1674 		status = ice_sched_add_vsi_to_topo(pi, vsi_handle, tc);
1675 		if (status)
1676 			return status;
1677 
1678 		vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_handle);
1679 		if (!vsi_node)
1680 			return ICE_ERR_CFG;
1681 
1682 		vsi_ctx->sched.vsi_node[tc] = vsi_node;
1683 		vsi_node->in_use = true;
1684 		/* invalidate the max queues whenever VSI gets added first time
1685 		 * into the scheduler tree (boot or after reset). We need to
1686 		 * recreate the child nodes all the time in these cases.
1687 		 */
1688 		vsi_ctx->sched.max_lanq[tc] = 0;
1689 	}
1690 
1691 	/* update the VSI child nodes */
1692 	status = ice_sched_update_vsi_child_nodes(pi, vsi_handle, tc, maxqs,
1693 						  owner);
1694 	if (status)
1695 		return status;
1696 
1697 	/* TC is enabled, resume the VSI if it is in the suspend state */
1698 	if (!vsi_node->in_use) {
1699 		u32 teid = le32_to_cpu(vsi_node->info.node_teid);
1700 
1701 		status = ice_sched_suspend_resume_elems(hw, 1, &teid, false);
1702 		if (!status)
1703 			vsi_node->in_use = true;
1704 	}
1705 
1706 	return status;
1707 }
1708 
1709 /**
1710  * ice_sched_rm_agg_vsi_entry - remove aggregator related VSI info entry
1711  * @pi: port information structure
1712  * @vsi_handle: software VSI handle
1713  *
1714  * This function removes single aggregator VSI info entry from
1715  * aggregator list.
1716  */
1717 static void ice_sched_rm_agg_vsi_info(struct ice_port_info *pi, u16 vsi_handle)
1718 {
1719 	struct ice_sched_agg_info *agg_info;
1720 	struct ice_sched_agg_info *atmp;
1721 
1722 	list_for_each_entry_safe(agg_info, atmp, &pi->hw->agg_list,
1723 				 list_entry) {
1724 		struct ice_sched_agg_vsi_info *agg_vsi_info;
1725 		struct ice_sched_agg_vsi_info *vtmp;
1726 
1727 		list_for_each_entry_safe(agg_vsi_info, vtmp,
1728 					 &agg_info->agg_vsi_list, list_entry)
1729 			if (agg_vsi_info->vsi_handle == vsi_handle) {
1730 				list_del(&agg_vsi_info->list_entry);
1731 				devm_kfree(ice_hw_to_dev(pi->hw),
1732 					   agg_vsi_info);
1733 				return;
1734 			}
1735 	}
1736 }
1737 
1738 /**
1739  * ice_sched_is_leaf_node_present - check for a leaf node in the sub-tree
1740  * @node: pointer to the sub-tree node
1741  *
1742  * This function checks for a leaf node presence in a given sub-tree node.
1743  */
1744 static bool ice_sched_is_leaf_node_present(struct ice_sched_node *node)
1745 {
1746 	u8 i;
1747 
1748 	for (i = 0; i < node->num_children; i++)
1749 		if (ice_sched_is_leaf_node_present(node->children[i]))
1750 			return true;
1751 	/* check for a leaf node */
1752 	return (node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF);
1753 }
1754 
1755 /**
1756  * ice_sched_rm_vsi_cfg - remove the VSI and its children nodes
1757  * @pi: port information structure
1758  * @vsi_handle: software VSI handle
1759  * @owner: LAN or RDMA
1760  *
1761  * This function removes the VSI and its LAN or RDMA children nodes from the
1762  * scheduler tree.
1763  */
1764 static enum ice_status
1765 ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner)
1766 {
1767 	enum ice_status status = ICE_ERR_PARAM;
1768 	struct ice_vsi_ctx *vsi_ctx;
1769 	u8 i;
1770 
1771 	ice_debug(pi->hw, ICE_DBG_SCHED, "removing VSI %d\n", vsi_handle);
1772 	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
1773 		return status;
1774 	mutex_lock(&pi->sched_lock);
1775 	vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
1776 	if (!vsi_ctx)
1777 		goto exit_sched_rm_vsi_cfg;
1778 
1779 	ice_for_each_traffic_class(i) {
1780 		struct ice_sched_node *vsi_node, *tc_node;
1781 		u8 j = 0;
1782 
1783 		tc_node = ice_sched_get_tc_node(pi, i);
1784 		if (!tc_node)
1785 			continue;
1786 
1787 		vsi_node = ice_sched_get_vsi_node(pi->hw, tc_node, vsi_handle);
1788 		if (!vsi_node)
1789 			continue;
1790 
1791 		if (ice_sched_is_leaf_node_present(vsi_node)) {
1792 			ice_debug(pi->hw, ICE_DBG_SCHED,
1793 				  "VSI has leaf nodes in TC %d\n", i);
1794 			status = ICE_ERR_IN_USE;
1795 			goto exit_sched_rm_vsi_cfg;
1796 		}
1797 		while (j < vsi_node->num_children) {
1798 			if (vsi_node->children[j]->owner == owner) {
1799 				ice_free_sched_node(pi, vsi_node->children[j]);
1800 
1801 				/* reset the counter again since the num
1802 				 * children will be updated after node removal
1803 				 */
1804 				j = 0;
1805 			} else {
1806 				j++;
1807 			}
1808 		}
1809 		/* remove the VSI if it has no children */
1810 		if (!vsi_node->num_children) {
1811 			ice_free_sched_node(pi, vsi_node);
1812 			vsi_ctx->sched.vsi_node[i] = NULL;
1813 
1814 			/* clean up aggregator related VSI info if any */
1815 			ice_sched_rm_agg_vsi_info(pi, vsi_handle);
1816 		}
1817 		if (owner == ICE_SCHED_NODE_OWNER_LAN)
1818 			vsi_ctx->sched.max_lanq[i] = 0;
1819 	}
1820 	status = 0;
1821 
1822 exit_sched_rm_vsi_cfg:
1823 	mutex_unlock(&pi->sched_lock);
1824 	return status;
1825 }
1826 
1827 /**
1828  * ice_rm_vsi_lan_cfg - remove VSI and its LAN children nodes
1829  * @pi: port information structure
1830  * @vsi_handle: software VSI handle
1831  *
1832  * This function clears the VSI and its LAN children nodes from scheduler tree
1833  * for all TCs.
1834  */
1835 enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle)
1836 {
1837 	return ice_sched_rm_vsi_cfg(pi, vsi_handle, ICE_SCHED_NODE_OWNER_LAN);
1838 }
1839 
1840 /**
1841  * ice_sched_rm_unused_rl_prof - remove unused RL profile
1842  * @pi: port information structure
1843  *
1844  * This function removes unused rate limit profiles from the HW and
1845  * SW DB. The caller needs to hold scheduler lock.
1846  */
1847 static void ice_sched_rm_unused_rl_prof(struct ice_port_info *pi)
1848 {
1849 	u16 ln;
1850 
1851 	for (ln = 0; ln < pi->hw->num_tx_sched_layers; ln++) {
1852 		struct ice_aqc_rl_profile_info *rl_prof_elem;
1853 		struct ice_aqc_rl_profile_info *rl_prof_tmp;
1854 
1855 		list_for_each_entry_safe(rl_prof_elem, rl_prof_tmp,
1856 					 &pi->rl_prof_list[ln], list_entry) {
1857 			if (!ice_sched_del_rl_profile(pi->hw, rl_prof_elem))
1858 				ice_debug(pi->hw, ICE_DBG_SCHED,
1859 					  "Removed rl profile\n");
1860 		}
1861 	}
1862 }
1863 
1864 /**
1865  * ice_sched_update_elem - update element
1866  * @hw: pointer to the HW struct
1867  * @node: pointer to node
1868  * @info: node info to update
1869  *
1870  * It updates the HW DB, and local SW DB of node. It updates the scheduling
1871  * parameters of node from argument info data buffer (Info->data buf) and
1872  * returns success or error on config sched element failure. The caller
1873  * needs to hold scheduler lock.
1874  */
1875 static enum ice_status
1876 ice_sched_update_elem(struct ice_hw *hw, struct ice_sched_node *node,
1877 		      struct ice_aqc_txsched_elem_data *info)
1878 {
1879 	struct ice_aqc_conf_elem buf;
1880 	enum ice_status status;
1881 	u16 elem_cfgd = 0;
1882 	u16 num_elems = 1;
1883 
1884 	buf.generic[0] = *info;
1885 	/* Parent TEID is reserved field in this aq call */
1886 	buf.generic[0].parent_teid = 0;
1887 	/* Element type is reserved field in this aq call */
1888 	buf.generic[0].data.elem_type = 0;
1889 	/* Flags is reserved field in this aq call */
1890 	buf.generic[0].data.flags = 0;
1891 
1892 	/* Update HW DB */
1893 	/* Configure element node */
1894 	status = ice_aq_cfg_sched_elems(hw, num_elems, &buf, sizeof(buf),
1895 					&elem_cfgd, NULL);
1896 	if (status || elem_cfgd != num_elems) {
1897 		ice_debug(hw, ICE_DBG_SCHED, "Config sched elem error\n");
1898 		return ICE_ERR_CFG;
1899 	}
1900 
1901 	/* Config success case */
1902 	/* Now update local SW DB */
1903 	/* Only copy the data portion of info buffer */
1904 	node->info.data = info->data;
1905 	return status;
1906 }
1907 
1908 /**
1909  * ice_sched_cfg_node_bw_alloc - configure node BW weight/alloc params
1910  * @hw: pointer to the HW struct
1911  * @node: sched node to configure
1912  * @rl_type: rate limit type CIR, EIR, or shared
1913  * @bw_alloc: BW weight/allocation
1914  *
1915  * This function configures node element's BW allocation.
1916  */
1917 static enum ice_status
1918 ice_sched_cfg_node_bw_alloc(struct ice_hw *hw, struct ice_sched_node *node,
1919 			    enum ice_rl_type rl_type, u16 bw_alloc)
1920 {
1921 	struct ice_aqc_txsched_elem_data buf;
1922 	struct ice_aqc_txsched_elem *data;
1923 	enum ice_status status;
1924 
1925 	buf = node->info;
1926 	data = &buf.data;
1927 	if (rl_type == ICE_MIN_BW) {
1928 		data->valid_sections |= ICE_AQC_ELEM_VALID_CIR;
1929 		data->cir_bw.bw_alloc = cpu_to_le16(bw_alloc);
1930 	} else if (rl_type == ICE_MAX_BW) {
1931 		data->valid_sections |= ICE_AQC_ELEM_VALID_EIR;
1932 		data->eir_bw.bw_alloc = cpu_to_le16(bw_alloc);
1933 	} else {
1934 		return ICE_ERR_PARAM;
1935 	}
1936 
1937 	/* Configure element */
1938 	status = ice_sched_update_elem(hw, node, &buf);
1939 	return status;
1940 }
1941 
1942 /**
1943  * ice_set_clear_cir_bw - set or clear CIR BW
1944  * @bw_t_info: bandwidth type information structure
1945  * @bw: bandwidth in Kbps - Kilo bits per sec
1946  *
1947  * Save or clear CIR bandwidth (BW) in the passed param bw_t_info.
1948  */
1949 static void ice_set_clear_cir_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
1950 {
1951 	if (bw == ICE_SCHED_DFLT_BW) {
1952 		clear_bit(ICE_BW_TYPE_CIR, bw_t_info->bw_t_bitmap);
1953 		bw_t_info->cir_bw.bw = 0;
1954 	} else {
1955 		/* Save type of BW information */
1956 		set_bit(ICE_BW_TYPE_CIR, bw_t_info->bw_t_bitmap);
1957 		bw_t_info->cir_bw.bw = bw;
1958 	}
1959 }
1960 
1961 /**
1962  * ice_set_clear_eir_bw - set or clear EIR BW
1963  * @bw_t_info: bandwidth type information structure
1964  * @bw: bandwidth in Kbps - Kilo bits per sec
1965  *
1966  * Save or clear EIR bandwidth (BW) in the passed param bw_t_info.
1967  */
1968 static void ice_set_clear_eir_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
1969 {
1970 	if (bw == ICE_SCHED_DFLT_BW) {
1971 		clear_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap);
1972 		bw_t_info->eir_bw.bw = 0;
1973 	} else {
1974 		/* EIR BW and Shared BW profiles are mutually exclusive and
1975 		 * hence only one of them may be set for any given element.
1976 		 * First clear earlier saved shared BW information.
1977 		 */
1978 		clear_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap);
1979 		bw_t_info->shared_bw = 0;
1980 		/* save EIR BW information */
1981 		set_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap);
1982 		bw_t_info->eir_bw.bw = bw;
1983 	}
1984 }
1985 
1986 /**
1987  * ice_set_clear_shared_bw - set or clear shared BW
1988  * @bw_t_info: bandwidth type information structure
1989  * @bw: bandwidth in Kbps - Kilo bits per sec
1990  *
1991  * Save or clear shared bandwidth (BW) in the passed param bw_t_info.
1992  */
1993 static void ice_set_clear_shared_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
1994 {
1995 	if (bw == ICE_SCHED_DFLT_BW) {
1996 		clear_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap);
1997 		bw_t_info->shared_bw = 0;
1998 	} else {
1999 		/* EIR BW and Shared BW profiles are mutually exclusive and
2000 		 * hence only one of them may be set for any given element.
2001 		 * First clear earlier saved EIR BW information.
2002 		 */
2003 		clear_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap);
2004 		bw_t_info->eir_bw.bw = 0;
2005 		/* save shared BW information */
2006 		set_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap);
2007 		bw_t_info->shared_bw = bw;
2008 	}
2009 }
2010 
2011 /**
2012  * ice_sched_calc_wakeup - calculate RL profile wakeup parameter
2013  * @bw: bandwidth in Kbps
2014  *
2015  * This function calculates the wakeup parameter of RL profile.
2016  */
2017 static u16 ice_sched_calc_wakeup(s32 bw)
2018 {
2019 	s64 bytes_per_sec, wakeup_int, wakeup_a, wakeup_b, wakeup_f;
2020 	s32 wakeup_f_int;
2021 	u16 wakeup = 0;
2022 
2023 	/* Get the wakeup integer value */
2024 	bytes_per_sec = div64_long(((s64)bw * 1000), BITS_PER_BYTE);
2025 	wakeup_int = div64_long(ICE_RL_PROF_FREQUENCY, bytes_per_sec);
2026 	if (wakeup_int > 63) {
2027 		wakeup = (u16)((1 << 15) | wakeup_int);
2028 	} else {
2029 		/* Calculate fraction value up to 4 decimals
2030 		 * Convert Integer value to a constant multiplier
2031 		 */
2032 		wakeup_b = (s64)ICE_RL_PROF_MULTIPLIER * wakeup_int;
2033 		wakeup_a = div64_long((s64)ICE_RL_PROF_MULTIPLIER *
2034 					   ICE_RL_PROF_FREQUENCY,
2035 				      bytes_per_sec);
2036 
2037 		/* Get Fraction value */
2038 		wakeup_f = wakeup_a - wakeup_b;
2039 
2040 		/* Round up the Fractional value via Ceil(Fractional value) */
2041 		if (wakeup_f > div64_long(ICE_RL_PROF_MULTIPLIER, 2))
2042 			wakeup_f += 1;
2043 
2044 		wakeup_f_int = (s32)div64_long(wakeup_f * ICE_RL_PROF_FRACTION,
2045 					       ICE_RL_PROF_MULTIPLIER);
2046 		wakeup |= (u16)(wakeup_int << 9);
2047 		wakeup |= (u16)(0x1ff & wakeup_f_int);
2048 	}
2049 
2050 	return wakeup;
2051 }
2052 
2053 /**
2054  * ice_sched_bw_to_rl_profile - convert BW to profile parameters
2055  * @bw: bandwidth in Kbps
2056  * @profile: profile parameters to return
2057  *
2058  * This function converts the BW to profile structure format.
2059  */
2060 static enum ice_status
2061 ice_sched_bw_to_rl_profile(u32 bw, struct ice_aqc_rl_profile_elem *profile)
2062 {
2063 	enum ice_status status = ICE_ERR_PARAM;
2064 	s64 bytes_per_sec, ts_rate, mv_tmp;
2065 	bool found = false;
2066 	s32 encode = 0;
2067 	s64 mv = 0;
2068 	s32 i;
2069 
2070 	/* Bw settings range is from 0.5Mb/sec to 100Gb/sec */
2071 	if (bw < ICE_SCHED_MIN_BW || bw > ICE_SCHED_MAX_BW)
2072 		return status;
2073 
2074 	/* Bytes per second from Kbps */
2075 	bytes_per_sec = div64_long(((s64)bw * 1000), BITS_PER_BYTE);
2076 
2077 	/* encode is 6 bits but really useful are 5 bits */
2078 	for (i = 0; i < 64; i++) {
2079 		u64 pow_result = BIT_ULL(i);
2080 
2081 		ts_rate = div64_long((s64)ICE_RL_PROF_FREQUENCY,
2082 				     pow_result * ICE_RL_PROF_TS_MULTIPLIER);
2083 		if (ts_rate <= 0)
2084 			continue;
2085 
2086 		/* Multiplier value */
2087 		mv_tmp = div64_long(bytes_per_sec * ICE_RL_PROF_MULTIPLIER,
2088 				    ts_rate);
2089 
2090 		/* Round to the nearest ICE_RL_PROF_MULTIPLIER */
2091 		mv = round_up_64bit(mv_tmp, ICE_RL_PROF_MULTIPLIER);
2092 
2093 		/* First multiplier value greater than the given
2094 		 * accuracy bytes
2095 		 */
2096 		if (mv > ICE_RL_PROF_ACCURACY_BYTES) {
2097 			encode = i;
2098 			found = true;
2099 			break;
2100 		}
2101 	}
2102 	if (found) {
2103 		u16 wm;
2104 
2105 		wm = ice_sched_calc_wakeup(bw);
2106 		profile->rl_multiply = cpu_to_le16(mv);
2107 		profile->wake_up_calc = cpu_to_le16(wm);
2108 		profile->rl_encode = cpu_to_le16(encode);
2109 		status = 0;
2110 	} else {
2111 		status = ICE_ERR_DOES_NOT_EXIST;
2112 	}
2113 
2114 	return status;
2115 }
2116 
2117 /**
2118  * ice_sched_add_rl_profile - add RL profile
2119  * @pi: port information structure
2120  * @rl_type: type of rate limit BW - min, max, or shared
2121  * @bw: bandwidth in Kbps - Kilo bits per sec
2122  * @layer_num: specifies in which layer to create profile
2123  *
2124  * This function first checks the existing list for corresponding BW
2125  * parameter. If it exists, it returns the associated profile otherwise
2126  * it creates a new rate limit profile for requested BW, and adds it to
2127  * the HW DB and local list. It returns the new profile or null on error.
2128  * The caller needs to hold the scheduler lock.
2129  */
2130 static struct ice_aqc_rl_profile_info *
2131 ice_sched_add_rl_profile(struct ice_port_info *pi,
2132 			 enum ice_rl_type rl_type, u32 bw, u8 layer_num)
2133 {
2134 	struct ice_aqc_rl_profile_generic_elem *buf;
2135 	struct ice_aqc_rl_profile_info *rl_prof_elem;
2136 	u16 profiles_added = 0, num_profiles = 1;
2137 	enum ice_status status;
2138 	struct ice_hw *hw;
2139 	u8 profile_type;
2140 
2141 	if (layer_num >= ICE_AQC_TOPO_MAX_LEVEL_NUM)
2142 		return NULL;
2143 	switch (rl_type) {
2144 	case ICE_MIN_BW:
2145 		profile_type = ICE_AQC_RL_PROFILE_TYPE_CIR;
2146 		break;
2147 	case ICE_MAX_BW:
2148 		profile_type = ICE_AQC_RL_PROFILE_TYPE_EIR;
2149 		break;
2150 	case ICE_SHARED_BW:
2151 		profile_type = ICE_AQC_RL_PROFILE_TYPE_SRL;
2152 		break;
2153 	default:
2154 		return NULL;
2155 	}
2156 
2157 	if (!pi)
2158 		return NULL;
2159 	hw = pi->hw;
2160 	list_for_each_entry(rl_prof_elem, &pi->rl_prof_list[layer_num],
2161 			    list_entry)
2162 		if (rl_prof_elem->profile.flags == profile_type &&
2163 		    rl_prof_elem->bw == bw)
2164 			/* Return existing profile ID info */
2165 			return rl_prof_elem;
2166 
2167 	/* Create new profile ID */
2168 	rl_prof_elem = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rl_prof_elem),
2169 				    GFP_KERNEL);
2170 
2171 	if (!rl_prof_elem)
2172 		return NULL;
2173 
2174 	status = ice_sched_bw_to_rl_profile(bw, &rl_prof_elem->profile);
2175 	if (status)
2176 		goto exit_add_rl_prof;
2177 
2178 	rl_prof_elem->bw = bw;
2179 	/* layer_num is zero relative, and fw expects level from 1 to 9 */
2180 	rl_prof_elem->profile.level = layer_num + 1;
2181 	rl_prof_elem->profile.flags = profile_type;
2182 	rl_prof_elem->profile.max_burst_size = cpu_to_le16(hw->max_burst_size);
2183 
2184 	/* Create new entry in HW DB */
2185 	buf = (struct ice_aqc_rl_profile_generic_elem *)
2186 		&rl_prof_elem->profile;
2187 	status = ice_aq_add_rl_profile(hw, num_profiles, buf, sizeof(*buf),
2188 				       &profiles_added, NULL);
2189 	if (status || profiles_added != num_profiles)
2190 		goto exit_add_rl_prof;
2191 
2192 	/* Good entry - add in the list */
2193 	rl_prof_elem->prof_id_ref = 0;
2194 	list_add(&rl_prof_elem->list_entry, &pi->rl_prof_list[layer_num]);
2195 	return rl_prof_elem;
2196 
2197 exit_add_rl_prof:
2198 	devm_kfree(ice_hw_to_dev(hw), rl_prof_elem);
2199 	return NULL;
2200 }
2201 
2202 /**
2203  * ice_sched_cfg_node_bw_lmt - configure node sched params
2204  * @hw: pointer to the HW struct
2205  * @node: sched node to configure
2206  * @rl_type: rate limit type CIR, EIR, or shared
2207  * @rl_prof_id: rate limit profile ID
2208  *
2209  * This function configures node element's BW limit.
2210  */
2211 static enum ice_status
2212 ice_sched_cfg_node_bw_lmt(struct ice_hw *hw, struct ice_sched_node *node,
2213 			  enum ice_rl_type rl_type, u16 rl_prof_id)
2214 {
2215 	struct ice_aqc_txsched_elem_data buf;
2216 	struct ice_aqc_txsched_elem *data;
2217 
2218 	buf = node->info;
2219 	data = &buf.data;
2220 	switch (rl_type) {
2221 	case ICE_MIN_BW:
2222 		data->valid_sections |= ICE_AQC_ELEM_VALID_CIR;
2223 		data->cir_bw.bw_profile_idx = cpu_to_le16(rl_prof_id);
2224 		break;
2225 	case ICE_MAX_BW:
2226 		/* EIR BW and Shared BW profiles are mutually exclusive and
2227 		 * hence only one of them may be set for any given element
2228 		 */
2229 		if (data->valid_sections & ICE_AQC_ELEM_VALID_SHARED)
2230 			return ICE_ERR_CFG;
2231 		data->valid_sections |= ICE_AQC_ELEM_VALID_EIR;
2232 		data->eir_bw.bw_profile_idx = cpu_to_le16(rl_prof_id);
2233 		break;
2234 	case ICE_SHARED_BW:
2235 		/* Check for removing shared BW */
2236 		if (rl_prof_id == ICE_SCHED_NO_SHARED_RL_PROF_ID) {
2237 			/* remove shared profile */
2238 			data->valid_sections &= ~ICE_AQC_ELEM_VALID_SHARED;
2239 			data->srl_id = 0; /* clear SRL field */
2240 
2241 			/* enable back EIR to default profile */
2242 			data->valid_sections |= ICE_AQC_ELEM_VALID_EIR;
2243 			data->eir_bw.bw_profile_idx =
2244 				cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
2245 			break;
2246 		}
2247 		/* EIR BW and Shared BW profiles are mutually exclusive and
2248 		 * hence only one of them may be set for any given element
2249 		 */
2250 		if ((data->valid_sections & ICE_AQC_ELEM_VALID_EIR) &&
2251 		    (le16_to_cpu(data->eir_bw.bw_profile_idx) !=
2252 			    ICE_SCHED_DFLT_RL_PROF_ID))
2253 			return ICE_ERR_CFG;
2254 		/* EIR BW is set to default, disable it */
2255 		data->valid_sections &= ~ICE_AQC_ELEM_VALID_EIR;
2256 		/* Okay to enable shared BW now */
2257 		data->valid_sections |= ICE_AQC_ELEM_VALID_SHARED;
2258 		data->srl_id = cpu_to_le16(rl_prof_id);
2259 		break;
2260 	default:
2261 		/* Unknown rate limit type */
2262 		return ICE_ERR_PARAM;
2263 	}
2264 
2265 	/* Configure element */
2266 	return ice_sched_update_elem(hw, node, &buf);
2267 }
2268 
2269 /**
2270  * ice_sched_get_node_rl_prof_id - get node's rate limit profile ID
2271  * @node: sched node
2272  * @rl_type: rate limit type
2273  *
2274  * If existing profile matches, it returns the corresponding rate
2275  * limit profile ID, otherwise it returns an invalid ID as error.
2276  */
2277 static u16
2278 ice_sched_get_node_rl_prof_id(struct ice_sched_node *node,
2279 			      enum ice_rl_type rl_type)
2280 {
2281 	u16 rl_prof_id = ICE_SCHED_INVAL_PROF_ID;
2282 	struct ice_aqc_txsched_elem *data;
2283 
2284 	data = &node->info.data;
2285 	switch (rl_type) {
2286 	case ICE_MIN_BW:
2287 		if (data->valid_sections & ICE_AQC_ELEM_VALID_CIR)
2288 			rl_prof_id = le16_to_cpu(data->cir_bw.bw_profile_idx);
2289 		break;
2290 	case ICE_MAX_BW:
2291 		if (data->valid_sections & ICE_AQC_ELEM_VALID_EIR)
2292 			rl_prof_id = le16_to_cpu(data->eir_bw.bw_profile_idx);
2293 		break;
2294 	case ICE_SHARED_BW:
2295 		if (data->valid_sections & ICE_AQC_ELEM_VALID_SHARED)
2296 			rl_prof_id = le16_to_cpu(data->srl_id);
2297 		break;
2298 	default:
2299 		break;
2300 	}
2301 
2302 	return rl_prof_id;
2303 }
2304 
2305 /**
2306  * ice_sched_get_rl_prof_layer - selects rate limit profile creation layer
2307  * @pi: port information structure
2308  * @rl_type: type of rate limit BW - min, max, or shared
2309  * @layer_index: layer index
2310  *
2311  * This function returns requested profile creation layer.
2312  */
2313 static u8
2314 ice_sched_get_rl_prof_layer(struct ice_port_info *pi, enum ice_rl_type rl_type,
2315 			    u8 layer_index)
2316 {
2317 	struct ice_hw *hw = pi->hw;
2318 
2319 	if (layer_index >= hw->num_tx_sched_layers)
2320 		return ICE_SCHED_INVAL_LAYER_NUM;
2321 	switch (rl_type) {
2322 	case ICE_MIN_BW:
2323 		if (hw->layer_info[layer_index].max_cir_rl_profiles)
2324 			return layer_index;
2325 		break;
2326 	case ICE_MAX_BW:
2327 		if (hw->layer_info[layer_index].max_eir_rl_profiles)
2328 			return layer_index;
2329 		break;
2330 	case ICE_SHARED_BW:
2331 		/* if current layer doesn't support SRL profile creation
2332 		 * then try a layer up or down.
2333 		 */
2334 		if (hw->layer_info[layer_index].max_srl_profiles)
2335 			return layer_index;
2336 		else if (layer_index < hw->num_tx_sched_layers - 1 &&
2337 			 hw->layer_info[layer_index + 1].max_srl_profiles)
2338 			return layer_index + 1;
2339 		else if (layer_index > 0 &&
2340 			 hw->layer_info[layer_index - 1].max_srl_profiles)
2341 			return layer_index - 1;
2342 		break;
2343 	default:
2344 		break;
2345 	}
2346 	return ICE_SCHED_INVAL_LAYER_NUM;
2347 }
2348 
2349 /**
2350  * ice_sched_get_srl_node - get shared rate limit node
2351  * @node: tree node
2352  * @srl_layer: shared rate limit layer
2353  *
2354  * This function returns SRL node to be used for shared rate limit purpose.
2355  * The caller needs to hold scheduler lock.
2356  */
2357 static struct ice_sched_node *
2358 ice_sched_get_srl_node(struct ice_sched_node *node, u8 srl_layer)
2359 {
2360 	if (srl_layer > node->tx_sched_layer)
2361 		return node->children[0];
2362 	else if (srl_layer < node->tx_sched_layer)
2363 		/* Node can't be created without a parent. It will always
2364 		 * have a valid parent except root node.
2365 		 */
2366 		return node->parent;
2367 	else
2368 		return node;
2369 }
2370 
2371 /**
2372  * ice_sched_rm_rl_profile - remove RL profile ID
2373  * @pi: port information structure
2374  * @layer_num: layer number where profiles are saved
2375  * @profile_type: profile type like EIR, CIR, or SRL
2376  * @profile_id: profile ID to remove
2377  *
2378  * This function removes rate limit profile from layer 'layer_num' of type
2379  * 'profile_type' and profile ID as 'profile_id'. The caller needs to hold
2380  * scheduler lock.
2381  */
2382 static enum ice_status
2383 ice_sched_rm_rl_profile(struct ice_port_info *pi, u8 layer_num, u8 profile_type,
2384 			u16 profile_id)
2385 {
2386 	struct ice_aqc_rl_profile_info *rl_prof_elem;
2387 	enum ice_status status = 0;
2388 
2389 	if (layer_num >= ICE_AQC_TOPO_MAX_LEVEL_NUM)
2390 		return ICE_ERR_PARAM;
2391 	/* Check the existing list for RL profile */
2392 	list_for_each_entry(rl_prof_elem, &pi->rl_prof_list[layer_num],
2393 			    list_entry)
2394 		if (rl_prof_elem->profile.flags == profile_type &&
2395 		    le16_to_cpu(rl_prof_elem->profile.profile_id) ==
2396 		    profile_id) {
2397 			if (rl_prof_elem->prof_id_ref)
2398 				rl_prof_elem->prof_id_ref--;
2399 
2400 			/* Remove old profile ID from database */
2401 			status = ice_sched_del_rl_profile(pi->hw, rl_prof_elem);
2402 			if (status && status != ICE_ERR_IN_USE)
2403 				ice_debug(pi->hw, ICE_DBG_SCHED,
2404 					  "Remove rl profile failed\n");
2405 			break;
2406 		}
2407 	if (status == ICE_ERR_IN_USE)
2408 		status = 0;
2409 	return status;
2410 }
2411 
2412 /**
2413  * ice_sched_set_node_bw_dflt - set node's bandwidth limit to default
2414  * @pi: port information structure
2415  * @node: pointer to node structure
2416  * @rl_type: rate limit type min, max, or shared
2417  * @layer_num: layer number where RL profiles are saved
2418  *
2419  * This function configures node element's BW rate limit profile ID of
2420  * type CIR, EIR, or SRL to default. This function needs to be called
2421  * with the scheduler lock held.
2422  */
2423 static enum ice_status
2424 ice_sched_set_node_bw_dflt(struct ice_port_info *pi,
2425 			   struct ice_sched_node *node,
2426 			   enum ice_rl_type rl_type, u8 layer_num)
2427 {
2428 	enum ice_status status;
2429 	struct ice_hw *hw;
2430 	u8 profile_type;
2431 	u16 rl_prof_id;
2432 	u16 old_id;
2433 
2434 	hw = pi->hw;
2435 	switch (rl_type) {
2436 	case ICE_MIN_BW:
2437 		profile_type = ICE_AQC_RL_PROFILE_TYPE_CIR;
2438 		rl_prof_id = ICE_SCHED_DFLT_RL_PROF_ID;
2439 		break;
2440 	case ICE_MAX_BW:
2441 		profile_type = ICE_AQC_RL_PROFILE_TYPE_EIR;
2442 		rl_prof_id = ICE_SCHED_DFLT_RL_PROF_ID;
2443 		break;
2444 	case ICE_SHARED_BW:
2445 		profile_type = ICE_AQC_RL_PROFILE_TYPE_SRL;
2446 		/* No SRL is configured for default case */
2447 		rl_prof_id = ICE_SCHED_NO_SHARED_RL_PROF_ID;
2448 		break;
2449 	default:
2450 		return ICE_ERR_PARAM;
2451 	}
2452 	/* Save existing RL prof ID for later clean up */
2453 	old_id = ice_sched_get_node_rl_prof_id(node, rl_type);
2454 	/* Configure BW scheduling parameters */
2455 	status = ice_sched_cfg_node_bw_lmt(hw, node, rl_type, rl_prof_id);
2456 	if (status)
2457 		return status;
2458 
2459 	/* Remove stale RL profile ID */
2460 	if (old_id == ICE_SCHED_DFLT_RL_PROF_ID ||
2461 	    old_id == ICE_SCHED_INVAL_PROF_ID)
2462 		return 0;
2463 
2464 	return ice_sched_rm_rl_profile(pi, layer_num, profile_type, old_id);
2465 }
2466 
2467 /**
2468  * ice_sched_set_eir_srl_excl - set EIR/SRL exclusiveness
2469  * @pi: port information structure
2470  * @node: pointer to node structure
2471  * @layer_num: layer number where rate limit profiles are saved
2472  * @rl_type: rate limit type min, max, or shared
2473  * @bw: bandwidth value
2474  *
2475  * This function prepares node element's bandwidth to SRL or EIR exclusively.
2476  * EIR BW and Shared BW profiles are mutually exclusive and hence only one of
2477  * them may be set for any given element. This function needs to be called
2478  * with the scheduler lock held.
2479  */
2480 static enum ice_status
2481 ice_sched_set_eir_srl_excl(struct ice_port_info *pi,
2482 			   struct ice_sched_node *node,
2483 			   u8 layer_num, enum ice_rl_type rl_type, u32 bw)
2484 {
2485 	if (rl_type == ICE_SHARED_BW) {
2486 		/* SRL node passed in this case, it may be different node */
2487 		if (bw == ICE_SCHED_DFLT_BW)
2488 			/* SRL being removed, ice_sched_cfg_node_bw_lmt()
2489 			 * enables EIR to default. EIR is not set in this
2490 			 * case, so no additional action is required.
2491 			 */
2492 			return 0;
2493 
2494 		/* SRL being configured, set EIR to default here.
2495 		 * ice_sched_cfg_node_bw_lmt() disables EIR when it
2496 		 * configures SRL
2497 		 */
2498 		return ice_sched_set_node_bw_dflt(pi, node, ICE_MAX_BW,
2499 						  layer_num);
2500 	} else if (rl_type == ICE_MAX_BW &&
2501 		   node->info.data.valid_sections & ICE_AQC_ELEM_VALID_SHARED) {
2502 		/* Remove Shared profile. Set default shared BW call
2503 		 * removes shared profile for a node.
2504 		 */
2505 		return ice_sched_set_node_bw_dflt(pi, node,
2506 						  ICE_SHARED_BW,
2507 						  layer_num);
2508 	}
2509 	return 0;
2510 }
2511 
2512 /**
2513  * ice_sched_set_node_bw - set node's bandwidth
2514  * @pi: port information structure
2515  * @node: tree node
2516  * @rl_type: rate limit type min, max, or shared
2517  * @bw: bandwidth in Kbps - Kilo bits per sec
2518  * @layer_num: layer number
2519  *
2520  * This function adds new profile corresponding to requested BW, configures
2521  * node's RL profile ID of type CIR, EIR, or SRL, and removes old profile
2522  * ID from local database. The caller needs to hold scheduler lock.
2523  */
2524 static enum ice_status
2525 ice_sched_set_node_bw(struct ice_port_info *pi, struct ice_sched_node *node,
2526 		      enum ice_rl_type rl_type, u32 bw, u8 layer_num)
2527 {
2528 	struct ice_aqc_rl_profile_info *rl_prof_info;
2529 	enum ice_status status = ICE_ERR_PARAM;
2530 	struct ice_hw *hw = pi->hw;
2531 	u16 old_id, rl_prof_id;
2532 
2533 	rl_prof_info = ice_sched_add_rl_profile(pi, rl_type, bw, layer_num);
2534 	if (!rl_prof_info)
2535 		return status;
2536 
2537 	rl_prof_id = le16_to_cpu(rl_prof_info->profile.profile_id);
2538 
2539 	/* Save existing RL prof ID for later clean up */
2540 	old_id = ice_sched_get_node_rl_prof_id(node, rl_type);
2541 	/* Configure BW scheduling parameters */
2542 	status = ice_sched_cfg_node_bw_lmt(hw, node, rl_type, rl_prof_id);
2543 	if (status)
2544 		return status;
2545 
2546 	/* New changes has been applied */
2547 	/* Increment the profile ID reference count */
2548 	rl_prof_info->prof_id_ref++;
2549 
2550 	/* Check for old ID removal */
2551 	if ((old_id == ICE_SCHED_DFLT_RL_PROF_ID && rl_type != ICE_SHARED_BW) ||
2552 	    old_id == ICE_SCHED_INVAL_PROF_ID || old_id == rl_prof_id)
2553 		return 0;
2554 
2555 	return ice_sched_rm_rl_profile(pi, layer_num,
2556 				       rl_prof_info->profile.flags,
2557 				       old_id);
2558 }
2559 
2560 /**
2561  * ice_sched_set_node_bw_lmt - set node's BW limit
2562  * @pi: port information structure
2563  * @node: tree node
2564  * @rl_type: rate limit type min, max, or shared
2565  * @bw: bandwidth in Kbps - Kilo bits per sec
2566  *
2567  * It updates node's BW limit parameters like BW RL profile ID of type CIR,
2568  * EIR, or SRL. The caller needs to hold scheduler lock.
2569  */
2570 static enum ice_status
2571 ice_sched_set_node_bw_lmt(struct ice_port_info *pi, struct ice_sched_node *node,
2572 			  enum ice_rl_type rl_type, u32 bw)
2573 {
2574 	struct ice_sched_node *cfg_node = node;
2575 	enum ice_status status;
2576 
2577 	struct ice_hw *hw;
2578 	u8 layer_num;
2579 
2580 	if (!pi)
2581 		return ICE_ERR_PARAM;
2582 	hw = pi->hw;
2583 	/* Remove unused RL profile IDs from HW and SW DB */
2584 	ice_sched_rm_unused_rl_prof(pi);
2585 	layer_num = ice_sched_get_rl_prof_layer(pi, rl_type,
2586 						node->tx_sched_layer);
2587 	if (layer_num >= hw->num_tx_sched_layers)
2588 		return ICE_ERR_PARAM;
2589 
2590 	if (rl_type == ICE_SHARED_BW) {
2591 		/* SRL node may be different */
2592 		cfg_node = ice_sched_get_srl_node(node, layer_num);
2593 		if (!cfg_node)
2594 			return ICE_ERR_CFG;
2595 	}
2596 	/* EIR BW and Shared BW profiles are mutually exclusive and
2597 	 * hence only one of them may be set for any given element
2598 	 */
2599 	status = ice_sched_set_eir_srl_excl(pi, cfg_node, layer_num, rl_type,
2600 					    bw);
2601 	if (status)
2602 		return status;
2603 	if (bw == ICE_SCHED_DFLT_BW)
2604 		return ice_sched_set_node_bw_dflt(pi, cfg_node, rl_type,
2605 						  layer_num);
2606 	return ice_sched_set_node_bw(pi, cfg_node, rl_type, bw, layer_num);
2607 }
2608 
2609 /**
2610  * ice_sched_set_node_bw_dflt_lmt - set node's BW limit to default
2611  * @pi: port information structure
2612  * @node: pointer to node structure
2613  * @rl_type: rate limit type min, max, or shared
2614  *
2615  * This function configures node element's BW rate limit profile ID of
2616  * type CIR, EIR, or SRL to default. This function needs to be called
2617  * with the scheduler lock held.
2618  */
2619 static enum ice_status
2620 ice_sched_set_node_bw_dflt_lmt(struct ice_port_info *pi,
2621 			       struct ice_sched_node *node,
2622 			       enum ice_rl_type rl_type)
2623 {
2624 	return ice_sched_set_node_bw_lmt(pi, node, rl_type,
2625 					 ICE_SCHED_DFLT_BW);
2626 }
2627 
2628 /**
2629  * ice_sched_validate_srl_node - Check node for SRL applicability
2630  * @node: sched node to configure
2631  * @sel_layer: selected SRL layer
2632  *
2633  * This function checks if the SRL can be applied to a selected layer node on
2634  * behalf of the requested node (first argument). This function needs to be
2635  * called with scheduler lock held.
2636  */
2637 static enum ice_status
2638 ice_sched_validate_srl_node(struct ice_sched_node *node, u8 sel_layer)
2639 {
2640 	/* SRL profiles are not available on all layers. Check if the
2641 	 * SRL profile can be applied to a node above or below the
2642 	 * requested node. SRL configuration is possible only if the
2643 	 * selected layer's node has single child.
2644 	 */
2645 	if (sel_layer == node->tx_sched_layer ||
2646 	    ((sel_layer == node->tx_sched_layer + 1) &&
2647 	    node->num_children == 1) ||
2648 	    ((sel_layer == node->tx_sched_layer - 1) &&
2649 	    (node->parent && node->parent->num_children == 1)))
2650 		return 0;
2651 
2652 	return ICE_ERR_CFG;
2653 }
2654 
2655 /**
2656  * ice_sched_save_q_bw - save queue node's BW information
2657  * @q_ctx: queue context structure
2658  * @rl_type: rate limit type min, max, or shared
2659  * @bw: bandwidth in Kbps - Kilo bits per sec
2660  *
2661  * Save BW information of queue type node for post replay use.
2662  */
2663 static enum ice_status
2664 ice_sched_save_q_bw(struct ice_q_ctx *q_ctx, enum ice_rl_type rl_type, u32 bw)
2665 {
2666 	switch (rl_type) {
2667 	case ICE_MIN_BW:
2668 		ice_set_clear_cir_bw(&q_ctx->bw_t_info, bw);
2669 		break;
2670 	case ICE_MAX_BW:
2671 		ice_set_clear_eir_bw(&q_ctx->bw_t_info, bw);
2672 		break;
2673 	case ICE_SHARED_BW:
2674 		ice_set_clear_shared_bw(&q_ctx->bw_t_info, bw);
2675 		break;
2676 	default:
2677 		return ICE_ERR_PARAM;
2678 	}
2679 	return 0;
2680 }
2681 
2682 /**
2683  * ice_sched_set_q_bw_lmt - sets queue BW limit
2684  * @pi: port information structure
2685  * @vsi_handle: sw VSI handle
2686  * @tc: traffic class
2687  * @q_handle: software queue handle
2688  * @rl_type: min, max, or shared
2689  * @bw: bandwidth in Kbps
2690  *
2691  * This function sets BW limit of queue scheduling node.
2692  */
2693 static enum ice_status
2694 ice_sched_set_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
2695 		       u16 q_handle, enum ice_rl_type rl_type, u32 bw)
2696 {
2697 	enum ice_status status = ICE_ERR_PARAM;
2698 	struct ice_sched_node *node;
2699 	struct ice_q_ctx *q_ctx;
2700 
2701 	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
2702 		return ICE_ERR_PARAM;
2703 	mutex_lock(&pi->sched_lock);
2704 	q_ctx = ice_get_lan_q_ctx(pi->hw, vsi_handle, tc, q_handle);
2705 	if (!q_ctx)
2706 		goto exit_q_bw_lmt;
2707 	node = ice_sched_find_node_by_teid(pi->root, q_ctx->q_teid);
2708 	if (!node) {
2709 		ice_debug(pi->hw, ICE_DBG_SCHED, "Wrong q_teid\n");
2710 		goto exit_q_bw_lmt;
2711 	}
2712 
2713 	/* Return error if it is not a leaf node */
2714 	if (node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF)
2715 		goto exit_q_bw_lmt;
2716 
2717 	/* SRL bandwidth layer selection */
2718 	if (rl_type == ICE_SHARED_BW) {
2719 		u8 sel_layer; /* selected layer */
2720 
2721 		sel_layer = ice_sched_get_rl_prof_layer(pi, rl_type,
2722 							node->tx_sched_layer);
2723 		if (sel_layer >= pi->hw->num_tx_sched_layers) {
2724 			status = ICE_ERR_PARAM;
2725 			goto exit_q_bw_lmt;
2726 		}
2727 		status = ice_sched_validate_srl_node(node, sel_layer);
2728 		if (status)
2729 			goto exit_q_bw_lmt;
2730 	}
2731 
2732 	if (bw == ICE_SCHED_DFLT_BW)
2733 		status = ice_sched_set_node_bw_dflt_lmt(pi, node, rl_type);
2734 	else
2735 		status = ice_sched_set_node_bw_lmt(pi, node, rl_type, bw);
2736 
2737 	if (!status)
2738 		status = ice_sched_save_q_bw(q_ctx, rl_type, bw);
2739 
2740 exit_q_bw_lmt:
2741 	mutex_unlock(&pi->sched_lock);
2742 	return status;
2743 }
2744 
2745 /**
2746  * ice_cfg_q_bw_lmt - configure queue BW limit
2747  * @pi: port information structure
2748  * @vsi_handle: sw VSI handle
2749  * @tc: traffic class
2750  * @q_handle: software queue handle
2751  * @rl_type: min, max, or shared
2752  * @bw: bandwidth in Kbps
2753  *
2754  * This function configures BW limit of queue scheduling node.
2755  */
2756 enum ice_status
2757 ice_cfg_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
2758 		 u16 q_handle, enum ice_rl_type rl_type, u32 bw)
2759 {
2760 	return ice_sched_set_q_bw_lmt(pi, vsi_handle, tc, q_handle, rl_type,
2761 				      bw);
2762 }
2763 
2764 /**
2765  * ice_cfg_q_bw_dflt_lmt - configure queue BW default limit
2766  * @pi: port information structure
2767  * @vsi_handle: sw VSI handle
2768  * @tc: traffic class
2769  * @q_handle: software queue handle
2770  * @rl_type: min, max, or shared
2771  *
2772  * This function configures BW default limit of queue scheduling node.
2773  */
2774 enum ice_status
2775 ice_cfg_q_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
2776 		      u16 q_handle, enum ice_rl_type rl_type)
2777 {
2778 	return ice_sched_set_q_bw_lmt(pi, vsi_handle, tc, q_handle, rl_type,
2779 				      ICE_SCHED_DFLT_BW);
2780 }
2781 
2782 /**
2783  * ice_cfg_rl_burst_size - Set burst size value
2784  * @hw: pointer to the HW struct
2785  * @bytes: burst size in bytes
2786  *
2787  * This function configures/set the burst size to requested new value. The new
2788  * burst size value is used for future rate limit calls. It doesn't change the
2789  * existing or previously created RL profiles.
2790  */
2791 enum ice_status ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes)
2792 {
2793 	u16 burst_size_to_prog;
2794 
2795 	if (bytes < ICE_MIN_BURST_SIZE_ALLOWED ||
2796 	    bytes > ICE_MAX_BURST_SIZE_ALLOWED)
2797 		return ICE_ERR_PARAM;
2798 	if (ice_round_to_num(bytes, 64) <=
2799 	    ICE_MAX_BURST_SIZE_64_BYTE_GRANULARITY) {
2800 		/* 64 byte granularity case */
2801 		/* Disable MSB granularity bit */
2802 		burst_size_to_prog = ICE_64_BYTE_GRANULARITY;
2803 		/* round number to nearest 64 byte granularity */
2804 		bytes = ice_round_to_num(bytes, 64);
2805 		/* The value is in 64 byte chunks */
2806 		burst_size_to_prog |= (u16)(bytes / 64);
2807 	} else {
2808 		/* k bytes granularity case */
2809 		/* Enable MSB granularity bit */
2810 		burst_size_to_prog = ICE_KBYTE_GRANULARITY;
2811 		/* round number to nearest 1024 granularity */
2812 		bytes = ice_round_to_num(bytes, 1024);
2813 		/* check rounding doesn't go beyond allowed */
2814 		if (bytes > ICE_MAX_BURST_SIZE_KBYTE_GRANULARITY)
2815 			bytes = ICE_MAX_BURST_SIZE_KBYTE_GRANULARITY;
2816 		/* The value is in k bytes */
2817 		burst_size_to_prog |= (u16)(bytes / 1024);
2818 	}
2819 	hw->max_burst_size = burst_size_to_prog;
2820 	return 0;
2821 }
2822 
2823 /**
2824  * ice_sched_replay_node_prio - re-configure node priority
2825  * @hw: pointer to the HW struct
2826  * @node: sched node to configure
2827  * @priority: priority value
2828  *
2829  * This function configures node element's priority value. It
2830  * needs to be called with scheduler lock held.
2831  */
2832 static enum ice_status
2833 ice_sched_replay_node_prio(struct ice_hw *hw, struct ice_sched_node *node,
2834 			   u8 priority)
2835 {
2836 	struct ice_aqc_txsched_elem_data buf;
2837 	struct ice_aqc_txsched_elem *data;
2838 	enum ice_status status;
2839 
2840 	buf = node->info;
2841 	data = &buf.data;
2842 	data->valid_sections |= ICE_AQC_ELEM_VALID_GENERIC;
2843 	data->generic = priority;
2844 
2845 	/* Configure element */
2846 	status = ice_sched_update_elem(hw, node, &buf);
2847 	return status;
2848 }
2849 
2850 /**
2851  * ice_sched_replay_node_bw - replay node(s) BW
2852  * @hw: pointer to the HW struct
2853  * @node: sched node to configure
2854  * @bw_t_info: BW type information
2855  *
2856  * This function restores node's BW from bw_t_info. The caller needs
2857  * to hold the scheduler lock.
2858  */
2859 static enum ice_status
2860 ice_sched_replay_node_bw(struct ice_hw *hw, struct ice_sched_node *node,
2861 			 struct ice_bw_type_info *bw_t_info)
2862 {
2863 	struct ice_port_info *pi = hw->port_info;
2864 	enum ice_status status = ICE_ERR_PARAM;
2865 	u16 bw_alloc;
2866 
2867 	if (!node)
2868 		return status;
2869 	if (bitmap_empty(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_CNT))
2870 		return 0;
2871 	if (test_bit(ICE_BW_TYPE_PRIO, bw_t_info->bw_t_bitmap)) {
2872 		status = ice_sched_replay_node_prio(hw, node,
2873 						    bw_t_info->generic);
2874 		if (status)
2875 			return status;
2876 	}
2877 	if (test_bit(ICE_BW_TYPE_CIR, bw_t_info->bw_t_bitmap)) {
2878 		status = ice_sched_set_node_bw_lmt(pi, node, ICE_MIN_BW,
2879 						   bw_t_info->cir_bw.bw);
2880 		if (status)
2881 			return status;
2882 	}
2883 	if (test_bit(ICE_BW_TYPE_CIR_WT, bw_t_info->bw_t_bitmap)) {
2884 		bw_alloc = bw_t_info->cir_bw.bw_alloc;
2885 		status = ice_sched_cfg_node_bw_alloc(hw, node, ICE_MIN_BW,
2886 						     bw_alloc);
2887 		if (status)
2888 			return status;
2889 	}
2890 	if (test_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap)) {
2891 		status = ice_sched_set_node_bw_lmt(pi, node, ICE_MAX_BW,
2892 						   bw_t_info->eir_bw.bw);
2893 		if (status)
2894 			return status;
2895 	}
2896 	if (test_bit(ICE_BW_TYPE_EIR_WT, bw_t_info->bw_t_bitmap)) {
2897 		bw_alloc = bw_t_info->eir_bw.bw_alloc;
2898 		status = ice_sched_cfg_node_bw_alloc(hw, node, ICE_MAX_BW,
2899 						     bw_alloc);
2900 		if (status)
2901 			return status;
2902 	}
2903 	if (test_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap))
2904 		status = ice_sched_set_node_bw_lmt(pi, node, ICE_SHARED_BW,
2905 						   bw_t_info->shared_bw);
2906 	return status;
2907 }
2908 
2909 /**
2910  * ice_sched_replay_q_bw - replay queue type node BW
2911  * @pi: port information structure
2912  * @q_ctx: queue context structure
2913  *
2914  * This function replays queue type node bandwidth. This function needs to be
2915  * called with scheduler lock held.
2916  */
2917 enum ice_status
2918 ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx)
2919 {
2920 	struct ice_sched_node *q_node;
2921 
2922 	/* Following also checks the presence of node in tree */
2923 	q_node = ice_sched_find_node_by_teid(pi->root, q_ctx->q_teid);
2924 	if (!q_node)
2925 		return ICE_ERR_PARAM;
2926 	return ice_sched_replay_node_bw(pi->hw, q_node, &q_ctx->bw_t_info);
2927 }
2928