/* drivers/net/ethernet/mellanox/mlx5/core/rl.c */

/*
 * Copyright (c) 2013-2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/kernel.h>
#include <linux/mlx5/driver.h>
#include "mlx5_core.h"

/* Scheduling element fw management */
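/*
 * Create a scheduling element in the given hierarchy from the caller-built
 * scheduling context @ctx and return the firmware-assigned element id in
 * @element_id.
 */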
int mlx5_create_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
				       void *ctx, u32 *element_id)
{
	u32 out[MLX5_ST_SZ_DW(create_scheduling_element_out)] = {};
	u32 in[MLX5_ST_SZ_DW(create_scheduling_element_in)] = {};
	void *schedc;
	int err;

	schedc = MLX5_ADDR_OF(create_scheduling_element_in, in,
			      scheduling_context);
	MLX5_SET(create_scheduling_element_in, in, opcode,
		 MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT);
	MLX5_SET(create_scheduling_element_in, in, scheduling_hierarchy,
		 hierarchy);
	memcpy(schedc, ctx, MLX5_ST_SZ_BYTES(scheduling_context));

	err = mlx5_cmd_exec_inout(dev, create_scheduling_element, in, out);
	if (err)
		return err;

	*element_id = MLX5_GET(create_scheduling_element_out, out,
			       scheduling_element_id);
	return 0;
}

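/*
 * Modify an existing scheduling element; @modify_bitmask selects which
 * fields of the scheduling context the firmware should update.
 */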
int mlx5_modify_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
				       void *ctx, u32 element_id,
				       u32 modify_bitmask)
{
	u32 in[MLX5_ST_SZ_DW(modify_scheduling_element_in)] = {};
	void *schedc;

	schedc = MLX5_ADDR_OF(modify_scheduling_element_in, in,
			      scheduling_context);
	MLX5_SET(modify_scheduling_element_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT);
	MLX5_SET(modify_scheduling_element_in, in, scheduling_element_id,
		 element_id);
	MLX5_SET(modify_scheduling_element_in, in, modify_bitmask,
		 modify_bitmask);
	MLX5_SET(modify_scheduling_element_in, in, scheduling_hierarchy,
		 hierarchy);
	memcpy(schedc, ctx, MLX5_ST_SZ_BYTES(scheduling_context));

	return mlx5_cmd_exec_in(dev, modify_scheduling_element, in);
}

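/* Destroy a scheduling element previously created in @hierarchy. */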
int mlx5_destroy_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
					u32 element_id)
{
	u32 in[MLX5_ST_SZ_DW(destroy_scheduling_element_in)] = {};

	MLX5_SET(destroy_scheduling_element_in, in, opcode,
		 MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT);
	MLX5_SET(destroy_scheduling_element_in, in, scheduling_element_id,
		 element_id);
	MLX5_SET(destroy_scheduling_element_in, in, scheduling_hierarchy,
		 hierarchy);

	return mlx5_cmd_exec_in(dev, destroy_scheduling_element, in);
}

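/* Two entries match when both the raw rate-limit context and the UID match. */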
static bool mlx5_rl_are_equal_raw(struct mlx5_rl_entry *entry, void *rl_in,
				  u16 uid)
{
	return (!memcmp(entry->rl_raw, rl_in, sizeof(entry->rl_raw)) &&
		entry->uid == uid);
}

/* Find an entry where the given rate can be registered.
 * If the rate is already registered, return its entry; otherwise return the
 * first available entry. When a dedicated entry is requested, only a free
 * slot qualifies. If the table is full, return NULL.
 */
static struct mlx5_rl_entry *find_rl_entry(struct mlx5_rl_table *table,
					   void *rl_in, u16 uid, bool dedicated)
{
	struct mlx5_rl_entry *ret_entry = NULL;
	bool empty_found = false;
	int i;

	lockdep_assert_held(&table->rl_lock);
	WARN_ON(!table->rl_entry);

	for (i = 0; i < table->max_size; i++) {
		if (dedicated) {
			if (!table->rl_entry[i].refcount)
				return &table->rl_entry[i];
			continue;
		}

		if (table->rl_entry[i].refcount) {
			if (table->rl_entry[i].dedicated)
				continue;
			if (mlx5_rl_are_equal_raw(&table->rl_entry[i], rl_in,
						  uid))
				return &table->rl_entry[i];
		} else if (!empty_found) {
			empty_found = true;
			ret_entry = &table->rl_entry[i];
		}
	}

	return ret_entry;
}

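/*
 * Program (@set == true) or clear (@set == false) the rate limit described
 * by @entry in the hardware rate table slot @entry->index.
 */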
static int mlx5_set_pp_rate_limit_cmd(struct mlx5_core_dev *dev,
				      struct mlx5_rl_entry *entry, bool set)
{
	u32 in[MLX5_ST_SZ_DW(set_pp_rate_limit_in)] = {};
	void *pp_context;

	pp_context = MLX5_ADDR_OF(set_pp_rate_limit_in, in, ctx);
	MLX5_SET(set_pp_rate_limit_in, in, opcode,
		 MLX5_CMD_OP_SET_PP_RATE_LIMIT);
	MLX5_SET(set_pp_rate_limit_in, in, uid, entry->uid);
	MLX5_SET(set_pp_rate_limit_in, in, rate_limit_index, entry->index);
	if (set)
		memcpy(pp_context, entry->rl_raw, sizeof(entry->rl_raw));
	return mlx5_cmd_exec_in(dev, set_pp_rate_limit, in);
}

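/* Check whether @rate falls within the device-reported rate-limit range. */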
bool mlx5_rl_is_in_range(struct mlx5_core_dev *dev, u32 rate)
{
	struct mlx5_rl_table *table = &dev->priv.rl_table;

	return (rate <= table->max_rate && rate >= table->min_rate);
}
EXPORT_SYMBOL(mlx5_rl_is_in_range);

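/* Compare two rate limits field by field. */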
bool mlx5_rl_are_equal(struct mlx5_rate_limit *rl_0,
		       struct mlx5_rate_limit *rl_1)
{
	return ((rl_0->rate == rl_1->rate) &&
		(rl_0->max_burst_sz == rl_1->max_burst_sz) &&
		(rl_0->typical_pkt_sz == rl_1->typical_pkt_sz));
}
EXPORT_SYMBOL(mlx5_rl_are_equal);

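/*
 * Take a reference on the rate table, allocating the entry array lazily on
 * first use. Must be called with table->rl_lock held.
 */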
static int mlx5_rl_table_get(struct mlx5_rl_table *table)
{
	int i;

	lockdep_assert_held(&table->rl_lock);

	if (table->rl_entry) {
		table->refcount++;
		return 0;
	}

	table->rl_entry = kcalloc(table->max_size, sizeof(struct mlx5_rl_entry),
				  GFP_KERNEL);
	if (!table->rl_entry)
		return -ENOMEM;

	/* The index represents the index in the HW rate limit table.
	 * Index 0 is reserved for the unlimited rate.
	 */
	for (i = 0; i < table->max_size; i++)
		table->rl_entry[i].index = i + 1;

	table->refcount++;
	return 0;
}

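/* Drop a table reference; free the entry array when the last user is gone. */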
static void mlx5_rl_table_put(struct mlx5_rl_table *table)
{
	lockdep_assert_held(&table->rl_lock);
	if (--table->refcount)
		return;

	kfree(table->rl_entry);
	table->rl_entry = NULL;
}

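/* Clear any rates still programmed in hardware and free the entry array. */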
static void mlx5_rl_table_free(struct mlx5_core_dev *dev, struct mlx5_rl_table *table)
{
	int i;

	if (!table->rl_entry)
		return;

	/* Clear all configured rates */
	for (i = 0; i < table->max_size; i++)
		if (table->rl_entry[i].refcount)
			mlx5_set_pp_rate_limit_cmd(dev, &table->rl_entry[i], false);
	kfree(table->rl_entry);
}

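/*
 * Per-entry reference counting: the hardware slot is cleared only when the
 * last user of a rate releases it.
 */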
static void mlx5_rl_entry_get(struct mlx5_rl_entry *entry)
{
	entry->refcount++;
}

static void
mlx5_rl_entry_put(struct mlx5_core_dev *dev, struct mlx5_rl_entry *entry)
{
	entry->refcount--;
	if (!entry->refcount)
		mlx5_set_pp_rate_limit_cmd(dev, entry, false);
}

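/*
 * Register the raw rate-limit context @rl_in under @uid and return the
 * hardware rate table index in @index. With @dedicated_entry the entry is
 * never shared, even with callers requesting an identical rate.
 */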
int mlx5_rl_add_rate_raw(struct mlx5_core_dev *dev, void *rl_in, u16 uid,
			 bool dedicated_entry, u16 *index)
{
	struct mlx5_rl_table *table = &dev->priv.rl_table;
	struct mlx5_rl_entry *entry;
	u32 rate;
	int err;

	if (!table->max_size)
		return -EOPNOTSUPP;

	rate = MLX5_GET(set_pp_rate_limit_context, rl_in, rate_limit);
	if (!rate || !mlx5_rl_is_in_range(dev, rate)) {
		mlx5_core_err(dev, "Invalid rate: %u, should be %u to %u\n",
			      rate, table->min_rate, table->max_rate);
		return -EINVAL;
	}

	mutex_lock(&table->rl_lock);
	err = mlx5_rl_table_get(table);
	if (err)
		goto out;

	entry = find_rl_entry(table, rl_in, uid, dedicated_entry);
	if (!entry) {
		mlx5_core_err(dev, "Max number of %u rates reached\n",
			      table->max_size);
		err = -ENOSPC;
		goto rl_err;
	}
	if (!entry->refcount) {
		/* new rate limit */
		memcpy(entry->rl_raw, rl_in, sizeof(entry->rl_raw));
		entry->uid = uid;
		err = mlx5_set_pp_rate_limit_cmd(dev, entry, true);
		if (err) {
			mlx5_core_err(dev,
				      "Failed configuring rate limit(err %d): rate %u, max_burst_sz %u, typical_pkt_sz %u\n",
				      err, rate,
				      MLX5_GET(set_pp_rate_limit_context, rl_in,
					       burst_upper_bound),
				      MLX5_GET(set_pp_rate_limit_context, rl_in,
					       typical_packet_size));
			goto rl_err;
		}

		entry->dedicated = dedicated_entry;
	}
	mlx5_rl_entry_get(entry);
	*index = entry->index;
	mutex_unlock(&table->rl_lock);
	return 0;

rl_err:
	mlx5_rl_table_put(table);
out:
	mutex_unlock(&table->rl_lock);
	return err;
}
EXPORT_SYMBOL(mlx5_rl_add_rate_raw);

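/*
 * Release the reference taken by mlx5_rl_add_rate_raw(); @index must be a
 * value previously returned by it.
 */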
void mlx5_rl_remove_rate_raw(struct mlx5_core_dev *dev, u16 index)
{
	struct mlx5_rl_table *table = &dev->priv.rl_table;
	struct mlx5_rl_entry *entry;

	mutex_lock(&table->rl_lock);
	entry = &table->rl_entry[index - 1];
	mlx5_rl_entry_put(dev, entry);
	mlx5_rl_table_put(table);
	mutex_unlock(&table->rl_lock);
}
EXPORT_SYMBOL(mlx5_rl_remove_rate_raw);

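/*
 * Build a raw rate-limit context from @rl and register it as a shared
 * (non-dedicated) entry. Rates are in Kbps, as implied by the Kbps-to-Mbps
 * conversion in mlx5_init_rl_table(). An illustrative caller flow, with
 * hypothetical values (not taken from this file):
 *
 *	struct mlx5_rate_limit rl = { .rate = 100000 };	// ~100 Mbps
 *	u16 index;
 *	int err;
 *
 *	err = mlx5_rl_add_rate(dev, &index, &rl);
 *	if (err)
 *		return err;
 *	// ... program the send queue with rate_limit_index = index ...
 *	mlx5_rl_remove_rate(dev, &rl);
 */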
int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u16 *index,
		     struct mlx5_rate_limit *rl)
{
	u8 rl_raw[MLX5_ST_SZ_BYTES(set_pp_rate_limit_context)] = {};

	MLX5_SET(set_pp_rate_limit_context, rl_raw, rate_limit, rl->rate);
	MLX5_SET(set_pp_rate_limit_context, rl_raw, burst_upper_bound,
		 rl->max_burst_sz);
	MLX5_SET(set_pp_rate_limit_context, rl_raw, typical_packet_size,
		 rl->typical_pkt_sz);

	return mlx5_rl_add_rate_raw(dev, rl_raw,
				    MLX5_CAP_QOS(dev, packet_pacing_uid) ?
					MLX5_SHARED_RESOURCE_UID : 0,
				    false, index);
}
EXPORT_SYMBOL(mlx5_rl_add_rate);

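/*
 * Drop a reference on the rate described by @rl. Rate 0 is the reserved
 * "unlimited" value and is silently ignored.
 */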
void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, struct mlx5_rate_limit *rl)
{
	u8 rl_raw[MLX5_ST_SZ_BYTES(set_pp_rate_limit_context)] = {};
	struct mlx5_rl_table *table = &dev->priv.rl_table;
	struct mlx5_rl_entry *entry = NULL;

	/* 0 is a reserved value for unlimited rate */
	if (rl->rate == 0)
		return;

	MLX5_SET(set_pp_rate_limit_context, rl_raw, rate_limit, rl->rate);
	MLX5_SET(set_pp_rate_limit_context, rl_raw, burst_upper_bound,
		 rl->max_burst_sz);
	MLX5_SET(set_pp_rate_limit_context, rl_raw, typical_packet_size,
		 rl->typical_pkt_sz);

	mutex_lock(&table->rl_lock);
	entry = find_rl_entry(table, rl_raw,
			      MLX5_CAP_QOS(dev, packet_pacing_uid) ?
				MLX5_SHARED_RESOURCE_UID : 0, false);
	if (!entry || !entry->refcount) {
		mlx5_core_warn(dev, "Rate %u, max_burst_sz %u, typical_pkt_sz %u are not configured\n",
			       rl->rate, rl->max_burst_sz, rl->typical_pkt_sz);
		goto out;
	}
	mlx5_rl_entry_put(dev, entry);
	mlx5_rl_table_put(table);
out:
	mutex_unlock(&table->rl_lock);
}
EXPORT_SYMBOL(mlx5_rl_remove_rate);

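/*
 * Read the packet-pacing capabilities and initialize the rate table. On
 * devices without QoS/packet-pacing support the table size is left at 0,
 * so every add request fails with -EOPNOTSUPP.
 */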
int mlx5_init_rl_table(struct mlx5_core_dev *dev)
{
	struct mlx5_rl_table *table = &dev->priv.rl_table;

	if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, packet_pacing)) {
		table->max_size = 0;
		return 0;
	}

	mutex_init(&table->rl_lock);

	/* First entry is reserved for unlimited rate */
	table->max_size = MLX5_CAP_QOS(dev, packet_pacing_rate_table_size) - 1;
	table->max_rate = MLX5_CAP_QOS(dev, packet_pacing_max_rate);
	table->min_rate = MLX5_CAP_QOS(dev, packet_pacing_min_rate);

	mlx5_core_info(dev, "Rate limit: %u rates are supported, range: %uMbps to %uMbps\n",
		       table->max_size,
		       table->min_rate >> 10,
		       table->max_rate >> 10);

	return 0;
}

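/* Tear down the rate table; the inverse of mlx5_init_rl_table(). */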
void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev)
{
	struct mlx5_rl_table *table = &dev->priv.rl_table;

	if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, packet_pacing))
		return;

	mlx5_rl_table_free(dev, table);
	mutex_destroy(&table->rl_lock);
}