1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2020 Mellanox Technologies Ltd */
3 #include <linux/mlx5/driver.h>
4 #include "vhca_event.h"
5 #include "priv.h"
6 #include "sf.h"
7 #include "mlx5_ifc_vhca_event.h"
8 #include "ecpf.h"
9 #include "vhca_event.h"
10 #include "mlx5_core.h"
11 #include "eswitch.h"
12 
/* Per-SF hardware entry; one per possible hw function id of a controller. */
struct mlx5_sf_hw {
	u32 usr_sfnum;		/* User supplied SF number for this entry. */
	u8 allocated: 1;	/* Entry is in use; its hw function id is reserved. */
	u8 pending_delete: 1;	/* User freed the SF; waiting for firmware detach event
				 * before the hw id can be recycled.
				 */
};
18 
/* Table of SF entries belonging to one controller (local or external). */
struct mlx5_sf_hwc_table {
	struct mlx5_sf_hw *sfs;	/* Array of max_fn entries; NULL when unused. */
	int max_fn;		/* Number of SF functions this controller supports. */
	u16 start_fn_id;	/* First hw function id; sw id i maps to start_fn_id + i. */
};
24 
/* Index into mlx5_sf_hw_table::hwc; controller 0 is local, non-zero external. */
enum mlx5_sf_hwc_index {
	MLX5_SF_HWC_LOCAL,
	MLX5_SF_HWC_EXTERNAL,
	MLX5_SF_HWC_MAX,
};
30 
/* Device-wide SF hardware table; stored in dev->priv.sf_hw_table. */
struct mlx5_sf_hw_table {
	struct mlx5_core_dev *dev;
	struct mutex table_lock; /* Serializes sf deletion and vhca state change handler. */
	struct notifier_block vhca_nb;	/* Registered for vhca state events. */
	struct mlx5_sf_hwc_table hwc[MLX5_SF_HWC_MAX];
};
37 
38 static struct mlx5_sf_hwc_table *
39 mlx5_sf_controller_to_hwc(struct mlx5_core_dev *dev, u32 controller)
40 {
41 	int idx = !!controller;
42 
43 	return &dev->priv.sf_hw_table->hwc[idx];
44 }
45 
46 u16 mlx5_sf_sw_to_hw_id(struct mlx5_core_dev *dev, u32 controller, u16 sw_id)
47 {
48 	struct mlx5_sf_hwc_table *hwc;
49 
50 	hwc = mlx5_sf_controller_to_hwc(dev, controller);
51 	return hwc->start_fn_id + sw_id;
52 }
53 
54 static u16 mlx5_sf_hw_to_sw_id(struct mlx5_sf_hwc_table *hwc, u16 hw_id)
55 {
56 	return hw_id - hwc->start_fn_id;
57 }
58 
59 static struct mlx5_sf_hwc_table *
60 mlx5_sf_table_fn_to_hwc(struct mlx5_sf_hw_table *table, u16 fn_id)
61 {
62 	int i;
63 
64 	for (i = 0; i < ARRAY_SIZE(table->hwc); i++) {
65 		if (table->hwc[i].max_fn &&
66 		    fn_id >= table->hwc[i].start_fn_id &&
67 		    fn_id < (table->hwc[i].start_fn_id + table->hwc[i].max_fn))
68 			return &table->hwc[i];
69 	}
70 	return NULL;
71 }
72 
73 static int mlx5_sf_hw_table_id_alloc(struct mlx5_sf_hw_table *table, u32 controller,
74 				     u32 usr_sfnum)
75 {
76 	struct mlx5_sf_hwc_table *hwc;
77 	int i;
78 
79 	hwc = mlx5_sf_controller_to_hwc(table->dev, controller);
80 	if (!hwc->sfs)
81 		return -ENOSPC;
82 
83 	/* Check if sf with same sfnum already exists or not. */
84 	for (i = 0; i < hwc->max_fn; i++) {
85 		if (hwc->sfs[i].allocated && hwc->sfs[i].usr_sfnum == usr_sfnum)
86 			return -EEXIST;
87 	}
88 	/* Find the free entry and allocate the entry from the array */
89 	for (i = 0; i < hwc->max_fn; i++) {
90 		if (!hwc->sfs[i].allocated) {
91 			hwc->sfs[i].usr_sfnum = usr_sfnum;
92 			hwc->sfs[i].allocated = true;
93 			return i;
94 		}
95 	}
96 	return -ENOSPC;
97 }
98 
99 static void mlx5_sf_hw_table_id_free(struct mlx5_sf_hw_table *table, u32 controller, int id)
100 {
101 	struct mlx5_sf_hwc_table *hwc;
102 
103 	hwc = mlx5_sf_controller_to_hwc(table->dev, controller);
104 	hwc->sfs[id].allocated = false;
105 	hwc->sfs[id].pending_delete = false;
106 }
107 
/* Allocate an SF hw function for the given controller and user SF number.
 * Reserves a table entry, allocates the function in firmware, programs the
 * user-visible sw id, and (for external controllers) arms vhca events.
 * Returns the software id on success or a negative errno; on failure all
 * intermediate steps are undone in reverse order.
 */
int mlx5_sf_hw_table_sf_alloc(struct mlx5_core_dev *dev, u32 controller, u32 usr_sfnum)
{
	struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table;
	u16 hw_fn_id;
	int sw_id;
	int err;

	/* Table is absent when SFs/vhca events are unsupported on this device. */
	if (!table)
		return -EOPNOTSUPP;

	mutex_lock(&table->table_lock);
	sw_id = mlx5_sf_hw_table_id_alloc(table, controller, usr_sfnum);
	if (sw_id < 0) {
		err = sw_id;
		goto exist_err;
	}

	hw_fn_id = mlx5_sf_sw_to_hw_id(dev, controller, sw_id);
	err = mlx5_cmd_alloc_sf(dev, hw_fn_id);
	if (err)
		goto err;

	err = mlx5_modify_vhca_sw_id(dev, hw_fn_id, usr_sfnum);
	if (err)
		goto vhca_err;

	if (controller) {
		/* If this SF is for external controller, SF manager
		 * needs to arm firmware to receive the events.
		 */
		err = mlx5_vhca_event_arm(dev, hw_fn_id);
		if (err)
			goto vhca_err;
	}

	mutex_unlock(&table->table_lock);
	return sw_id;

vhca_err:
	/* Firmware function was allocated; return it before freeing the id. */
	mlx5_cmd_dealloc_sf(dev, hw_fn_id);
err:
	mlx5_sf_hw_table_id_free(table, controller, sw_id);
exist_err:
	mutex_unlock(&table->table_lock);
	return err;
}
154 
155 void mlx5_sf_hw_table_sf_free(struct mlx5_core_dev *dev, u32 controller, u16 id)
156 {
157 	struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table;
158 	u16 hw_fn_id;
159 
160 	mutex_lock(&table->table_lock);
161 	hw_fn_id = mlx5_sf_sw_to_hw_id(dev, controller, id);
162 	mlx5_cmd_dealloc_sf(dev, hw_fn_id);
163 	mlx5_sf_hw_table_id_free(table, controller, id);
164 	mutex_unlock(&table->table_lock);
165 }
166 
167 static void mlx5_sf_hw_table_hwc_sf_free(struct mlx5_core_dev *dev,
168 					 struct mlx5_sf_hwc_table *hwc, int idx)
169 {
170 	mlx5_cmd_dealloc_sf(dev, hwc->start_fn_id + idx);
171 	hwc->sfs[idx].allocated = false;
172 	hwc->sfs[idx].pending_delete = false;
173 }
174 
/* Free an SF whose teardown may still be in progress on the SF side.
 * If firmware reports the vhca is merely ALLOCATED (driver already detached),
 * the function is deallocated immediately; otherwise the entry is marked
 * pending_delete and recycled later by the vhca event handler.
 * NOTE(review): a query failure leaves the entry allocated and not pending —
 * presumably it is then only reclaimed by mlx5_sf_hw_table_dealloc_all().
 */
void mlx5_sf_hw_table_sf_deferred_free(struct mlx5_core_dev *dev, u32 controller, u16 id)
{
	struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table;
	u32 out[MLX5_ST_SZ_DW(query_vhca_state_out)] = {};
	struct mlx5_sf_hwc_table *hwc;
	u16 hw_fn_id;
	u8 state;
	int err;

	hw_fn_id = mlx5_sf_sw_to_hw_id(dev, controller, id);
	hwc = mlx5_sf_controller_to_hwc(dev, controller);
	mutex_lock(&table->table_lock);
	err = mlx5_cmd_query_vhca_state(dev, hw_fn_id, out, sizeof(out));
	if (err)
		goto err;
	state = MLX5_GET(query_vhca_state_out, out, vhca_state_context.vhca_state);
	if (state == MLX5_VHCA_STATE_ALLOCATED) {
		/* SF driver already detached; safe to dealloc right away. */
		mlx5_cmd_dealloc_sf(dev, hw_fn_id);
		hwc->sfs[id].allocated = false;
	} else {
		/* Defer to the vhca event handler once the SF detaches. */
		hwc->sfs[id].pending_delete = true;
	}
err:
	mutex_unlock(&table->table_lock);
}
200 
201 static void mlx5_sf_hw_table_hwc_dealloc_all(struct mlx5_core_dev *dev,
202 					     struct mlx5_sf_hwc_table *hwc)
203 {
204 	int i;
205 
206 	for (i = 0; i < hwc->max_fn; i++) {
207 		if (hwc->sfs[i].allocated)
208 			mlx5_sf_hw_table_hwc_sf_free(dev, hwc, i);
209 	}
210 }
211 
212 static void mlx5_sf_hw_table_dealloc_all(struct mlx5_sf_hw_table *table)
213 {
214 	mlx5_sf_hw_table_hwc_dealloc_all(table->dev, &table->hwc[MLX5_SF_HWC_EXTERNAL]);
215 	mlx5_sf_hw_table_hwc_dealloc_all(table->dev, &table->hwc[MLX5_SF_HWC_LOCAL]);
216 }
217 
218 static int mlx5_sf_hw_table_hwc_init(struct mlx5_sf_hwc_table *hwc, u16 max_fn, u16 base_id)
219 {
220 	struct mlx5_sf_hw *sfs;
221 
222 	if (!max_fn)
223 		return 0;
224 
225 	sfs = kcalloc(max_fn, sizeof(*sfs), GFP_KERNEL);
226 	if (!sfs)
227 		return -ENOMEM;
228 
229 	hwc->sfs = sfs;
230 	hwc->max_fn = max_fn;
231 	hwc->start_fn_id = base_id;
232 	return 0;
233 }
234 
/* Free the entries array; kfree(NULL) is a no-op for an uninitialized table. */
static void mlx5_sf_hw_table_hwc_cleanup(struct mlx5_sf_hwc_table *hwc)
{
	kfree(hwc->sfs);
}
239 
240 int mlx5_sf_hw_table_init(struct mlx5_core_dev *dev)
241 {
242 	struct mlx5_sf_hw_table *table;
243 	u16 max_ext_fn = 0;
244 	u16 ext_base_id;
245 	u16 max_fn = 0;
246 	u16 base_id;
247 	int err;
248 
249 	if (!mlx5_vhca_event_supported(dev))
250 		return 0;
251 
252 	if (mlx5_sf_supported(dev))
253 		max_fn = mlx5_sf_max_functions(dev);
254 
255 	err = mlx5_esw_sf_max_hpf_functions(dev, &max_ext_fn, &ext_base_id);
256 	if (err)
257 		return err;
258 
259 	if (!max_fn && !max_ext_fn)
260 		return 0;
261 
262 	table = kzalloc(sizeof(*table), GFP_KERNEL);
263 	if (!table)
264 		return -ENOMEM;
265 
266 	mutex_init(&table->table_lock);
267 	table->dev = dev;
268 	dev->priv.sf_hw_table = table;
269 
270 	base_id = mlx5_sf_start_function_id(dev);
271 	err = mlx5_sf_hw_table_hwc_init(&table->hwc[MLX5_SF_HWC_LOCAL], max_fn, base_id);
272 	if (err)
273 		goto table_err;
274 
275 	err = mlx5_sf_hw_table_hwc_init(&table->hwc[MLX5_SF_HWC_EXTERNAL],
276 					max_ext_fn, ext_base_id);
277 	if (err)
278 		goto ext_err;
279 
280 	mlx5_core_dbg(dev, "SF HW table: max sfs = %d, ext sfs = %d\n", max_fn, max_ext_fn);
281 	return 0;
282 
283 ext_err:
284 	mlx5_sf_hw_table_hwc_cleanup(&table->hwc[MLX5_SF_HWC_LOCAL]);
285 table_err:
286 	mutex_destroy(&table->table_lock);
287 	kfree(table);
288 	return err;
289 }
290 
291 void mlx5_sf_hw_table_cleanup(struct mlx5_core_dev *dev)
292 {
293 	struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table;
294 
295 	if (!table)
296 		return;
297 
298 	mutex_destroy(&table->table_lock);
299 	mlx5_sf_hw_table_hwc_cleanup(&table->hwc[MLX5_SF_HWC_EXTERNAL]);
300 	mlx5_sf_hw_table_hwc_cleanup(&table->hwc[MLX5_SF_HWC_LOCAL]);
301 	kfree(table);
302 }
303 
/* vhca state notifier: recycle the hw id of an SF whose delete was deferred.
 * Only ALLOCATED-state events matter here; they indicate the SF driver has
 * finally detached. Always returns 0 (event is informational).
 */
static int mlx5_sf_hw_vhca_event(struct notifier_block *nb, unsigned long opcode, void *data)
{
	struct mlx5_sf_hw_table *table = container_of(nb, struct mlx5_sf_hw_table, vhca_nb);
	const struct mlx5_vhca_state_event *event = data;
	struct mlx5_sf_hwc_table *hwc;
	struct mlx5_sf_hw *sf_hw;
	u16 sw_id;

	if (event->new_vhca_state != MLX5_VHCA_STATE_ALLOCATED)
		return 0;

	/* Ignore events for function ids outside any SF range. */
	hwc = mlx5_sf_table_fn_to_hwc(table, event->function_id);
	if (!hwc)
		return 0;

	sw_id = mlx5_sf_hw_to_sw_id(hwc, event->function_id);
	sf_hw = &hwc->sfs[sw_id];

	mutex_lock(&table->table_lock);
	/* SF driver notified through firmware that SF is finally detached.
	 * Hence recycle the sf hardware id for reuse.
	 */
	if (sf_hw->allocated && sf_hw->pending_delete)
		mlx5_sf_hw_table_hwc_sf_free(table->dev, hwc, sw_id);
	mutex_unlock(&table->table_lock);
	return 0;
}
331 
332 int mlx5_sf_hw_table_create(struct mlx5_core_dev *dev)
333 {
334 	struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table;
335 
336 	if (!table)
337 		return 0;
338 
339 	table->vhca_nb.notifier_call = mlx5_sf_hw_vhca_event;
340 	return mlx5_vhca_event_notifier_register(dev, &table->vhca_nb);
341 }
342 
343 void mlx5_sf_hw_table_destroy(struct mlx5_core_dev *dev)
344 {
345 	struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table;
346 
347 	if (!table)
348 		return;
349 
350 	mlx5_vhca_event_notifier_unregister(dev, &table->vhca_nb);
351 	/* Dealloc SFs whose firmware event has been missed. */
352 	mlx5_sf_hw_table_dealloc_all(table);
353 }
354 
355 bool mlx5_sf_hw_table_supported(const struct mlx5_core_dev *dev)
356 {
357 	return !!dev->priv.sf_hw_table;
358 }
359