// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2019 Mellanox Technologies. */

#include <linux/mlx5/eswitch.h>
#include <linux/err.h>
#include "dr_types.h"

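/* SW steering is supported on a domain if the device is sw_owner, or if it
 * is sw_owner_v2 with a steering format version of ConnectX-7 or earlier.
 */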
#define DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, dmn_type)	\
	((dmn)->info.caps.dmn_type##_sw_owner ||	\
	 ((dmn)->info.caps.dmn_type##_sw_owner_v2 &&	\
	  (dmn)->info.caps.sw_format_ver <= MLX5_STEERING_FORMAT_CONNECTX_7))

static void dr_domain_init_csum_recalc_fts(struct mlx5dr_domain *dmn)
{
	/* Per vport cached FW FT for checksum recalculation, this
	 * recalculation is needed due to a HW bug in STEv0.
	 */
	xa_init(&dmn->csum_fts_xa);
}

static void dr_domain_uninit_csum_recalc_fts(struct mlx5dr_domain *dmn)
{
	struct mlx5dr_fw_recalc_cs_ft *recalc_cs_ft;
	unsigned long i;

	xa_for_each(&dmn->csum_fts_xa, i, recalc_cs_ft) {
		if (recalc_cs_ft)
			mlx5dr_fw_destroy_recalc_cs_ft(dmn, recalc_cs_ft);
	}

	xa_destroy(&dmn->csum_fts_xa);
}

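/* Return the RX ICM address of the per-vport checksum-recalculation flow
 * table, creating and caching the table on first use for that vport.
 */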
int mlx5dr_domain_get_recalc_cs_ft_addr(struct mlx5dr_domain *dmn,
					u16 vport_num,
					u64 *rx_icm_addr)
{
	struct mlx5dr_fw_recalc_cs_ft *recalc_cs_ft;
	int ret;

	recalc_cs_ft = xa_load(&dmn->csum_fts_xa, vport_num);
	if (!recalc_cs_ft) {
		/* Table hasn't been created yet */
		recalc_cs_ft = mlx5dr_fw_create_recalc_cs_ft(dmn, vport_num);
		if (!recalc_cs_ft)
			return -EINVAL;

		ret = xa_err(xa_store(&dmn->csum_fts_xa, vport_num,
				      recalc_cs_ft, GFP_KERNEL));
		if (ret)
			return ret;
	}

	*rx_icm_addr = recalc_cs_ft->rx_icm_addr;

	return 0;
}

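/* Allocate the domain memory pools: ICM pools for STEs and for modify-header
 * actions, and the send info pool.
 */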
static int dr_domain_init_mem_resources(struct mlx5dr_domain *dmn)
{
	int ret;

	dmn->ste_icm_pool = mlx5dr_icm_pool_create(dmn, DR_ICM_TYPE_STE);
	if (!dmn->ste_icm_pool) {
		mlx5dr_err(dmn, "Couldn't get icm memory\n");
		return -ENOMEM;
	}

	dmn->action_icm_pool = mlx5dr_icm_pool_create(dmn, DR_ICM_TYPE_MODIFY_ACTION);
	if (!dmn->action_icm_pool) {
		mlx5dr_err(dmn, "Couldn't get action icm memory\n");
		ret = -ENOMEM;
		goto free_ste_icm_pool;
	}

	ret = mlx5dr_send_info_pool_create(dmn);
	if (ret) {
		mlx5dr_err(dmn, "Couldn't create send info pool\n");
		goto free_action_icm_pool;
	}

	return 0;

free_action_icm_pool:
	mlx5dr_icm_pool_destroy(dmn->action_icm_pool);
free_ste_icm_pool:
	mlx5dr_icm_pool_destroy(dmn->ste_icm_pool);
	return ret;
}

static void dr_domain_uninit_mem_resources(struct mlx5dr_domain *dmn)
{
	mlx5dr_send_info_pool_destroy(dmn);
	mlx5dr_icm_pool_destroy(dmn->action_icm_pool);
	mlx5dr_icm_pool_destroy(dmn->ste_icm_pool);
}

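/* Set up everything the domain needs for SW steering: the STE context that
 * matches the device steering format, a PD, a UAR page, the memory pools and
 * the send ring used to write steering entries to the device.
 */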
static int dr_domain_init_resources(struct mlx5dr_domain *dmn)
{
	int ret;

	dmn->ste_ctx = mlx5dr_ste_get_ctx(dmn->info.caps.sw_format_ver);
	if (!dmn->ste_ctx) {
		mlx5dr_err(dmn, "SW Steering on this device is unsupported\n");
		return -EOPNOTSUPP;
	}

	ret = mlx5_core_alloc_pd(dmn->mdev, &dmn->pdn);
	if (ret) {
		mlx5dr_err(dmn, "Couldn't allocate PD, ret: %d", ret);
		return ret;
	}

	dmn->uar = mlx5_get_uars_page(dmn->mdev);
	if (IS_ERR(dmn->uar)) {
		mlx5dr_err(dmn, "Couldn't allocate UAR\n");
		ret = PTR_ERR(dmn->uar);
		goto clean_pd;
	}

	ret = dr_domain_init_mem_resources(dmn);
	if (ret) {
		mlx5dr_err(dmn, "Couldn't create domain memory resources\n");
		goto clean_uar;
	}

	ret = mlx5dr_send_ring_alloc(dmn);
	if (ret) {
		mlx5dr_err(dmn, "Couldn't create send-ring\n");
		goto clean_mem_resources;
	}

	return 0;

clean_mem_resources:
	dr_domain_uninit_mem_resources(dmn);
clean_uar:
	mlx5_put_uars_page(dmn->mdev, dmn->uar);
clean_pd:
	mlx5_core_dealloc_pd(dmn->mdev, dmn->pdn);

	return ret;
}

static void dr_domain_uninit_resources(struct mlx5dr_domain *dmn)
{
	mlx5dr_send_ring_free(dmn, dmn->send_ring);
	dr_domain_uninit_mem_resources(dmn);
	mlx5_put_uars_page(dmn->mdev, dmn->uar);
	mlx5_core_dealloc_pd(dmn->mdev, dmn->pdn);
}

static void dr_domain_fill_uplink_caps(struct mlx5dr_domain *dmn,
				       struct mlx5dr_cmd_vport_cap *uplink_vport)
{
	struct mlx5dr_esw_caps *esw_caps = &dmn->info.caps.esw_caps;

	uplink_vport->num = MLX5_VPORT_UPLINK;
	uplink_vport->icm_address_rx = esw_caps->uplink_icm_address_rx;
	uplink_vport->icm_address_tx = esw_caps->uplink_icm_address_tx;
	uplink_vport->vport_gvmi = 0;
	uplink_vport->vhca_gvmi = dmn->info.caps.gvmi;
}

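/* Query the RX/TX ICM addresses and GVMI of a single vport and fill the
 * given caps structure.
 */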
static int dr_domain_query_vport(struct mlx5dr_domain *dmn,
				 u16 vport_number,
				 bool other_vport,
				 struct mlx5dr_cmd_vport_cap *vport_caps)
{
	int ret;

	ret = mlx5dr_cmd_query_esw_vport_context(dmn->mdev,
						 other_vport,
						 vport_number,
						 &vport_caps->icm_address_rx,
						 &vport_caps->icm_address_tx);
	if (ret)
		return ret;

	ret = mlx5dr_cmd_query_gvmi(dmn->mdev,
				    other_vport,
				    vport_number,
				    &vport_caps->vport_gvmi);
	if (ret)
		return ret;

	vport_caps->num = vport_number;
	vport_caps->vhca_gvmi = dmn->info.caps.gvmi;

	return 0;
}

static int dr_domain_query_esw_mngr(struct mlx5dr_domain *dmn)
{
	return dr_domain_query_vport(dmn, 0, false,
				     &dmn->info.caps.vports.esw_manager_caps);
}

static void dr_domain_query_uplink(struct mlx5dr_domain *dmn)
{
	dr_domain_fill_uplink_caps(dmn, &dmn->info.caps.vports.uplink_caps);
}

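/* Query a vport's caps on demand and cache them in the vports xarray.
 * Returns NULL if the allocation or query failed, or an ERR_PTR if the
 * xarray insertion failed (-EBUSY means another thread stored it first).
 */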
static struct mlx5dr_cmd_vport_cap *
dr_domain_add_vport_cap(struct mlx5dr_domain *dmn, u16 vport)
{
	struct mlx5dr_cmd_caps *caps = &dmn->info.caps;
	struct mlx5dr_cmd_vport_cap *vport_caps;
	int ret;

	vport_caps = kvzalloc(sizeof(*vport_caps), GFP_KERNEL);
	if (!vport_caps)
		return NULL;

	ret = dr_domain_query_vport(dmn, vport, true, vport_caps);
	if (ret) {
		kvfree(vport_caps);
		return NULL;
	}

	ret = xa_insert(&caps->vports.vports_caps_xa, vport,
			vport_caps, GFP_KERNEL);
	if (ret) {
		mlx5dr_dbg(dmn, "Couldn't insert new vport into xarray (%d)\n", ret);
		kvfree(vport_caps);
		return ERR_PTR(ret);
	}

	return vport_caps;
}

static bool dr_domain_is_esw_mgr_vport(struct mlx5dr_domain *dmn, u16 vport)
{
	struct mlx5dr_cmd_caps *caps = &dmn->info.caps;

	return (caps->is_ecpf && vport == MLX5_VPORT_ECPF) ||
	       (!caps->is_ecpf && vport == 0);
}

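/* Look up a vport's caps: the eswitch manager and uplink caps are kept in
 * the domain itself, any other vport is queried lazily and cached in the
 * vports xarray. On a racy insert (-EBUSY) the cached entry is reloaded.
 */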
struct mlx5dr_cmd_vport_cap *
mlx5dr_domain_get_vport_cap(struct mlx5dr_domain *dmn, u16 vport)
{
	struct mlx5dr_cmd_caps *caps = &dmn->info.caps;
	struct mlx5dr_cmd_vport_cap *vport_caps;

	if (dr_domain_is_esw_mgr_vport(dmn, vport))
		return &caps->vports.esw_manager_caps;

	if (vport == MLX5_VPORT_UPLINK)
		return &caps->vports.uplink_caps;

vport_load:
	vport_caps = xa_load(&caps->vports.vports_caps_xa, vport);
	if (vport_caps)
		return vport_caps;

	vport_caps = dr_domain_add_vport_cap(dmn, vport);
	if (PTR_ERR(vport_caps) == -EBUSY)
		/* caps were already stored by another thread */
		goto vport_load;

	return vport_caps;
}

static void dr_domain_clear_vports(struct mlx5dr_domain *dmn)
{
	struct mlx5dr_cmd_vport_cap *vport_caps;
	unsigned long i;

	xa_for_each(&dmn->info.caps.vports.vports_caps_xa, i, vport_caps) {
		vport_caps = xa_erase(&dmn->info.caps.vports.vports_caps_xa, i);
		kvfree(vport_caps);
	}
}

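/* Query the eswitch caps needed for an FDB domain (SW ownership, drop ICM
 * addresses) and pre-fill the eswitch manager and uplink vport caps; all
 * other vports are queried on demand.
 */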
static int dr_domain_query_fdb_caps(struct mlx5_core_dev *mdev,
				    struct mlx5dr_domain *dmn)
{
	int ret;

	if (!dmn->info.caps.eswitch_manager)
		return -EOPNOTSUPP;

	ret = mlx5dr_cmd_query_esw_caps(mdev, &dmn->info.caps.esw_caps);
	if (ret)
		return ret;

	dmn->info.caps.fdb_sw_owner = dmn->info.caps.esw_caps.sw_owner;
	dmn->info.caps.fdb_sw_owner_v2 = dmn->info.caps.esw_caps.sw_owner_v2;
	dmn->info.caps.esw_rx_drop_address = dmn->info.caps.esw_caps.drop_icm_address_rx;
	dmn->info.caps.esw_tx_drop_address = dmn->info.caps.esw_caps.drop_icm_address_tx;

	xa_init(&dmn->info.caps.vports.vports_caps_xa);

	/* Query eswitch manager and uplink vports only. Rest of the
	 * vports (vport 0, VFs and SFs) will be queried dynamically.
	 */

	ret = dr_domain_query_esw_mngr(dmn);
	if (ret) {
		mlx5dr_err(dmn, "Failed to query eswitch manager vport caps (err: %d)", ret);
		goto free_vports_caps_xa;
	}

	dr_domain_query_uplink(dmn);

	return 0;

free_vports_caps_xa:
	xa_destroy(&dmn->info.caps.vports.vports_caps_xa);

	return ret;
}

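/* Query device and eswitch caps, then set the per-direction (RX/TX) default
 * and drop ICM addresses according to the domain type.
 */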
static int dr_domain_caps_init(struct mlx5_core_dev *mdev,
			       struct mlx5dr_domain *dmn)
{
	struct mlx5dr_cmd_vport_cap *vport_cap;
	int ret;

	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) {
		mlx5dr_err(dmn, "Failed to allocate domain, bad link type\n");
		return -EOPNOTSUPP;
	}

	ret = mlx5dr_cmd_query_device(mdev, &dmn->info.caps);
	if (ret)
		return ret;

	ret = dr_domain_query_fdb_caps(mdev, dmn);
	if (ret)
		return ret;

	switch (dmn->type) {
	case MLX5DR_DOMAIN_TYPE_NIC_RX:
		if (!DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, rx))
			return -ENOTSUPP;

		dmn->info.supp_sw_steering = true;
		dmn->info.rx.type = DR_DOMAIN_NIC_TYPE_RX;
		dmn->info.rx.default_icm_addr = dmn->info.caps.nic_rx_drop_address;
		dmn->info.rx.drop_icm_addr = dmn->info.caps.nic_rx_drop_address;
		break;
	case MLX5DR_DOMAIN_TYPE_NIC_TX:
		if (!DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, tx))
			return -ENOTSUPP;

		dmn->info.supp_sw_steering = true;
		dmn->info.tx.type = DR_DOMAIN_NIC_TYPE_TX;
		dmn->info.tx.default_icm_addr = dmn->info.caps.nic_tx_allow_address;
		dmn->info.tx.drop_icm_addr = dmn->info.caps.nic_tx_drop_address;
		break;
	case MLX5DR_DOMAIN_TYPE_FDB:
		if (!dmn->info.caps.eswitch_manager)
			return -ENOTSUPP;

		if (!DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, fdb))
			return -ENOTSUPP;

		dmn->info.rx.type = DR_DOMAIN_NIC_TYPE_RX;
		dmn->info.tx.type = DR_DOMAIN_NIC_TYPE_TX;
		vport_cap = &dmn->info.caps.vports.esw_manager_caps;

		dmn->info.supp_sw_steering = true;
		dmn->info.tx.default_icm_addr = vport_cap->icm_address_tx;
		dmn->info.rx.default_icm_addr = vport_cap->icm_address_rx;
		dmn->info.rx.drop_icm_addr = dmn->info.caps.esw_rx_drop_address;
		dmn->info.tx.drop_icm_addr = dmn->info.caps.esw_tx_drop_address;
		break;
	default:
		mlx5dr_err(dmn, "Invalid domain\n");
		ret = -EINVAL;
		break;
	}

	return ret;
}

static void dr_domain_caps_uninit(struct mlx5dr_domain *dmn)
{
	dr_domain_clear_vports(dmn);
	xa_destroy(&dmn->info.caps.vports.vports_caps_xa);
}

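/* Create a SW steering domain of the given type on the given device: query
 * caps, allocate HW resources and initialize the checksum-recalc tables and
 * dump support. Returns NULL on failure.
 */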
struct mlx5dr_domain *
mlx5dr_domain_create(struct mlx5_core_dev *mdev, enum mlx5dr_domain_type type)
{
	struct mlx5dr_domain *dmn;
	int ret;

	if (type > MLX5DR_DOMAIN_TYPE_FDB)
		return NULL;

	dmn = kzalloc(sizeof(*dmn), GFP_KERNEL);
	if (!dmn)
		return NULL;

	dmn->mdev = mdev;
	dmn->type = type;
	refcount_set(&dmn->refcount, 1);
	mutex_init(&dmn->info.rx.mutex);
	mutex_init(&dmn->info.tx.mutex);

	if (dr_domain_caps_init(mdev, dmn)) {
		mlx5dr_err(dmn, "Failed init domain, no caps\n");
		goto free_domain;
	}

	dmn->info.max_log_action_icm_sz = DR_CHUNK_SIZE_4K;
	dmn->info.max_log_sw_icm_sz = min_t(u32, DR_CHUNK_SIZE_1024K,
					    dmn->info.caps.log_icm_size);

	if (!dmn->info.supp_sw_steering) {
		mlx5dr_err(dmn, "SW steering is not supported\n");
		goto uninit_caps;
	}

	/* Allocate resources */
	ret = dr_domain_init_resources(dmn);
	if (ret) {
		mlx5dr_err(dmn, "Failed init domain resources\n");
		goto uninit_caps;
	}

	dr_domain_init_csum_recalc_fts(dmn);
	mlx5dr_dbg_init_dump(dmn);
	return dmn;

uninit_caps:
	dr_domain_caps_uninit(dmn);
free_domain:
	kfree(dmn);
	return NULL;
}

/* Assure synchronization of the device steering tables with updates made by SW
 * insertion.
 */
int mlx5dr_domain_sync(struct mlx5dr_domain *dmn, u32 flags)
{
	int ret = 0;

	if (flags & MLX5DR_DOMAIN_SYNC_FLAGS_SW) {
		mlx5dr_domain_lock(dmn);
		ret = mlx5dr_send_ring_force_drain(dmn);
		mlx5dr_domain_unlock(dmn);
		if (ret) {
			mlx5dr_err(dmn, "Force drain failed flags: %d, ret: %d\n",
				   flags, ret);
			return ret;
		}
	}

	if (flags & MLX5DR_DOMAIN_SYNC_FLAGS_HW)
		ret = mlx5dr_cmd_sync_steering(dmn->mdev);

	return ret;
}

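/* Destroy a domain and release all of its resources. Fails with -EBUSY if
 * the domain refcount shows it is still in use.
 */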
int mlx5dr_domain_destroy(struct mlx5dr_domain *dmn)
{
	if (WARN_ON_ONCE(refcount_read(&dmn->refcount) > 1))
		return -EBUSY;

	/* make sure resources are not used by the hardware */
	mlx5dr_cmd_sync_steering(dmn->mdev);
	mlx5dr_dbg_uninit_dump(dmn);
	dr_domain_uninit_csum_recalc_fts(dmn);
	dr_domain_uninit_resources(dmn);
	dr_domain_caps_uninit(dmn);
	mutex_destroy(&dmn->info.tx.mutex);
	mutex_destroy(&dmn->info.rx.mutex);
	kfree(dmn);
	return 0;
}

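/* Set, replace or clear (peer_dmn == NULL) the peer domain, dropping the
 * reference on the previous peer and taking one on the new peer.
 */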
void mlx5dr_domain_set_peer(struct mlx5dr_domain *dmn,
			    struct mlx5dr_domain *peer_dmn)
{
	mlx5dr_domain_lock(dmn);

	if (dmn->peer_dmn)
		refcount_dec(&dmn->peer_dmn->refcount);

	dmn->peer_dmn = peer_dmn;

	if (dmn->peer_dmn)
		refcount_inc(&dmn->peer_dmn->refcount);

	mlx5dr_domain_unlock(dmn);
}