// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2019 Mellanox Technologies. */

#include <linux/mlx5/eswitch.h>
#include <linux/err.h>
#include "dr_types.h"

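/* SW steering is supported on a domain if FW exposes the relevant sw_owner
 * bit, or sw_owner_v2 together with a steering format version this driver
 * can handle (up to ConnectX-7).
 */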
#define DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, dmn_type)	\
	((dmn)->info.caps.dmn_type##_sw_owner ||	\
	 ((dmn)->info.caps.dmn_type##_sw_owner_v2 &&	\
	  (dmn)->info.caps.sw_format_ver <= MLX5_STEERING_FORMAT_CONNECTX_7))

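/* Modify-header pattern/argument support is never reported here: the helper
 * always returns false, so the pattern manager is not created for any domain.
 */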
bool mlx5dr_domain_is_support_ptrn_arg(struct mlx5dr_domain *dmn)
{
	return false;
}

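/* Create the modify-header pattern manager. This is a no-op on domains that
 * don't support pattern/argument based modify-header actions.
 */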
static int dr_domain_init_modify_header_resources(struct mlx5dr_domain *dmn)
{
	if (!mlx5dr_domain_is_support_ptrn_arg(dmn))
		return 0;

	dmn->ptrn_mgr = mlx5dr_ptrn_mgr_create(dmn);
	if (!dmn->ptrn_mgr) {
		mlx5dr_err(dmn, "Couldn't create ptrn_mgr\n");
		return -ENOMEM;
	}

	return 0;
}

static void dr_domain_destroy_modify_header_resources(struct mlx5dr_domain *dmn)
{
	if (!mlx5dr_domain_is_support_ptrn_arg(dmn))
		return;

	mlx5dr_ptrn_mgr_destroy(dmn->ptrn_mgr);
}

static void dr_domain_init_csum_recalc_fts(struct mlx5dr_domain *dmn)
{
	/* Per-vport cache of FW FTs for checksum recalculation; this
	 * recalculation is needed due to a HW bug in STEv0.
	 */
	xa_init(&dmn->csum_fts_xa);
}

static void dr_domain_uninit_csum_recalc_fts(struct mlx5dr_domain *dmn)
{
	struct mlx5dr_fw_recalc_cs_ft *recalc_cs_ft;
	unsigned long i;

	xa_for_each(&dmn->csum_fts_xa, i, recalc_cs_ft) {
		if (recalc_cs_ft)
			mlx5dr_fw_destroy_recalc_cs_ft(dmn, recalc_cs_ft);
	}

	xa_destroy(&dmn->csum_fts_xa);
}

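/* Return the RX ICM address of the checksum-recalculation FW flow table for
 * the given vport, creating and caching the table on first use.
 */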
int mlx5dr_domain_get_recalc_cs_ft_addr(struct mlx5dr_domain *dmn,
					u16 vport_num,
					u64 *rx_icm_addr)
{
	struct mlx5dr_fw_recalc_cs_ft *recalc_cs_ft;
	int ret;

	recalc_cs_ft = xa_load(&dmn->csum_fts_xa, vport_num);
	if (!recalc_cs_ft) {
		/* Table hasn't been created yet */
		recalc_cs_ft = mlx5dr_fw_create_recalc_cs_ft(dmn, vport_num);
		if (!recalc_cs_ft)
			return -EINVAL;

		ret = xa_err(xa_store(&dmn->csum_fts_xa, vport_num,
				      recalc_cs_ft, GFP_KERNEL));
		if (ret)
			return ret;
	}

	*rx_icm_addr = recalc_cs_ft->rx_icm_addr;

	return 0;
}

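/* Allocate the domain memory resources: kmem caches for ICM chunks and STE
 * hash tables, the STE and modify-action ICM pools, and the send info pool.
 */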
static int dr_domain_init_mem_resources(struct mlx5dr_domain *dmn)
{
	int ret;

	dmn->chunks_kmem_cache = kmem_cache_create("mlx5_dr_chunks",
						   sizeof(struct mlx5dr_icm_chunk), 0,
						   SLAB_HWCACHE_ALIGN, NULL);
	if (!dmn->chunks_kmem_cache) {
		mlx5dr_err(dmn, "Couldn't create chunks kmem_cache\n");
		return -ENOMEM;
	}

	dmn->htbls_kmem_cache = kmem_cache_create("mlx5_dr_htbls",
						  sizeof(struct mlx5dr_ste_htbl), 0,
						  SLAB_HWCACHE_ALIGN, NULL);
	if (!dmn->htbls_kmem_cache) {
		mlx5dr_err(dmn, "Couldn't create hash tables kmem_cache\n");
		ret = -ENOMEM;
		goto free_chunks_kmem_cache;
	}

	dmn->ste_icm_pool = mlx5dr_icm_pool_create(dmn, DR_ICM_TYPE_STE);
	if (!dmn->ste_icm_pool) {
		mlx5dr_err(dmn, "Couldn't get icm memory\n");
		ret = -ENOMEM;
		goto free_htbls_kmem_cache;
	}

	dmn->action_icm_pool = mlx5dr_icm_pool_create(dmn, DR_ICM_TYPE_MODIFY_ACTION);
	if (!dmn->action_icm_pool) {
		mlx5dr_err(dmn, "Couldn't get action icm memory\n");
		ret = -ENOMEM;
		goto free_ste_icm_pool;
	}

	ret = mlx5dr_send_info_pool_create(dmn);
	if (ret) {
		mlx5dr_err(dmn, "Couldn't create send info pool\n");
		goto free_action_icm_pool;
	}

	return 0;

free_action_icm_pool:
	mlx5dr_icm_pool_destroy(dmn->action_icm_pool);
free_ste_icm_pool:
	mlx5dr_icm_pool_destroy(dmn->ste_icm_pool);
free_htbls_kmem_cache:
	kmem_cache_destroy(dmn->htbls_kmem_cache);
free_chunks_kmem_cache:
	kmem_cache_destroy(dmn->chunks_kmem_cache);

	return ret;
}

static void dr_domain_uninit_mem_resources(struct mlx5dr_domain *dmn)
{
	mlx5dr_send_info_pool_destroy(dmn);
	mlx5dr_icm_pool_destroy(dmn->action_icm_pool);
	mlx5dr_icm_pool_destroy(dmn->ste_icm_pool);
	kmem_cache_destroy(dmn->htbls_kmem_cache);
	kmem_cache_destroy(dmn->chunks_kmem_cache);
}

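/* Set up the domain resources: STE context, PD, UAR page, memory pools,
 * modify-header resources and the send ring used for writing STEs to the
 * device.
 */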
static int dr_domain_init_resources(struct mlx5dr_domain *dmn)
{
	int ret;

	dmn->ste_ctx = mlx5dr_ste_get_ctx(dmn->info.caps.sw_format_ver);
	if (!dmn->ste_ctx) {
		mlx5dr_err(dmn, "SW Steering on this device is unsupported\n");
		return -EOPNOTSUPP;
	}

	ret = mlx5_core_alloc_pd(dmn->mdev, &dmn->pdn);
	if (ret) {
		mlx5dr_err(dmn, "Couldn't allocate PD, ret: %d", ret);
		return ret;
	}

	dmn->uar = mlx5_get_uars_page(dmn->mdev);
	if (IS_ERR(dmn->uar)) {
		mlx5dr_err(dmn, "Couldn't allocate UAR\n");
		ret = PTR_ERR(dmn->uar);
		goto clean_pd;
	}

	ret = dr_domain_init_mem_resources(dmn);
	if (ret) {
		mlx5dr_err(dmn, "Couldn't create domain memory resources\n");
		goto clean_uar;
	}

	ret = dr_domain_init_modify_header_resources(dmn);
	if (ret) {
		mlx5dr_err(dmn, "Couldn't create modify-header-resources\n");
		goto clean_mem_resources;
	}

	ret = mlx5dr_send_ring_alloc(dmn);
	if (ret) {
		mlx5dr_err(dmn, "Couldn't create send-ring\n");
		goto clean_modify_hdr;
	}

	return 0;

clean_modify_hdr:
	dr_domain_destroy_modify_header_resources(dmn);
clean_mem_resources:
	dr_domain_uninit_mem_resources(dmn);
clean_uar:
	mlx5_put_uars_page(dmn->mdev, dmn->uar);
clean_pd:
	mlx5_core_dealloc_pd(dmn->mdev, dmn->pdn);

	return ret;
}

static void dr_domain_uninit_resources(struct mlx5dr_domain *dmn)
{
	mlx5dr_send_ring_free(dmn, dmn->send_ring);
	dr_domain_destroy_modify_header_resources(dmn);
	dr_domain_uninit_mem_resources(dmn);
	mlx5_put_uars_page(dmn->mdev, dmn->uar);
	mlx5_core_dealloc_pd(dmn->mdev, dmn->pdn);
}

static void dr_domain_fill_uplink_caps(struct mlx5dr_domain *dmn,
				       struct mlx5dr_cmd_vport_cap *uplink_vport)
{
	struct mlx5dr_esw_caps *esw_caps = &dmn->info.caps.esw_caps;

	uplink_vport->num = MLX5_VPORT_UPLINK;
	uplink_vport->icm_address_rx = esw_caps->uplink_icm_address_rx;
	uplink_vport->icm_address_tx = esw_caps->uplink_icm_address_tx;
	uplink_vport->vport_gvmi = 0;
	uplink_vport->vhca_gvmi = dmn->info.caps.gvmi;
}

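/* Query a vport's RX/TX steering ICM addresses and GVMI from FW and fill
 * its capabilities structure.
 */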
static int dr_domain_query_vport(struct mlx5dr_domain *dmn,
				 u16 vport_number,
				 bool other_vport,
				 struct mlx5dr_cmd_vport_cap *vport_caps)
{
	int ret;

	ret = mlx5dr_cmd_query_esw_vport_context(dmn->mdev,
						 other_vport,
						 vport_number,
						 &vport_caps->icm_address_rx,
						 &vport_caps->icm_address_tx);
	if (ret)
		return ret;

	ret = mlx5dr_cmd_query_gvmi(dmn->mdev,
				    other_vport,
				    vport_number,
				    &vport_caps->vport_gvmi);
	if (ret)
		return ret;

	vport_caps->num = vport_number;
	vport_caps->vhca_gvmi = dmn->info.caps.gvmi;

	return 0;
}

static int dr_domain_query_esw_mgr(struct mlx5dr_domain *dmn)
{
	return dr_domain_query_vport(dmn, 0, false,
				     &dmn->info.caps.vports.esw_manager_caps);
}

static void dr_domain_query_uplink(struct mlx5dr_domain *dmn)
{
	dr_domain_fill_uplink_caps(dmn, &dmn->info.caps.vports.uplink_caps);
}

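/* Query the caps of a vport that is not cached yet and insert them into the
 * vports xarray. Returns NULL if the allocation or FW query fails, or an
 * ERR_PTR() on xarray insertion errors (-EBUSY when another thread has
 * already inserted the entry).
 */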
static struct mlx5dr_cmd_vport_cap *
dr_domain_add_vport_cap(struct mlx5dr_domain *dmn, u16 vport)
{
	struct mlx5dr_cmd_caps *caps = &dmn->info.caps;
	struct mlx5dr_cmd_vport_cap *vport_caps;
	int ret;

	vport_caps = kvzalloc(sizeof(*vport_caps), GFP_KERNEL);
	if (!vport_caps)
		return NULL;

	ret = dr_domain_query_vport(dmn, vport, true, vport_caps);
	if (ret) {
		kvfree(vport_caps);
		return NULL;
	}

	ret = xa_insert(&caps->vports.vports_caps_xa, vport,
			vport_caps, GFP_KERNEL);
	if (ret) {
		mlx5dr_dbg(dmn, "Couldn't insert new vport into xarray (%d)\n", ret);
		kvfree(vport_caps);
		return ERR_PTR(ret);
	}

	return vport_caps;
}

static bool dr_domain_is_esw_mgr_vport(struct mlx5dr_domain *dmn, u16 vport)
{
	struct mlx5dr_cmd_caps *caps = &dmn->info.caps;

	return (caps->is_ecpf && vport == MLX5_VPORT_ECPF) ||
	       (!caps->is_ecpf && vport == 0);
}

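/* Get the cached caps of a vport, querying and caching them on first use.
 * The eswitch manager and uplink vports are kept outside of the vports
 * xarray.
 */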
struct mlx5dr_cmd_vport_cap *
mlx5dr_domain_get_vport_cap(struct mlx5dr_domain *dmn, u16 vport)
{
	struct mlx5dr_cmd_caps *caps = &dmn->info.caps;
	struct mlx5dr_cmd_vport_cap *vport_caps;

	if (dr_domain_is_esw_mgr_vport(dmn, vport))
		return &caps->vports.esw_manager_caps;

	if (vport == MLX5_VPORT_UPLINK)
		return &caps->vports.uplink_caps;

vport_load:
	vport_caps = xa_load(&caps->vports.vports_caps_xa, vport);
	if (vport_caps)
		return vport_caps;

	vport_caps = dr_domain_add_vport_cap(dmn, vport);
	if (PTR_ERR(vport_caps) == -EBUSY)
		/* caps were already stored by another thread */
		goto vport_load;

	return vport_caps;
}

static void dr_domain_clear_vports(struct mlx5dr_domain *dmn)
{
	struct mlx5dr_cmd_vport_cap *vport_caps;
	unsigned long i;

	xa_for_each(&dmn->info.caps.vports.vports_caps_xa, i, vport_caps) {
		vport_caps = xa_erase(&dmn->info.caps.vports.vports_caps_xa, i);
		kvfree(vport_caps);
	}
}

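/* Query FDB-related caps: eswitch caps (sw_owner, drop ICM addresses) and
 * the eswitch manager and uplink vport caps.
 */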
static int dr_domain_query_fdb_caps(struct mlx5_core_dev *mdev,
				    struct mlx5dr_domain *dmn)
{
	int ret;

	if (!dmn->info.caps.eswitch_manager)
		return -EOPNOTSUPP;

	ret = mlx5dr_cmd_query_esw_caps(mdev, &dmn->info.caps.esw_caps);
	if (ret)
		return ret;

	dmn->info.caps.fdb_sw_owner = dmn->info.caps.esw_caps.sw_owner;
	dmn->info.caps.fdb_sw_owner_v2 = dmn->info.caps.esw_caps.sw_owner_v2;
	dmn->info.caps.esw_rx_drop_address = dmn->info.caps.esw_caps.drop_icm_address_rx;
	dmn->info.caps.esw_tx_drop_address = dmn->info.caps.esw_caps.drop_icm_address_tx;

	xa_init(&dmn->info.caps.vports.vports_caps_xa);

	/* Query eswitch manager and uplink vports only. The rest of the
	 * vports (vport 0, VFs and SFs) will be queried dynamically.
	 */

	ret = dr_domain_query_esw_mgr(dmn);
	if (ret) {
		mlx5dr_err(dmn, "Failed to query eswitch manager vport caps (err: %d)", ret);
		goto free_vports_caps_xa;
	}

	dr_domain_query_uplink(dmn);

	return 0;

free_vports_caps_xa:
	xa_destroy(&dmn->info.caps.vports.vports_caps_xa);

	return ret;
}

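/* Query device and eswitch caps and set the per-domain defaults (default
 * and drop ICM addresses) according to the domain type.
 */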
static int dr_domain_caps_init(struct mlx5_core_dev *mdev,
			       struct mlx5dr_domain *dmn)
{
	struct mlx5dr_cmd_vport_cap *vport_cap;
	int ret;

	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) {
		mlx5dr_err(dmn, "Failed to allocate domain, bad link type\n");
		return -EOPNOTSUPP;
	}

	ret = mlx5dr_cmd_query_device(mdev, &dmn->info.caps);
	if (ret)
		return ret;

	ret = dr_domain_query_fdb_caps(mdev, dmn);
	if (ret)
		return ret;

	switch (dmn->type) {
	case MLX5DR_DOMAIN_TYPE_NIC_RX:
		if (!DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, rx))
			return -ENOTSUPP;

		dmn->info.supp_sw_steering = true;
		dmn->info.rx.type = DR_DOMAIN_NIC_TYPE_RX;
		dmn->info.rx.default_icm_addr = dmn->info.caps.nic_rx_drop_address;
		dmn->info.rx.drop_icm_addr = dmn->info.caps.nic_rx_drop_address;
		break;
	case MLX5DR_DOMAIN_TYPE_NIC_TX:
		if (!DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, tx))
			return -ENOTSUPP;

		dmn->info.supp_sw_steering = true;
		dmn->info.tx.type = DR_DOMAIN_NIC_TYPE_TX;
		dmn->info.tx.default_icm_addr = dmn->info.caps.nic_tx_allow_address;
		dmn->info.tx.drop_icm_addr = dmn->info.caps.nic_tx_drop_address;
		break;
	case MLX5DR_DOMAIN_TYPE_FDB:
		if (!dmn->info.caps.eswitch_manager)
			return -ENOTSUPP;

		if (!DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, fdb))
			return -ENOTSUPP;

		dmn->info.rx.type = DR_DOMAIN_NIC_TYPE_RX;
		dmn->info.tx.type = DR_DOMAIN_NIC_TYPE_TX;
		vport_cap = &dmn->info.caps.vports.esw_manager_caps;

		dmn->info.supp_sw_steering = true;
		dmn->info.tx.default_icm_addr = vport_cap->icm_address_tx;
		dmn->info.rx.default_icm_addr = vport_cap->icm_address_rx;
		dmn->info.rx.drop_icm_addr = dmn->info.caps.esw_rx_drop_address;
		dmn->info.tx.drop_icm_addr = dmn->info.caps.esw_tx_drop_address;
		break;
	default:
		mlx5dr_err(dmn, "Invalid domain\n");
		ret = -EINVAL;
		break;
	}

	return ret;
}

static void dr_domain_caps_uninit(struct mlx5dr_domain *dmn)
{
	dr_domain_clear_vports(dmn);
	xa_destroy(&dmn->info.caps.vports.vports_caps_xa);
}

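/* Create a SW steering domain of the given type and allocate all of its
 * resources. Returns NULL on failure.
 */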
struct mlx5dr_domain *
mlx5dr_domain_create(struct mlx5_core_dev *mdev, enum mlx5dr_domain_type type)
{
	struct mlx5dr_domain *dmn;
	int ret;

	if (type > MLX5DR_DOMAIN_TYPE_FDB)
		return NULL;

	dmn = kzalloc(sizeof(*dmn), GFP_KERNEL);
	if (!dmn)
		return NULL;

	dmn->mdev = mdev;
	dmn->type = type;
	refcount_set(&dmn->refcount, 1);
	mutex_init(&dmn->info.rx.mutex);
	mutex_init(&dmn->info.tx.mutex);
	xa_init(&dmn->definers_xa);

	if (dr_domain_caps_init(mdev, dmn)) {
		mlx5dr_err(dmn, "Failed init domain, no caps\n");
		goto def_xa_destroy;
	}

	dmn->info.max_log_action_icm_sz = DR_CHUNK_SIZE_4K;
	dmn->info.max_log_sw_icm_sz = min_t(u32, DR_CHUNK_SIZE_1024K,
					    dmn->info.caps.log_icm_size);
	dmn->info.max_log_modify_hdr_pattern_icm_sz =
		min_t(u32, DR_CHUNK_SIZE_4K,
		      dmn->info.caps.log_modify_pattern_icm_size);

	if (!dmn->info.supp_sw_steering) {
		mlx5dr_err(dmn, "SW steering is not supported\n");
		goto uninit_caps;
	}

	/* Allocate resources */
	ret = dr_domain_init_resources(dmn);
	if (ret) {
		mlx5dr_err(dmn, "Failed init domain resources\n");
		goto uninit_caps;
	}

	dr_domain_init_csum_recalc_fts(dmn);
	mlx5dr_dbg_init_dump(dmn);
	return dmn;

uninit_caps:
	dr_domain_caps_uninit(dmn);
def_xa_destroy:
	xa_destroy(&dmn->definers_xa);
	kfree(dmn);
	return NULL;
}

/* Ensure synchronization of the device steering tables with updates made by
 * SW insertion.
 */
int mlx5dr_domain_sync(struct mlx5dr_domain *dmn, u32 flags)
{
	int ret = 0;

	if (flags & MLX5DR_DOMAIN_SYNC_FLAGS_SW) {
		mlx5dr_domain_lock(dmn);
		ret = mlx5dr_send_ring_force_drain(dmn);
		mlx5dr_domain_unlock(dmn);
		if (ret) {
			mlx5dr_err(dmn, "Force drain failed flags: %d, ret: %d\n",
				   flags, ret);
			return ret;
		}
	}

	if (flags & MLX5DR_DOMAIN_SYNC_FLAGS_HW)
		ret = mlx5dr_cmd_sync_steering(dmn->mdev);

	return ret;
}

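/* Destroy a domain and free all of its resources. Fails with -EBUSY if the
 * domain is still referenced.
 */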
int mlx5dr_domain_destroy(struct mlx5dr_domain *dmn)
{
	if (WARN_ON_ONCE(refcount_read(&dmn->refcount) > 1))
		return -EBUSY;

	/* make sure resources are not used by the hardware */
	mlx5dr_cmd_sync_steering(dmn->mdev);
	mlx5dr_dbg_uninit_dump(dmn);
	dr_domain_uninit_csum_recalc_fts(dmn);
	dr_domain_uninit_resources(dmn);
	dr_domain_caps_uninit(dmn);
	xa_destroy(&dmn->definers_xa);
	mutex_destroy(&dmn->info.tx.mutex);
	mutex_destroy(&dmn->info.rx.mutex);
	kfree(dmn);
	return 0;
}

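/* Set, replace or clear (with a NULL peer) the domain's peer domain,
 * updating the peer refcounts accordingly.
 */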
void mlx5dr_domain_set_peer(struct mlx5dr_domain *dmn,
			    struct mlx5dr_domain *peer_dmn)
{
	mlx5dr_domain_lock(dmn);

	if (dmn->peer_dmn)
		refcount_dec(&dmn->peer_dmn->refcount);

	dmn->peer_dmn = peer_dmn;

	if (dmn->peer_dmn)
		refcount_inc(&dmn->peer_dmn->refcount);

	mlx5dr_domain_unlock(dmn);
}