// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2019 Mellanox Technologies. */

#include <linux/mlx5/eswitch.h>
#include "dr_types.h"

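/* A domain supports SW steering if, for its type (rx/tx/fdb), the device
 * reports the sw_owner cap, or the sw_owner_v2 cap on steering formats up
 * to ConnectX-6 Dx. The dmn_type argument is token-pasted into the cap
 * field name.
 */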
#define DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, dmn_type)	\
	((dmn)->info.caps.dmn_type##_sw_owner ||	\
	 ((dmn)->info.caps.dmn_type##_sw_owner_v2 &&	\
	  (dmn)->info.caps.sw_format_ver <= MLX5_STEERING_FORMAT_CONNECTX_6DX))

static int dr_domain_init_cache(struct mlx5dr_domain *dmn)
{
	/* Per-vport cached FW FT for checksum recalculation; this
	 * recalculation is needed due to a HW bug.
	 */
	dmn->cache.recalc_cs_ft = kcalloc(dmn->info.caps.num_vports,
					  sizeof(dmn->cache.recalc_cs_ft[0]),
					  GFP_KERNEL);
	if (!dmn->cache.recalc_cs_ft)
		return -ENOMEM;

	return 0;
}

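/* Destroy any recalc-cs flow tables that were lazily created per vport,
 * then free the cache array itself.
 */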
static void dr_domain_uninit_cache(struct mlx5dr_domain *dmn)
{
	int i;

	for (i = 0; i < dmn->info.caps.num_vports; i++) {
		if (!dmn->cache.recalc_cs_ft[i])
			continue;

		mlx5dr_fw_destroy_recalc_cs_ft(dmn, dmn->cache.recalc_cs_ft[i]);
	}

	kfree(dmn->cache.recalc_cs_ft);
}

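/* Return the RX ICM address of the recalc-cs flow table of @vport_num,
 * creating the table and caching it on first use.
 */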
int mlx5dr_domain_cache_get_recalc_cs_ft_addr(struct mlx5dr_domain *dmn,
					      u16 vport_num,
					      u64 *rx_icm_addr)
{
	struct mlx5dr_fw_recalc_cs_ft *recalc_cs_ft;

	recalc_cs_ft = dmn->cache.recalc_cs_ft[vport_num];
	if (!recalc_cs_ft) {
		/* Table not in cache, need to allocate a new one */
		recalc_cs_ft = mlx5dr_fw_create_recalc_cs_ft(dmn, vport_num);
		if (!recalc_cs_ft)
			return -EINVAL;

		dmn->cache.recalc_cs_ft[vport_num] = recalc_cs_ft;
	}

	*rx_icm_addr = recalc_cs_ft->rx_icm_addr;

	return 0;
}

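/* Allocate the domain's core resources: the format-specific STE context,
 * a PD, a UAR page, the STE and modify-action ICM pools, and the send
 * ring used to write steering entries to the device.
 */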
static int dr_domain_init_resources(struct mlx5dr_domain *dmn)
{
	int ret;

	dmn->ste_ctx = mlx5dr_ste_get_ctx(dmn->info.caps.sw_format_ver);
	if (!dmn->ste_ctx) {
		mlx5dr_err(dmn, "SW Steering on this device is unsupported\n");
		return -EOPNOTSUPP;
	}

	ret = mlx5_core_alloc_pd(dmn->mdev, &dmn->pdn);
	if (ret) {
		mlx5dr_err(dmn, "Couldn't allocate PD, ret: %d\n", ret);
		return ret;
	}

	dmn->uar = mlx5_get_uars_page(dmn->mdev);
	if (!dmn->uar) {
		mlx5dr_err(dmn, "Couldn't allocate UAR\n");
		ret = -ENOMEM;
		goto clean_pd;
	}

	dmn->ste_icm_pool = mlx5dr_icm_pool_create(dmn, DR_ICM_TYPE_STE);
	if (!dmn->ste_icm_pool) {
		mlx5dr_err(dmn, "Couldn't get icm memory\n");
		ret = -ENOMEM;
		goto clean_uar;
	}

	dmn->action_icm_pool = mlx5dr_icm_pool_create(dmn, DR_ICM_TYPE_MODIFY_ACTION);
	if (!dmn->action_icm_pool) {
		mlx5dr_err(dmn, "Couldn't get action icm memory\n");
		ret = -ENOMEM;
		goto free_ste_icm_pool;
	}

	ret = mlx5dr_send_ring_alloc(dmn);
	if (ret) {
		mlx5dr_err(dmn, "Couldn't create send-ring\n");
		goto free_action_icm_pool;
	}

	return 0;

free_action_icm_pool:
	mlx5dr_icm_pool_destroy(dmn->action_icm_pool);
free_ste_icm_pool:
	mlx5dr_icm_pool_destroy(dmn->ste_icm_pool);
clean_uar:
	mlx5_put_uars_page(dmn->mdev, dmn->uar);
clean_pd:
	mlx5_core_dealloc_pd(dmn->mdev, dmn->pdn);

	return ret;
}

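/* Release the domain's core resources in reverse order of allocation. */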
static void dr_domain_uninit_resources(struct mlx5dr_domain *dmn)
{
	mlx5dr_send_ring_free(dmn, dmn->send_ring);
	mlx5dr_icm_pool_destroy(dmn->action_icm_pool);
	mlx5dr_icm_pool_destroy(dmn->ste_icm_pool);
	mlx5_put_uars_page(dmn->mdev, dmn->uar);
	mlx5_core_dealloc_pd(dmn->mdev, dmn->pdn);
}

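/* Query the RX/TX ICM addresses and GVMI of a single vport. On an ECPF
 * device the ECPF vport is queried as "self" (other_vport false, vport 0)
 * rather than as another vport.
 */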
static int dr_domain_query_vport(struct mlx5dr_domain *dmn,
				 u16 vport_number,
				 struct mlx5dr_cmd_vport_cap *vport_caps)
{
	u16 cmd_vport = vport_number;
	bool other_vport = true;
	int ret;

	if (dmn->info.caps.is_ecpf && vport_number == MLX5_VPORT_ECPF) {
		other_vport = false;
		cmd_vport = 0;
	}

	ret = mlx5dr_cmd_query_esw_vport_context(dmn->mdev,
						 other_vport,
						 cmd_vport,
						 &vport_caps->icm_address_rx,
						 &vport_caps->icm_address_tx);
	if (ret)
		return ret;

	ret = mlx5dr_cmd_query_gvmi(dmn->mdev,
				    other_vport,
				    cmd_vport,
				    &vport_caps->vport_gvmi);
	if (ret)
		return ret;

	vport_caps->num = vport_number;
	vport_caps->vhca_gvmi = dmn->info.caps.gvmi;

	return 0;
}

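/* The eswitch manager vport is the ECPF on ECPF devices, vport 0 otherwise. */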
static int dr_domain_query_esw_mngr(struct mlx5dr_domain *dmn)
{
	return dr_domain_query_vport(dmn,
				     dmn->info.caps.is_ecpf ? MLX5_VPORT_ECPF : 0,
				     &dmn->info.caps.esw_manager_vport_caps);
}

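/* Query caps for the eswitch manager and for every vport. The last entry
 * of vports_caps describes the wire (uplink) port, whose ICM addresses
 * come from the eswitch caps rather than from a per-vport query.
 */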
static int dr_domain_query_vports(struct mlx5dr_domain *dmn)
{
	struct mlx5dr_esw_caps *esw_caps = &dmn->info.caps.esw_caps;
	struct mlx5dr_cmd_vport_cap *wire_vport;
	int vport;
	int ret;

	ret = dr_domain_query_esw_mngr(dmn);
	if (ret)
		return ret;

	/* Query vports (except wire vport) */
	for (vport = 0; vport < dmn->info.caps.num_esw_ports - 1; vport++) {
		ret = dr_domain_query_vport(dmn,
					    vport,
					    &dmn->info.caps.vports_caps[vport]);
		if (ret)
			return ret;
	}

	/* Last vport is the wire port */
	wire_vport = &dmn->info.caps.vports_caps[vport];
	wire_vport->num = MLX5_VPORT_UPLINK;
	wire_vport->icm_address_rx = esw_caps->uplink_icm_address_rx;
	wire_vport->icm_address_tx = esw_caps->uplink_icm_address_tx;
	wire_vport->vport_gvmi = 0;
	wire_vport->vhca_gvmi = dmn->info.caps.gvmi;

	return 0;
}

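/* FDB steering caps are only available to the eswitch manager. Cache the
 * eswitch-wide caps and allocate and fill the per-vport caps array.
 */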
static int dr_domain_query_fdb_caps(struct mlx5_core_dev *mdev,
				    struct mlx5dr_domain *dmn)
{
	int ret;

	if (!dmn->info.caps.eswitch_manager)
		return -EOPNOTSUPP;

	ret = mlx5dr_cmd_query_esw_caps(mdev, &dmn->info.caps.esw_caps);
	if (ret)
		return ret;

	dmn->info.caps.fdb_sw_owner = dmn->info.caps.esw_caps.sw_owner;
	dmn->info.caps.fdb_sw_owner_v2 = dmn->info.caps.esw_caps.sw_owner_v2;
	dmn->info.caps.esw_rx_drop_address = dmn->info.caps.esw_caps.drop_icm_address_rx;
	dmn->info.caps.esw_tx_drop_address = dmn->info.caps.esw_caps.drop_icm_address_tx;

	dmn->info.caps.vports_caps = kcalloc(dmn->info.caps.num_esw_ports,
					     sizeof(dmn->info.caps.vports_caps[0]),
					     GFP_KERNEL);
	if (!dmn->info.caps.vports_caps)
		return -ENOMEM;

	ret = dr_domain_query_vports(dmn);
	if (ret) {
		mlx5dr_err(dmn, "Failed to query vports caps (err: %d)\n", ret);
		goto free_vports_caps;
	}

	dmn->info.caps.num_vports = dmn->info.caps.num_esw_ports - 1;

	return 0;

free_vports_caps:
	kfree(dmn->info.caps.vports_caps);
	dmn->info.caps.vports_caps = NULL;
	return ret;
}

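/* Query device, eswitch and vport caps, then derive per-domain-type
 * defaults: the default and drop ICM addresses used as miss targets.
 */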
static int dr_domain_caps_init(struct mlx5_core_dev *mdev,
			       struct mlx5dr_domain *dmn)
{
	struct mlx5dr_cmd_vport_cap *vport_cap;
	int ret;

	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) {
		mlx5dr_err(dmn, "Failed to allocate domain, bad link type\n");
		return -EOPNOTSUPP;
	}

	dmn->info.caps.num_esw_ports = mlx5_eswitch_get_total_vports(mdev);

	ret = mlx5dr_cmd_query_device(mdev, &dmn->info.caps);
	if (ret)
		return ret;

	ret = dr_domain_query_fdb_caps(mdev, dmn);
	if (ret)
		return ret;

	switch (dmn->type) {
	case MLX5DR_DOMAIN_TYPE_NIC_RX:
		if (!DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, rx))
			return -EOPNOTSUPP;

		dmn->info.supp_sw_steering = true;
		dmn->info.rx.type = DR_DOMAIN_NIC_TYPE_RX;
		dmn->info.rx.default_icm_addr = dmn->info.caps.nic_rx_drop_address;
		dmn->info.rx.drop_icm_addr = dmn->info.caps.nic_rx_drop_address;
		break;
	case MLX5DR_DOMAIN_TYPE_NIC_TX:
		if (!DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, tx))
			return -EOPNOTSUPP;

		dmn->info.supp_sw_steering = true;
		dmn->info.tx.type = DR_DOMAIN_NIC_TYPE_TX;
		dmn->info.tx.default_icm_addr = dmn->info.caps.nic_tx_allow_address;
		dmn->info.tx.drop_icm_addr = dmn->info.caps.nic_tx_drop_address;
		break;
	case MLX5DR_DOMAIN_TYPE_FDB:
		if (!dmn->info.caps.eswitch_manager)
			return -EOPNOTSUPP;

		if (!DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, fdb))
			return -EOPNOTSUPP;

		dmn->info.rx.type = DR_DOMAIN_NIC_TYPE_RX;
		dmn->info.tx.type = DR_DOMAIN_NIC_TYPE_TX;
		vport_cap = &dmn->info.caps.esw_manager_vport_caps;

		dmn->info.supp_sw_steering = true;
		dmn->info.tx.default_icm_addr = vport_cap->icm_address_tx;
		dmn->info.rx.default_icm_addr = vport_cap->icm_address_rx;
		dmn->info.rx.drop_icm_addr = dmn->info.caps.esw_rx_drop_address;
		dmn->info.tx.drop_icm_addr = dmn->info.caps.esw_tx_drop_address;
		break;
	default:
		mlx5dr_err(dmn, "Invalid domain type\n");
		ret = -EINVAL;
		break;
	}

	return ret;
}

static void dr_domain_caps_uninit(struct mlx5dr_domain *dmn)
{
	kfree(dmn->info.caps.vports_caps);
}

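/* Create a SW steering domain of the given type over @mdev and return it
 * with an initial refcount of 1, or NULL on failure.
 *
 * Minimal usage sketch (hypothetical caller, error handling elided):
 *
 *	struct mlx5dr_domain *dmn;
 *
 *	dmn = mlx5dr_domain_create(mdev, MLX5DR_DOMAIN_TYPE_FDB);
 *	if (!dmn)
 *		return -ENOMEM;
 *	...
 *	mlx5dr_domain_destroy(dmn);
 */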
struct mlx5dr_domain *
mlx5dr_domain_create(struct mlx5_core_dev *mdev, enum mlx5dr_domain_type type)
{
	struct mlx5dr_domain *dmn;
	int ret;

	if (type > MLX5DR_DOMAIN_TYPE_FDB)
		return NULL;

	dmn = kzalloc(sizeof(*dmn), GFP_KERNEL);
	if (!dmn)
		return NULL;

	dmn->mdev = mdev;
	dmn->type = type;
	refcount_set(&dmn->refcount, 1);
	mutex_init(&dmn->info.rx.mutex);
	mutex_init(&dmn->info.tx.mutex);

	if (dr_domain_caps_init(mdev, dmn)) {
		mlx5dr_err(dmn, "Failed to initialize domain caps\n");
		goto free_domain;
	}

	dmn->info.max_log_action_icm_sz = DR_CHUNK_SIZE_4K;
	dmn->info.max_log_sw_icm_sz = min_t(u32, DR_CHUNK_SIZE_1024K,
					    dmn->info.caps.log_icm_size);

	if (!dmn->info.supp_sw_steering) {
		mlx5dr_err(dmn, "SW steering is not supported\n");
		goto uninit_caps;
	}

	/* Allocate resources */
	ret = dr_domain_init_resources(dmn);
	if (ret) {
		mlx5dr_err(dmn, "Failed to initialize domain resources\n");
		goto uninit_caps;
	}

	ret = dr_domain_init_cache(dmn);
	if (ret) {
		mlx5dr_err(dmn, "Failed to initialize domain cache\n");
		goto uninit_resources;
	}

	return dmn;

uninit_resources:
	dr_domain_uninit_resources(dmn);
uninit_caps:
	dr_domain_caps_uninit(dmn);
free_domain:
	kfree(dmn);
	return NULL;
}

/* Ensure the device steering tables are in sync with updates made by SW
 * rule insertion.
 */
int mlx5dr_domain_sync(struct mlx5dr_domain *dmn, u32 flags)
{
	int ret = 0;

	if (flags & MLX5DR_DOMAIN_SYNC_FLAGS_SW) {
		mlx5dr_domain_lock(dmn);
		ret = mlx5dr_send_ring_force_drain(dmn);
		mlx5dr_domain_unlock(dmn);
		if (ret) {
			mlx5dr_err(dmn, "Force drain failed, flags: %u, ret: %d\n",
				   flags, ret);
			return ret;
		}
	}

	if (flags & MLX5DR_DOMAIN_SYNC_FLAGS_HW)
		ret = mlx5dr_cmd_sync_steering(dmn->mdev);

	return ret;
}

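/* Destroy a domain. Fails with -EBUSY while other objects still hold
 * references to it.
 */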
int mlx5dr_domain_destroy(struct mlx5dr_domain *dmn)
{
	if (refcount_read(&dmn->refcount) > 1)
		return -EBUSY;

	/* make sure resources are not used by the hardware */
	mlx5dr_cmd_sync_steering(dmn->mdev);
	dr_domain_uninit_cache(dmn);
	dr_domain_uninit_resources(dmn);
	dr_domain_caps_uninit(dmn);
	mutex_destroy(&dmn->info.tx.mutex);
	mutex_destroy(&dmn->info.rx.mutex);
	kfree(dmn);
	return 0;
}

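/* Set, or clear with a NULL @peer_dmn, the peer domain used for
 * cross-eswitch steering, moving the held reference accordingly.
 */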
void mlx5dr_domain_set_peer(struct mlx5dr_domain *dmn,
			    struct mlx5dr_domain *peer_dmn)
{
	mlx5dr_domain_lock(dmn);

	if (dmn->peer_dmn)
		refcount_dec(&dmn->peer_dmn->refcount);

	dmn->peer_dmn = peer_dmn;

	if (dmn->peer_dmn)
		refcount_inc(&dmn->peer_dmn->refcount);

	mlx5dr_domain_unlock(dmn);
}