// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2019 Mellanox Technologies. */

#include <linux/mlx5/eswitch.h>
#include "dr_types.h"

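/* SW steering is supported on a domain type only if the device exposes
 * sw_owner for it, or sw_owner_v2 with a steering format version this
 * driver handles (up to ConnectX-6 Dx).
 */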
#define DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, dmn_type)	\
	((dmn)->info.caps.dmn_type##_sw_owner ||	\
	 ((dmn)->info.caps.dmn_type##_sw_owner_v2 &&	\
	  (dmn)->info.caps.sw_format_ver <= MLX5_STEERING_FORMAT_CONNECTX_6DX))

static int dr_domain_init_cache(struct mlx5dr_domain *dmn)
{
	/* Per vport cached FW FT for checksum recalculation; this
	 * recalculation is needed due to a HW bug.
	 */
	dmn->cache.recalc_cs_ft = kcalloc(dmn->info.caps.num_vports,
					  sizeof(dmn->cache.recalc_cs_ft[0]),
					  GFP_KERNEL);
	if (!dmn->cache.recalc_cs_ft)
		return -ENOMEM;

	return 0;
}

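/* Destroy any recalc-checksum flow tables that were lazily created and
 * free the per-vport cache array.
 */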
static void dr_domain_uninit_cache(struct mlx5dr_domain *dmn)
{
	int i;

	for (i = 0; i < dmn->info.caps.num_vports; i++) {
		if (!dmn->cache.recalc_cs_ft[i])
			continue;

		mlx5dr_fw_destroy_recalc_cs_ft(dmn, dmn->cache.recalc_cs_ft[i]);
	}

	kfree(dmn->cache.recalc_cs_ft);
}

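/* Return the RX ICM address of the recalc-checksum FW flow table for the
 * given vport, creating the table on first use and caching it for later
 * lookups.
 */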
int mlx5dr_domain_cache_get_recalc_cs_ft_addr(struct mlx5dr_domain *dmn,
					      u32 vport_num,
					      u64 *rx_icm_addr)
{
	struct mlx5dr_fw_recalc_cs_ft *recalc_cs_ft;

	recalc_cs_ft = dmn->cache.recalc_cs_ft[vport_num];
	if (!recalc_cs_ft) {
		/* Table not in cache, need to allocate a new one */
		recalc_cs_ft = mlx5dr_fw_create_recalc_cs_ft(dmn, vport_num);
		if (!recalc_cs_ft)
			return -EINVAL;

		dmn->cache.recalc_cs_ft[vport_num] = recalc_cs_ft;
	}

	*rx_icm_addr = recalc_cs_ft->rx_icm_addr;

	return 0;
}

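/* Allocate the per-domain HW resources: the STE context matching the
 * device's steering format, a PD, a UAR page, the STE and modify-action
 * ICM pools, and the send ring used to write steering entries.
 */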
static int dr_domain_init_resources(struct mlx5dr_domain *dmn)
{
	int ret;

	dmn->ste_ctx = mlx5dr_ste_get_ctx(dmn->info.caps.sw_format_ver);
	if (!dmn->ste_ctx) {
		mlx5dr_err(dmn, "SW Steering on this device is unsupported\n");
		return -EOPNOTSUPP;
	}

	ret = mlx5_core_alloc_pd(dmn->mdev, &dmn->pdn);
	if (ret) {
		mlx5dr_err(dmn, "Couldn't allocate PD, ret: %d\n", ret);
		return ret;
	}

	dmn->uar = mlx5_get_uars_page(dmn->mdev);
	if (!dmn->uar) {
		mlx5dr_err(dmn, "Couldn't allocate UAR\n");
		ret = -ENOMEM;
		goto clean_pd;
	}

	dmn->ste_icm_pool = mlx5dr_icm_pool_create(dmn, DR_ICM_TYPE_STE);
	if (!dmn->ste_icm_pool) {
		mlx5dr_err(dmn, "Couldn't get icm memory\n");
		ret = -ENOMEM;
		goto clean_uar;
	}

	dmn->action_icm_pool = mlx5dr_icm_pool_create(dmn, DR_ICM_TYPE_MODIFY_ACTION);
	if (!dmn->action_icm_pool) {
		mlx5dr_err(dmn, "Couldn't get action icm memory\n");
		ret = -ENOMEM;
		goto free_ste_icm_pool;
	}

	ret = mlx5dr_send_ring_alloc(dmn);
	if (ret) {
		mlx5dr_err(dmn, "Couldn't create send-ring\n");
		goto free_action_icm_pool;
	}

	return 0;

free_action_icm_pool:
	mlx5dr_icm_pool_destroy(dmn->action_icm_pool);
free_ste_icm_pool:
	mlx5dr_icm_pool_destroy(dmn->ste_icm_pool);
clean_uar:
	mlx5_put_uars_page(dmn->mdev, dmn->uar);
clean_pd:
	mlx5_core_dealloc_pd(dmn->mdev, dmn->pdn);

	return ret;
}

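/* Release the domain resources in reverse order of allocation. */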
static void dr_domain_uninit_resources(struct mlx5dr_domain *dmn)
{
	mlx5dr_send_ring_free(dmn, dmn->send_ring);
	mlx5dr_icm_pool_destroy(dmn->action_icm_pool);
	mlx5dr_icm_pool_destroy(dmn->ste_icm_pool);
	mlx5_put_uars_page(dmn->mdev, dmn->uar);
	mlx5_core_dealloc_pd(dmn->mdev, dmn->pdn);
}

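/* Query a single vport's steering caps: its RX/TX ICM addresses from the
 * eswitch vport context and its GVMI.
 */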
static int dr_domain_query_vport(struct mlx5dr_domain *dmn,
				 bool other_vport,
				 u16 vport_number)
{
	struct mlx5dr_cmd_vport_cap *vport_caps;
	int ret;

	vport_caps = &dmn->info.caps.vports_caps[vport_number];

	ret = mlx5dr_cmd_query_esw_vport_context(dmn->mdev,
						 other_vport,
						 vport_number,
						 &vport_caps->icm_address_rx,
						 &vport_caps->icm_address_tx);
	if (ret)
		return ret;

	ret = mlx5dr_cmd_query_gvmi(dmn->mdev,
				    other_vport,
				    vport_number,
				    &vport_caps->vport_gvmi);
	if (ret)
		return ret;

	vport_caps->num = vport_number;
	vport_caps->vhca_gvmi = dmn->info.caps.gvmi;

	return 0;
}

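/* Query the caps of all eswitch vports. Vport 0 (the eswitch manager) is
 * queried as "self"; the last entry is the wire port and is filled from
 * the uplink addresses already returned with the eswitch caps.
 */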
static int dr_domain_query_vports(struct mlx5dr_domain *dmn)
{
	struct mlx5dr_esw_caps *esw_caps = &dmn->info.caps.esw_caps;
	struct mlx5dr_cmd_vport_cap *wire_vport;
	int vport;
	int ret;

	/* Query vports (except wire vport) */
	for (vport = 0; vport < dmn->info.caps.num_esw_ports - 1; vport++) {
		ret = dr_domain_query_vport(dmn, !!vport, vport);
		if (ret)
			return ret;
	}

	/* Last vport is the wire port */
	wire_vport = &dmn->info.caps.vports_caps[vport];
	wire_vport->num = WIRE_PORT;
	wire_vport->icm_address_rx = esw_caps->uplink_icm_address_rx;
	wire_vport->icm_address_tx = esw_caps->uplink_icm_address_tx;
	wire_vport->vport_gvmi = 0;
	wire_vport->vhca_gvmi = dmn->info.caps.gvmi;

	return 0;
}

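/* Query the FDB-related caps: eswitch SW ownership, drop ICM addresses and
 * the per-vport caps. Only meaningful when the device is the eswitch
 * manager.
 */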
static int dr_domain_query_fdb_caps(struct mlx5_core_dev *mdev,
				    struct mlx5dr_domain *dmn)
{
	int ret;

	if (!dmn->info.caps.eswitch_manager)
		return -EOPNOTSUPP;

	ret = mlx5dr_cmd_query_esw_caps(mdev, &dmn->info.caps.esw_caps);
	if (ret)
		return ret;

	dmn->info.caps.fdb_sw_owner = dmn->info.caps.esw_caps.sw_owner;
	dmn->info.caps.fdb_sw_owner_v2 = dmn->info.caps.esw_caps.sw_owner_v2;
	dmn->info.caps.esw_rx_drop_address = dmn->info.caps.esw_caps.drop_icm_address_rx;
	dmn->info.caps.esw_tx_drop_address = dmn->info.caps.esw_caps.drop_icm_address_tx;

	dmn->info.caps.vports_caps = kcalloc(dmn->info.caps.num_esw_ports,
					     sizeof(dmn->info.caps.vports_caps[0]),
					     GFP_KERNEL);
	if (!dmn->info.caps.vports_caps)
		return -ENOMEM;

	ret = dr_domain_query_vports(dmn);
	if (ret) {
		mlx5dr_err(dmn, "Failed to query vports caps (err: %d)\n", ret);
		goto free_vports_caps;
	}

	dmn->info.caps.num_vports = dmn->info.caps.num_esw_ports - 1;

	return 0;

free_vports_caps:
	kfree(dmn->info.caps.vports_caps);
	dmn->info.caps.vports_caps = NULL;
	return ret;
}

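/* Query device and eswitch caps and set up the per-domain defaults (STE
 * types, default and drop ICM addresses) according to the domain type.
 */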
static int dr_domain_caps_init(struct mlx5_core_dev *mdev,
			       struct mlx5dr_domain *dmn)
{
	struct mlx5dr_cmd_vport_cap *vport_cap;
	int ret;

	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) {
		mlx5dr_err(dmn, "Failed to allocate domain, bad link type\n");
		return -EOPNOTSUPP;
	}

	dmn->info.caps.num_esw_ports = mlx5_eswitch_get_total_vports(mdev);

	ret = mlx5dr_cmd_query_device(mdev, &dmn->info.caps);
	if (ret)
		return ret;

	ret = dr_domain_query_fdb_caps(mdev, dmn);
	if (ret)
		return ret;

	switch (dmn->type) {
	case MLX5DR_DOMAIN_TYPE_NIC_RX:
		if (!DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, rx))
			return -EOPNOTSUPP;

		dmn->info.supp_sw_steering = true;
		dmn->info.rx.ste_type = MLX5DR_STE_TYPE_RX;
		dmn->info.rx.default_icm_addr = dmn->info.caps.nic_rx_drop_address;
		dmn->info.rx.drop_icm_addr = dmn->info.caps.nic_rx_drop_address;
		break;
	case MLX5DR_DOMAIN_TYPE_NIC_TX:
		if (!DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, tx))
			return -EOPNOTSUPP;

		dmn->info.supp_sw_steering = true;
		dmn->info.tx.ste_type = MLX5DR_STE_TYPE_TX;
		dmn->info.tx.default_icm_addr = dmn->info.caps.nic_tx_allow_address;
		dmn->info.tx.drop_icm_addr = dmn->info.caps.nic_tx_drop_address;
		break;
	case MLX5DR_DOMAIN_TYPE_FDB:
		if (!dmn->info.caps.eswitch_manager)
			return -EOPNOTSUPP;

		if (!DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, fdb))
			return -EOPNOTSUPP;

		dmn->info.rx.ste_type = MLX5DR_STE_TYPE_RX;
		dmn->info.tx.ste_type = MLX5DR_STE_TYPE_TX;
		vport_cap = mlx5dr_get_vport_cap(&dmn->info.caps, 0);
		if (!vport_cap) {
			mlx5dr_err(dmn, "Failed to get esw manager vport\n");
			return -ENOENT;
		}

		dmn->info.supp_sw_steering = true;
		dmn->info.tx.default_icm_addr = vport_cap->icm_address_tx;
		dmn->info.rx.default_icm_addr = vport_cap->icm_address_rx;
		dmn->info.rx.drop_icm_addr = dmn->info.caps.esw_rx_drop_address;
		dmn->info.tx.drop_icm_addr = dmn->info.caps.esw_tx_drop_address;
		break;
	default:
		mlx5dr_err(dmn, "Invalid domain\n");
		ret = -EINVAL;
		break;
	}

	return ret;
}

static void dr_domain_caps_uninit(struct mlx5dr_domain *dmn)
{
	kfree(dmn->info.caps.vports_caps);
}

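/* Create a SW steering domain of the given type on @mdev and initialize
 * its caps, HW resources and recalc-checksum cache. Returns NULL on
 * failure.
 *
 * A minimal usage sketch (the caller here is hypothetical; the calls are
 * the APIs defined in this file):
 *
 *	dmn = mlx5dr_domain_create(mdev, MLX5DR_DOMAIN_TYPE_FDB);
 *	if (!dmn)
 *		return -ENOMEM;
 *	... build tables/matchers/rules on dmn ...
 *	mlx5dr_domain_sync(dmn, MLX5DR_DOMAIN_SYNC_FLAGS_SW |
 *				MLX5DR_DOMAIN_SYNC_FLAGS_HW);
 *	mlx5dr_domain_destroy(dmn);
 */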
struct mlx5dr_domain *
mlx5dr_domain_create(struct mlx5_core_dev *mdev, enum mlx5dr_domain_type type)
{
	struct mlx5dr_domain *dmn;
	int ret;

	if (type > MLX5DR_DOMAIN_TYPE_FDB)
		return NULL;

	dmn = kzalloc(sizeof(*dmn), GFP_KERNEL);
	if (!dmn)
		return NULL;

	dmn->mdev = mdev;
	dmn->type = type;
	refcount_set(&dmn->refcount, 1);
	mutex_init(&dmn->info.rx.mutex);
	mutex_init(&dmn->info.tx.mutex);

	if (dr_domain_caps_init(mdev, dmn)) {
		mlx5dr_err(dmn, "Failed to init domain, no caps\n");
		goto free_domain;
	}

	dmn->info.max_log_action_icm_sz = DR_CHUNK_SIZE_4K;
	dmn->info.max_log_sw_icm_sz = min_t(u32, DR_CHUNK_SIZE_1024K,
					    dmn->info.caps.log_icm_size);

	if (!dmn->info.supp_sw_steering) {
		mlx5dr_err(dmn, "SW steering is not supported\n");
		goto uninit_caps;
	}

	/* Allocate resources */
	ret = dr_domain_init_resources(dmn);
	if (ret) {
		mlx5dr_err(dmn, "Failed to init domain resources\n");
		goto uninit_caps;
	}

	ret = dr_domain_init_cache(dmn);
	if (ret) {
		mlx5dr_err(dmn, "Failed to initialize domain cache\n");
		goto uninit_resources;
	}

	return dmn;

uninit_resources:
	dr_domain_uninit_resources(dmn);
uninit_caps:
	dr_domain_caps_uninit(dmn);
free_domain:
	kfree(dmn);
	return NULL;
}

/* Ensure the device steering tables are synchronized with updates made by
 * SW insertion.
 */
int mlx5dr_domain_sync(struct mlx5dr_domain *dmn, u32 flags)
{
	int ret = 0;

	if (flags & MLX5DR_DOMAIN_SYNC_FLAGS_SW) {
		mlx5dr_domain_lock(dmn);
		ret = mlx5dr_send_ring_force_drain(dmn);
		mlx5dr_domain_unlock(dmn);
		if (ret) {
			mlx5dr_err(dmn, "Force drain failed flags: %d, ret: %d\n",
				   flags, ret);
			return ret;
		}
	}

	if (flags & MLX5DR_DOMAIN_SYNC_FLAGS_HW)
		ret = mlx5dr_cmd_sync_steering(dmn->mdev);

	return ret;
}

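/* Destroy the domain if no other user holds a reference. The HW is synced
 * first so that no steering resources are still in use when they are freed.
 */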
int mlx5dr_domain_destroy(struct mlx5dr_domain *dmn)
{
	if (refcount_read(&dmn->refcount) > 1)
		return -EBUSY;

	/* make sure resources are not used by the hardware */
	mlx5dr_cmd_sync_steering(dmn->mdev);
	dr_domain_uninit_cache(dmn);
	dr_domain_uninit_resources(dmn);
	dr_domain_caps_uninit(dmn);
	mutex_destroy(&dmn->info.tx.mutex);
	mutex_destroy(&dmn->info.rx.mutex);
	kfree(dmn);
	return 0;
}

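/* Set the peer domain pointer under the domain lock, dropping the reference
 * held on any previous peer and taking one on the new peer (if any).
 */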
void mlx5dr_domain_set_peer(struct mlx5dr_domain *dmn,
			    struct mlx5dr_domain *peer_dmn)
{
	mlx5dr_domain_lock(dmn);

	if (dmn->peer_dmn)
		refcount_dec(&dmn->peer_dmn->refcount);

	dmn->peer_dmn = peer_dmn;

	if (dmn->peer_dmn)
		refcount_inc(&dmn->peer_dmn->refcount);

	mlx5dr_domain_unlock(dmn);
}