// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2019 Mellanox Technologies. */

#include <linux/mlx5/eswitch.h>
#include "dr_types.h"

static int dr_domain_init_cache(struct mlx5dr_domain *dmn)
{
	/* Per-vport cached FW flow table (FT) for checksum recalculation;
	 * the recalculation is needed due to a HW bug.
	 */
	dmn->cache.recalc_cs_ft = kcalloc(dmn->info.caps.num_vports,
					  sizeof(dmn->cache.recalc_cs_ft[0]),
					  GFP_KERNEL);
	if (!dmn->cache.recalc_cs_ft)
		return -ENOMEM;

	return 0;
}

static void dr_domain_uninit_cache(struct mlx5dr_domain *dmn)
{
	int i;

	for (i = 0; i < dmn->info.caps.num_vports; i++) {
		if (!dmn->cache.recalc_cs_ft[i])
			continue;

		mlx5dr_fw_destroy_recalc_cs_ft(dmn, dmn->cache.recalc_cs_ft[i]);
	}

	kfree(dmn->cache.recalc_cs_ft);
}

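/* Return the RX ICM address of the per-vport checksum-recalculation FW
 * table, creating and caching the table on first use for that vport.
 */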
int mlx5dr_domain_cache_get_recalc_cs_ft_addr(struct mlx5dr_domain *dmn,
					      u32 vport_num,
					      u64 *rx_icm_addr)
{
	struct mlx5dr_fw_recalc_cs_ft *recalc_cs_ft;

	recalc_cs_ft = dmn->cache.recalc_cs_ft[vport_num];
	if (!recalc_cs_ft) {
		/* Table not in cache, need to allocate a new one */
		recalc_cs_ft = mlx5dr_fw_create_recalc_cs_ft(dmn, vport_num);
		if (!recalc_cs_ft)
			return -EINVAL;

		dmn->cache.recalc_cs_ft[vport_num] = recalc_cs_ft;
	}

	*rx_icm_addr = recalc_cs_ft->rx_icm_addr;

	return 0;
}

static int dr_domain_init_resources(struct mlx5dr_domain *dmn)
{
	int ret;

	ret = mlx5_core_alloc_pd(dmn->mdev, &dmn->pdn);
	if (ret) {
		mlx5dr_err(dmn, "Couldn't allocate PD, ret: %d\n", ret);
		return ret;
	}

	dmn->uar = mlx5_get_uars_page(dmn->mdev);
	if (!dmn->uar) {
		mlx5dr_err(dmn, "Couldn't allocate UAR\n");
		ret = -ENOMEM;
		goto clean_pd;
	}

	dmn->ste_icm_pool = mlx5dr_icm_pool_create(dmn, DR_ICM_TYPE_STE);
	if (!dmn->ste_icm_pool) {
		mlx5dr_err(dmn, "Couldn't get icm memory\n");
		ret = -ENOMEM;
		goto clean_uar;
	}

	dmn->action_icm_pool = mlx5dr_icm_pool_create(dmn, DR_ICM_TYPE_MODIFY_ACTION);
	if (!dmn->action_icm_pool) {
		mlx5dr_err(dmn, "Couldn't get action icm memory\n");
		ret = -ENOMEM;
		goto free_ste_icm_pool;
	}

	ret = mlx5dr_send_ring_alloc(dmn);
	if (ret) {
		mlx5dr_err(dmn, "Couldn't create send-ring\n");
		goto free_action_icm_pool;
	}

	return 0;

free_action_icm_pool:
	mlx5dr_icm_pool_destroy(dmn->action_icm_pool);
free_ste_icm_pool:
	mlx5dr_icm_pool_destroy(dmn->ste_icm_pool);
clean_uar:
	mlx5_put_uars_page(dmn->mdev, dmn->uar);
clean_pd:
	mlx5_core_dealloc_pd(dmn->mdev, dmn->pdn);

	return ret;
}

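/* Tear down in reverse order of dr_domain_init_resources() */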
static void dr_domain_uninit_resources(struct mlx5dr_domain *dmn)
{
	mlx5dr_send_ring_free(dmn, dmn->send_ring);
	mlx5dr_icm_pool_destroy(dmn->action_icm_pool);
	mlx5dr_icm_pool_destroy(dmn->ste_icm_pool);
	mlx5_put_uars_page(dmn->mdev, dmn->uar);
	mlx5_core_dealloc_pd(dmn->mdev, dmn->pdn);
}

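/* Fill the per-vport capabilities: the RX/TX ICM addresses and the GVMI,
 * queried from the eswitch vport context.
 */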
static int dr_domain_query_vport(struct mlx5dr_domain *dmn,
				 bool other_vport,
				 u16 vport_number)
{
	struct mlx5dr_cmd_vport_cap *vport_caps;
	int ret;

	vport_caps = &dmn->info.caps.vports_caps[vport_number];

	ret = mlx5dr_cmd_query_esw_vport_context(dmn->mdev,
						 other_vport,
						 vport_number,
						 &vport_caps->icm_address_rx,
						 &vport_caps->icm_address_tx);
	if (ret)
		return ret;

	ret = mlx5dr_cmd_query_gvmi(dmn->mdev,
				    other_vport,
				    vport_number,
				    &vport_caps->vport_gvmi);
	if (ret)
		return ret;

	vport_caps->num = vport_number;
	vport_caps->vhca_gvmi = dmn->info.caps.gvmi;

	return 0;
}

static int dr_domain_query_vports(struct mlx5dr_domain *dmn)
{
	struct mlx5dr_esw_caps *esw_caps = &dmn->info.caps.esw_caps;
	struct mlx5dr_cmd_vport_cap *wire_vport;
	int vport;
	int ret;

	/* Query vports (except wire vport) */
	for (vport = 0; vport < dmn->info.caps.num_esw_ports - 1; vport++) {
		ret = dr_domain_query_vport(dmn, !!vport, vport);
		if (ret)
			return ret;
	}

	/* Last vport is the wire port */
	wire_vport = &dmn->info.caps.vports_caps[vport];
	wire_vport->num = WIRE_PORT;
	wire_vport->icm_address_rx = esw_caps->uplink_icm_address_rx;
	wire_vport->icm_address_tx = esw_caps->uplink_icm_address_tx;
	wire_vport->vport_gvmi = 0;
	wire_vport->vhca_gvmi = dmn->info.caps.gvmi;

	return 0;
}

static int dr_domain_query_fdb_caps(struct mlx5_core_dev *mdev,
				    struct mlx5dr_domain *dmn)
{
	int ret;

	if (!dmn->info.caps.eswitch_manager)
		return -EOPNOTSUPP;

	ret = mlx5dr_cmd_query_esw_caps(mdev, &dmn->info.caps.esw_caps);
	if (ret)
		return ret;

	dmn->info.caps.fdb_sw_owner = dmn->info.caps.esw_caps.sw_owner;
	dmn->info.caps.esw_rx_drop_address = dmn->info.caps.esw_caps.drop_icm_address_rx;
	dmn->info.caps.esw_tx_drop_address = dmn->info.caps.esw_caps.drop_icm_address_tx;

	dmn->info.caps.vports_caps = kcalloc(dmn->info.caps.num_esw_ports,
					     sizeof(dmn->info.caps.vports_caps[0]),
					     GFP_KERNEL);
	if (!dmn->info.caps.vports_caps)
		return -ENOMEM;

	ret = dr_domain_query_vports(dmn);
	if (ret) {
		mlx5dr_err(dmn, "Failed to query vports caps (err: %d)\n", ret);
		goto free_vports_caps;
	}

	dmn->info.caps.num_vports = dmn->info.caps.num_esw_ports - 1;

	return 0;

free_vports_caps:
	kfree(dmn->info.caps.vports_caps);
	dmn->info.caps.vports_caps = NULL;
	return ret;
}

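/* Query device and eswitch capabilities and derive, per domain type, the
 * STE types and the default/drop ICM addresses.
 */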
static int dr_domain_caps_init(struct mlx5_core_dev *mdev,
			       struct mlx5dr_domain *dmn)
{
	struct mlx5dr_cmd_vport_cap *vport_cap;
	int ret;

	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) {
		mlx5dr_err(dmn, "Failed to allocate domain, bad link type\n");
		return -EOPNOTSUPP;
	}

	dmn->info.caps.num_esw_ports = mlx5_eswitch_get_total_vports(mdev);

	ret = mlx5dr_cmd_query_device(mdev, &dmn->info.caps);
	if (ret)
		return ret;

	if (dmn->info.caps.sw_format_ver != MLX5_STEERING_FORMAT_CONNECTX_5) {
		mlx5dr_err(dmn, "SW steering is not supported on this device\n");
		return -EOPNOTSUPP;
	}

	ret = dr_domain_query_fdb_caps(mdev, dmn);
	if (ret)
		return ret;

	switch (dmn->type) {
	case MLX5DR_DOMAIN_TYPE_NIC_RX:
		if (!dmn->info.caps.rx_sw_owner)
			return -EOPNOTSUPP;

		dmn->info.supp_sw_steering = true;
		dmn->info.rx.ste_type = MLX5DR_STE_TYPE_RX;
		dmn->info.rx.default_icm_addr = dmn->info.caps.nic_rx_drop_address;
		dmn->info.rx.drop_icm_addr = dmn->info.caps.nic_rx_drop_address;
		break;
	case MLX5DR_DOMAIN_TYPE_NIC_TX:
		if (!dmn->info.caps.tx_sw_owner)
			return -EOPNOTSUPP;

		dmn->info.supp_sw_steering = true;
		dmn->info.tx.ste_type = MLX5DR_STE_TYPE_TX;
		dmn->info.tx.default_icm_addr = dmn->info.caps.nic_tx_allow_address;
		dmn->info.tx.drop_icm_addr = dmn->info.caps.nic_tx_drop_address;
		break;
	case MLX5DR_DOMAIN_TYPE_FDB:
		if (!dmn->info.caps.eswitch_manager)
			return -EOPNOTSUPP;

		if (!dmn->info.caps.fdb_sw_owner)
			return -EOPNOTSUPP;

		dmn->info.rx.ste_type = MLX5DR_STE_TYPE_RX;
		dmn->info.tx.ste_type = MLX5DR_STE_TYPE_TX;
		vport_cap = mlx5dr_get_vport_cap(&dmn->info.caps, 0);
		if (!vport_cap) {
			mlx5dr_err(dmn, "Failed to get esw manager vport\n");
			return -ENOENT;
		}

		dmn->info.supp_sw_steering = true;
		dmn->info.tx.default_icm_addr = vport_cap->icm_address_tx;
		dmn->info.rx.default_icm_addr = vport_cap->icm_address_rx;
		dmn->info.rx.drop_icm_addr = dmn->info.caps.esw_rx_drop_address;
		dmn->info.tx.drop_icm_addr = dmn->info.caps.esw_tx_drop_address;
		break;
	default:
		mlx5dr_err(dmn, "Invalid domain\n");
		ret = -EINVAL;
		break;
	}

	return ret;
}

static void dr_domain_caps_uninit(struct mlx5dr_domain *dmn)
{
	kfree(dmn->info.caps.vports_caps);
}

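/* Create a SW steering domain of the given type (NIC RX, NIC TX or FDB).
 * On success the returned domain holds a single reference; on failure
 * NULL is returned.
 */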
struct mlx5dr_domain *
mlx5dr_domain_create(struct mlx5_core_dev *mdev, enum mlx5dr_domain_type type)
{
	struct mlx5dr_domain *dmn;
	int ret;

	if (type > MLX5DR_DOMAIN_TYPE_FDB)
		return NULL;

	dmn = kzalloc(sizeof(*dmn), GFP_KERNEL);
	if (!dmn)
		return NULL;

	dmn->mdev = mdev;
	dmn->type = type;
	refcount_set(&dmn->refcount, 1);
	mutex_init(&dmn->info.rx.mutex);
	mutex_init(&dmn->info.tx.mutex);

	if (dr_domain_caps_init(mdev, dmn)) {
		mlx5dr_err(dmn, "Failed to init domain, no caps\n");
		goto free_domain;
	}

	dmn->info.max_log_action_icm_sz = DR_CHUNK_SIZE_4K;
	dmn->info.max_log_sw_icm_sz = min_t(u32, DR_CHUNK_SIZE_1024K,
					    dmn->info.caps.log_icm_size);

	if (!dmn->info.supp_sw_steering) {
		mlx5dr_err(dmn, "SW steering is not supported\n");
		goto uninit_caps;
	}

	/* Allocate resources */
	ret = dr_domain_init_resources(dmn);
	if (ret) {
		mlx5dr_err(dmn, "Failed to init domain resources\n");
		goto uninit_caps;
	}

	ret = dr_domain_init_cache(dmn);
	if (ret) {
		mlx5dr_err(dmn, "Failed to initialize domain cache\n");
		goto uninit_resources;
	}

	return dmn;

uninit_resources:
	dr_domain_uninit_resources(dmn);
uninit_caps:
	dr_domain_caps_uninit(dmn);
free_domain:
	kfree(dmn);
	return NULL;
}

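/* Typical caller flow (sketch only; error handling elided, and the
 * surrounding context providing mdev is assumed):
 *
 *	struct mlx5dr_domain *dmn;
 *
 *	dmn = mlx5dr_domain_create(mdev, MLX5DR_DOMAIN_TYPE_FDB);
 *	if (!dmn)
 *		return -ENOMEM;
 *	...
 *	mlx5dr_domain_sync(dmn, MLX5DR_DOMAIN_SYNC_FLAGS_SW |
 *				MLX5DR_DOMAIN_SYNC_FLAGS_HW);
 *	...
 *	mlx5dr_domain_destroy(dmn);
 */
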
/* Ensure the device steering tables are in sync with updates made by SW
 * insertion.
 */
int mlx5dr_domain_sync(struct mlx5dr_domain *dmn, u32 flags)
{
	int ret = 0;

	if (flags & MLX5DR_DOMAIN_SYNC_FLAGS_SW) {
		mlx5dr_domain_lock(dmn);
		ret = mlx5dr_send_ring_force_drain(dmn);
		mlx5dr_domain_unlock(dmn);
		if (ret) {
			mlx5dr_err(dmn, "Force drain failed, flags: %d, ret: %d\n",
				   flags, ret);
			return ret;
		}
	}

	if (flags & MLX5DR_DOMAIN_SYNC_FLAGS_HW)
		ret = mlx5dr_cmd_sync_steering(dmn->mdev);

	return ret;
}

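/* Destroy the domain; fails with -EBUSY while other objects still hold
 * references to it.
 */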
int mlx5dr_domain_destroy(struct mlx5dr_domain *dmn)
{
	if (refcount_read(&dmn->refcount) > 1)
		return -EBUSY;

	/* Make sure resources are not used by the hardware */
	mlx5dr_cmd_sync_steering(dmn->mdev);
	dr_domain_uninit_cache(dmn);
	dr_domain_uninit_resources(dmn);
	dr_domain_caps_uninit(dmn);
	mutex_destroy(&dmn->info.tx.mutex);
	mutex_destroy(&dmn->info.rx.mutex);
	kfree(dmn);
	return 0;
}

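/* Replace the domain's peer domain, moving the peer refcount from the old
 * peer (if any) to the new one.
 */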
void mlx5dr_domain_set_peer(struct mlx5dr_domain *dmn,
			    struct mlx5dr_domain *peer_dmn)
{
	mlx5dr_domain_lock(dmn);

	if (dmn->peer_dmn)
		refcount_dec(&dmn->peer_dmn->refcount);

	dmn->peer_dmn = peer_dmn;

	if (dmn->peer_dmn)
		refcount_inc(&dmn->peer_dmn->refcount);

	mlx5dr_domain_unlock(dmn);
}