/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx5/driver.h>
#include <linux/mlx5/eswitch.h>
#include <linux/mlx5/mlx5_ifc_vdpa.h>
#include "mlx5_core.h"

/* Serializes creation/removal of auxiliary devices and driver rescans */
static DEFINE_MUTEX(mlx5_intf_mutex);
static DEFINE_IDA(mlx5_adev_ida);

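/* Ethernet representors require eswitch support compiled in, the
 * eswitch manager capability, and the eswitch to be in switchdev mode.
 */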
static bool is_eth_rep_supported(struct mlx5_core_dev *dev)
{
	if (!IS_ENABLED(CONFIG_MLX5_ESWITCH))
		return false;

	if (!MLX5_ESWITCH_MANAGER(dev))
		return false;

	if (!is_mdev_switchdev_mode(dev))
		return false;

	return true;
}

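/* Plain Ethernet is mutually exclusive with the representor flavour and
 * additionally needs a minimal set of general and Ethernet capabilities.
 */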
static bool is_eth_supported(struct mlx5_core_dev *dev)
{
	if (!IS_ENABLED(CONFIG_MLX5_CORE_EN))
		return false;

	if (is_eth_rep_supported(dev))
		return false;

	if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return false;

	if (!MLX5_CAP_GEN(dev, eth_net_offloads)) {
		mlx5_core_warn(dev, "Missing eth_net_offloads capability\n");
		return false;
	}

	if (!MLX5_CAP_GEN(dev, nic_flow_table)) {
		mlx5_core_warn(dev, "Missing nic_flow_table capability\n");
		return false;
	}

	if (!MLX5_CAP_ETH(dev, csum_cap)) {
		mlx5_core_warn(dev, "Missing csum_cap capability\n");
		return false;
	}

	if (!MLX5_CAP_ETH(dev, max_lso_cap)) {
		mlx5_core_warn(dev, "Missing max_lso_cap capability\n");
		return false;
	}

	if (!MLX5_CAP_ETH(dev, vlan_cap)) {
		mlx5_core_warn(dev, "Missing vlan_cap capability\n");
		return false;
	}

	if (!MLX5_CAP_ETH(dev, rss_ind_tbl_cap)) {
		mlx5_core_warn(dev, "Missing rss_ind_tbl_cap capability\n");
		return false;
	}

	if (MLX5_CAP_FLOWTABLE(dev,
			       flow_table_properties_nic_receive.max_ft_level) < 3) {
		mlx5_core_warn(dev, "max_ft_level < 3\n");
		return false;
	}

	/* The last two capabilities are only warned about; they are not
	 * required for the device to be supported.
	 */
	if (!MLX5_CAP_ETH(dev, self_lb_en_modifiable))
		mlx5_core_warn(dev, "Self loopback prevention is not supported\n");
	if (!MLX5_CAP_GEN(dev, cq_moderation))
		mlx5_core_warn(dev, "CQ moderation is not supported\n");

	return true;
}

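/* vnet (vDPA) is created only on non-PF functions that expose the
 * virtio net Q object, QP event mode and Ethernet frame offloads.
 */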
static bool is_vnet_supported(struct mlx5_core_dev *dev)
{
	if (!IS_ENABLED(CONFIG_MLX5_VDPA_NET))
		return false;

	if (mlx5_core_is_pf(dev))
		return false;

	if (!(MLX5_CAP_GEN_64(dev, general_obj_types) &
	      MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_NET_Q))
		return false;

	if (!(MLX5_CAP_DEV_VDPA_EMULATION(dev, event_mode) &
	      MLX5_VIRTIO_Q_EVENT_MODE_QP_MODE))
		return false;

	if (!MLX5_CAP_DEV_VDPA_EMULATION(dev, eth_frame_offload_type))
		return false;

	return true;
}

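/* IB representors build on the Ethernet representor checks and are not
 * created when IB auxiliary devices are disabled or multi-port is enabled.
 */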
static bool is_ib_rep_supported(struct mlx5_core_dev *dev)
{
	if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND))
		return false;

	if (dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_IB_ADEV)
		return false;

	if (!is_eth_rep_supported(dev))
		return false;

	if (!MLX5_ESWITCH_MANAGER(dev))
		return false;

	if (!is_mdev_switchdev_mode(dev))
		return false;

	if (mlx5_core_mp_enabled(dev))
		return false;

	return true;
}

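/* The multiport IB device is instantiated only on a multi-port slave
 * function that does not already qualify as an IB representor.
 */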
static bool is_mp_supported(struct mlx5_core_dev *dev)
{
	if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND))
		return false;

	if (dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_IB_ADEV)
		return false;

	if (is_ib_rep_supported(dev))
		return false;

	if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return false;

	if (!mlx5_core_is_mp_slave(dev))
		return false;

	return true;
}

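/* Plain IB is the fallback when neither the representor nor the
 * multiport flavour claims the device.
 */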
static bool is_ib_supported(struct mlx5_core_dev *dev)
{
	if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND))
		return false;

	if (dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_IB_ADEV)
		return false;

	if (is_ib_rep_supported(dev))
		return false;

	if (is_mp_supported(dev))
		return false;

	return true;
}

enum {
	MLX5_INTERFACE_PROTOCOL_ETH_REP,
	MLX5_INTERFACE_PROTOCOL_ETH,

	MLX5_INTERFACE_PROTOCOL_IB_REP,
	MLX5_INTERFACE_PROTOCOL_MPIB,
	MLX5_INTERFACE_PROTOCOL_IB,

	MLX5_INTERFACE_PROTOCOL_VNET,
};

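/* Indexed by the protocol values above; attach/add walk the table
 * forward, detach/delete walk it backward.
 */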
static const struct mlx5_adev_device {
	const char *suffix;
	bool (*is_supported)(struct mlx5_core_dev *dev);
} mlx5_adev_devices[] = {
	[MLX5_INTERFACE_PROTOCOL_VNET] = { .suffix = "vnet",
					   .is_supported = &is_vnet_supported },
	[MLX5_INTERFACE_PROTOCOL_IB] = { .suffix = "rdma",
					 .is_supported = &is_ib_supported },
	[MLX5_INTERFACE_PROTOCOL_ETH] = { .suffix = "eth",
					  .is_supported = &is_eth_supported },
	[MLX5_INTERFACE_PROTOCOL_ETH_REP] = { .suffix = "eth-rep",
					      .is_supported = &is_eth_rep_supported },
	[MLX5_INTERFACE_PROTOCOL_IB_REP] = { .suffix = "rdma-rep",
					     .is_supported = &is_ib_rep_supported },
	[MLX5_INTERFACE_PROTOCOL_MPIB] = { .suffix = "multiport",
					   .is_supported = &is_mp_supported },
};

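/* One index per mlx5_core_dev, handed out from mlx5_adev_ida; add_adev()
 * reuses it (via dev->priv.adev_idx) as the id of every auxiliary device
 * created for that parent.
 */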
int mlx5_adev_idx_alloc(void)
{
	return ida_alloc(&mlx5_adev_ida, GFP_KERNEL);
}

void mlx5_adev_idx_free(int idx)
{
	ida_free(&mlx5_adev_ida, idx);
}

int mlx5_adev_init(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;

	priv->adev = kcalloc(ARRAY_SIZE(mlx5_adev_devices),
			     sizeof(struct mlx5_adev *), GFP_KERNEL);
	if (!priv->adev)
		return -ENOMEM;

	return 0;
}

void mlx5_adev_cleanup(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;

	kfree(priv->adev);
}

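/* Device-core release callback; runs once the last reference to the
 * auxiliary device is put, frees the containing mlx5_adev and clears
 * the per-protocol slot.
 */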
static void adev_release(struct device *dev)
{
	struct mlx5_adev *mlx5_adev =
		container_of(dev, struct mlx5_adev, adev.dev);
	struct mlx5_priv *priv = &mlx5_adev->mdev->priv;
	int idx = mlx5_adev->idx;

	kfree(mlx5_adev);
	priv->adev[idx] = NULL;
}

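/* Note the asymmetric error handling: before auxiliary_device_init()
 * succeeds, kfree() is legal; afterwards, auxiliary_device_uninit()
 * must be used instead, which frees through adev_release().
 */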
static struct mlx5_adev *add_adev(struct mlx5_core_dev *dev, int idx)
{
	const char *suffix = mlx5_adev_devices[idx].suffix;
	struct auxiliary_device *adev;
	struct mlx5_adev *madev;
	int ret;

	madev = kzalloc(sizeof(*madev), GFP_KERNEL);
	if (!madev)
		return ERR_PTR(-ENOMEM);

	adev = &madev->adev;
	adev->id = dev->priv.adev_idx;
	adev->name = suffix;
	adev->dev.parent = dev->device;
	adev->dev.release = adev_release;
	madev->mdev = dev;
	madev->idx = idx;

	ret = auxiliary_device_init(adev);
	if (ret) {
		kfree(madev);
		return ERR_PTR(ret);
	}

	ret = auxiliary_device_add(adev);
	if (ret) {
		auxiliary_device_uninit(adev);
		return ERR_PTR(ret);
	}
	return madev;
}

static void del_adev(struct auxiliary_device *adev)
{
	auxiliary_device_delete(adev);
	auxiliary_device_uninit(adev);
}

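/* Create every supported auxiliary device that does not exist yet and
 * resume the ones that already do; stops at the first failure.
 */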
int mlx5_attach_device(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;
	struct auxiliary_device *adev;
	struct auxiliary_driver *adrv;
	int ret = 0, i;

	mutex_lock(&mlx5_intf_mutex);
	for (i = 0; i < ARRAY_SIZE(mlx5_adev_devices); i++) {
		if (!priv->adev[i]) {
			bool is_supported = false;

			if (mlx5_adev_devices[i].is_supported)
				is_supported = mlx5_adev_devices[i].is_supported(dev);

			if (!is_supported)
				continue;

			priv->adev[i] = add_adev(dev, i);
			if (IS_ERR(priv->adev[i])) {
				ret = PTR_ERR(priv->adev[i]);
				priv->adev[i] = NULL;
			}
		} else {
			adev = &priv->adev[i]->adev;
			adrv = to_auxiliary_drv(adev->dev.driver);

			if (adrv->resume)
				ret = adrv->resume(adev);
		}
		if (ret) {
			mlx5_core_warn(dev, "Device[%d] (%s) failed to load\n",
				       i, mlx5_adev_devices[i].suffix);
			break;
		}
	}
	mutex_unlock(&mlx5_intf_mutex);
	return ret;
}

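/* Reverse of mlx5_attach_device(): suspend the drivers that implement
 * ->suspend() and delete the remaining auxiliary devices.
 */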
void mlx5_detach_device(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;
	struct auxiliary_device *adev;
	struct auxiliary_driver *adrv;
	pm_message_t pm = {};
	int i;

	mutex_lock(&mlx5_intf_mutex);
	for (i = ARRAY_SIZE(mlx5_adev_devices) - 1; i >= 0; i--) {
		if (!priv->adev[i])
			continue;

		adev = &priv->adev[i]->adev;
		adrv = to_auxiliary_drv(adev->dev.driver);

		if (adrv->suspend) {
			adrv->suspend(adev, pm);
			continue;
		}

		del_adev(&priv->adev[i]->adev);
		priv->adev[i] = NULL;
	}
	mutex_unlock(&mlx5_intf_mutex);
}

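/* Clearing MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV re-enables all protocols;
 * the rescan then instantiates whatever is currently supported.
 */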
int mlx5_register_device(struct mlx5_core_dev *dev)
{
	int ret;

	mutex_lock(&mlx5_intf_mutex);
	dev->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV;
	ret = mlx5_rescan_drivers_locked(dev);
	mutex_unlock(&mlx5_intf_mutex);
	if (ret)
		mlx5_unregister_device(dev);

	return ret;
}

void mlx5_unregister_device(struct mlx5_core_dev *dev)
{
	mutex_lock(&mlx5_intf_mutex);
	dev->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV;
	mlx5_rescan_drivers_locked(dev);
	mutex_unlock(&mlx5_intf_mutex);
}

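/* Instantiate the missing auxiliary devices. Unlike mlx5_attach_device(),
 * a failure does not stop the scan; the last error is returned.
 */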
static int add_drivers(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;
	int i, ret = 0;

	for (i = 0; i < ARRAY_SIZE(mlx5_adev_devices); i++) {
		bool is_supported = false;

		if (priv->adev[i])
			continue;

		if (mlx5_adev_devices[i].is_supported)
			is_supported = mlx5_adev_devices[i].is_supported(dev);

		if (!is_supported)
			continue;

		priv->adev[i] = add_adev(dev, i);
		if (IS_ERR(priv->adev[i])) {
			mlx5_core_warn(dev, "Device[%d] (%s) failed to load\n",
				       i, mlx5_adev_devices[i].suffix);
			/* Keep rescanning the remaining drivers and leave it
			 * to the caller to decide whether to release
			 * everything or to continue.
			 */
			ret = PTR_ERR(priv->adev[i]);
			priv->adev[i] = NULL;
		}
	}
	return ret;
}

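/* Remove auxiliary devices that are no longer supported by the current
 * configuration, or all of them when force-disabled via the priv flags.
 */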
static void delete_drivers(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;
	bool delete_all;
	int i;

	delete_all = priv->flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV;

	for (i = ARRAY_SIZE(mlx5_adev_devices) - 1; i >= 0; i--) {
		bool is_supported = false;

		if (!priv->adev[i])
			continue;

		if (mlx5_adev_devices[i].is_supported && !delete_all)
			is_supported = mlx5_adev_devices[i].is_supported(dev);

		if (is_supported)
			continue;

		del_adev(&priv->adev[i]->adev);
		priv->adev[i] = NULL;
	}
}

/* Called after mlx5_core_dev has been reconfigured, to re-evaluate which
 * auxiliary devices should exist: stale ones are deleted first, then any
 * newly supported ones are created.
 */
int mlx5_rescan_drivers_locked(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;

	lockdep_assert_held(&mlx5_intf_mutex);

	delete_drivers(dev);
	if (priv->flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV)
		return 0;

	return add_drivers(dev);
}

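/* Domain/bus/slot only; the PCI function number is deliberately left
 * out so that two PFs sharing a slot (e.g. the two ports of a dual-port
 * device) yield the same id.
 */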
static u32 mlx5_gen_pci_id(const struct mlx5_core_dev *dev)
{
	return (u32)((pci_domain_nr(dev->pdev->bus) << 16) |
		     (dev->pdev->bus->number << 8) |
		     PCI_SLOT(dev->pdev->devfn));
}

static int next_phys_dev(struct device *dev, const void *data)
{
	struct mlx5_adev *madev = container_of(dev, struct mlx5_adev, adev.dev);
	struct mlx5_core_dev *mdev = madev->mdev;
	const struct mlx5_core_dev *curr = data;

	if (!mlx5_core_is_pf(mdev))
		return 0;

	if (mdev == curr)
		return 0;

	if (mlx5_gen_pci_id(mdev) != mlx5_gen_pci_id(curr))
		return 0;

	return 1;
}

/* This function is called from two flows:
 * 1. During initialization of mlx5_core_dev, where no locking is needed.
 * 2. During the LAG configure stage, where the caller holds
 *    &mlx5_intf_mutex.
 */
struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev)
{
	struct auxiliary_device *adev;
	struct mlx5_adev *madev;

	if (!mlx5_core_is_pf(dev))
		return NULL;

	adev = auxiliary_find_device(NULL, dev, &next_phys_dev);
	if (!adev)
		return NULL;

	madev = container_of(adev, struct mlx5_adev, adev);
	put_device(&adev->dev);
	return madev->mdev;
}

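/* Thin wrappers exposing the interface mutex to callers outside this file. */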
void mlx5_dev_list_lock(void)
{
	mutex_lock(&mlx5_intf_mutex);
}

void mlx5_dev_list_unlock(void)
{
	mutex_unlock(&mlx5_intf_mutex);
}

int mlx5_dev_list_trylock(void)
{
	return mutex_trylock(&mlx5_intf_mutex);
}