main.c: diff of 664b0bae0b87f69bc9deb098f5e0158b9cf18e04 (old) vs. 24da00164f7a9c247d2224a54494d0e955199630 (new)
1/*
2 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:

--- 24 unchanged lines hidden (view full) ---

33#include <linux/debugfs.h>
34#include <linux/highmem.h>
35#include <linux/module.h>
36#include <linux/init.h>
37#include <linux/errno.h>
38#include <linux/pci.h>
39#include <linux/dma-mapping.h>
40#include <linux/slab.h>
1/*
2 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:

--- 24 unchanged lines hidden (view full) ---

33#include <linux/debugfs.h>
34#include <linux/highmem.h>
35#include <linux/module.h>
36#include <linux/init.h>
37#include <linux/errno.h>
38#include <linux/pci.h>
39#include <linux/dma-mapping.h>
40#include <linux/slab.h>
41#include <linux/bitmap.h>
41#if defined(CONFIG_X86)
42#include <asm/pat.h>
43#endif
44#include <linux/sched.h>
45#include <linux/sched/mm.h>
46#include <linux/sched/task.h>
47#include <linux/delay.h>
48#include <rdma/ib_user_verbs.h>
49#include <rdma/ib_addr.h>
50#include <rdma/ib_cache.h>
51#include <linux/mlx5/port.h>
52#include <linux/mlx5/vport.h>
53#include <linux/mlx5/fs.h>
42#if defined(CONFIG_X86)
43#include <asm/pat.h>
44#endif
45#include <linux/sched.h>
46#include <linux/sched/mm.h>
47#include <linux/sched/task.h>
48#include <linux/delay.h>
49#include <rdma/ib_user_verbs.h>
50#include <rdma/ib_addr.h>
51#include <rdma/ib_cache.h>
52#include <linux/mlx5/port.h>
53#include <linux/mlx5/vport.h>
54#include <linux/mlx5/fs.h>
55#include <linux/mlx5/fs_helpers.h>
54#include <linux/list.h>
55#include <rdma/ib_smi.h>
56#include <rdma/ib_umem.h>
57#include <linux/in.h>
58#include <linux/etherdevice.h>
59#include "mlx5_ib.h"
56#include <linux/list.h>
57#include <rdma/ib_smi.h>
58#include <rdma/ib_umem.h>
59#include <linux/in.h>
60#include <linux/etherdevice.h>
61#include "mlx5_ib.h"
62#include "ib_rep.h"
60#include "cmd.h"
63#include "cmd.h"
64#include <linux/mlx5/fs_helpers.h>
65#include <linux/mlx5/accel.h>
66#include <rdma/uverbs_std_types.h>
67#include <rdma/mlx5_user_ioctl_verbs.h>
68#include <rdma/mlx5_user_ioctl_cmds.h>
61
69
70#define UVERBS_MODULE_NAME mlx5_ib
71#include <rdma/uverbs_named_ioctl.h>
72
62#define DRIVER_NAME "mlx5_ib"
63#define DRIVER_VERSION "5.0-0"
64
65MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
66MODULE_DESCRIPTION("Mellanox Connect-IB HCA IB driver");
67MODULE_LICENSE("Dual BSD/GPL");
68
69static char mlx5_version[] =

--- 15 unchanged lines hidden (view full) ---

85static struct workqueue_struct *mlx5_ib_event_wq;
86static LIST_HEAD(mlx5_ib_unaffiliated_port_list);
87static LIST_HEAD(mlx5_ib_dev_list);
88/*
89 * This mutex should be held when accessing either of the above lists
90 */
91static DEFINE_MUTEX(mlx5_ib_multiport_mutex);
92
73#define DRIVER_NAME "mlx5_ib"
74#define DRIVER_VERSION "5.0-0"
75
76MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
77MODULE_DESCRIPTION("Mellanox Connect-IB HCA IB driver");
78MODULE_LICENSE("Dual BSD/GPL");
79
80static char mlx5_version[] =

--- 15 unchanged lines hidden (view full) ---

96static struct workqueue_struct *mlx5_ib_event_wq;
97static LIST_HEAD(mlx5_ib_unaffiliated_port_list);
98static LIST_HEAD(mlx5_ib_dev_list);
99/*
100 * This mutex should be held when accessing either of the above lists
101 */
102static DEFINE_MUTEX(mlx5_ib_multiport_mutex);
103
104/* We can't use an array for xlt_emergency_page because dma_map_single
105 * doesn't work on kernel module memory
106 */
107static unsigned long xlt_emergency_page;
108static struct mutex xlt_emergency_page_mutex;
109
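/*
 * Illustrative sketch, not part of this diff: the helper names below are
 * hypothetical and the real allocation/use sites of xlt_emergency_page are
 * outside this hunk. The single module-wide page (which takes over the role
 * of the per-context upd_xlt_page removed further down) is serialized by the
 * mutex because only one such page exists for the whole driver.
 */
static void *example_get_xlt_emergency_page(void)
{
	mutex_lock(&xlt_emergency_page_mutex);
	return (void *)xlt_emergency_page;
}

static void example_put_xlt_emergency_page(void)
{
	mutex_unlock(&xlt_emergency_page_mutex);
}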
93struct mlx5_ib_dev *mlx5_ib_get_ibdev_from_mpi(struct mlx5_ib_multiport_info *mpi)
94{
95 struct mlx5_ib_dev *dev;
96
97 mutex_lock(&mlx5_ib_multiport_mutex);
98 dev = mpi->ibdev;
99 mutex_unlock(&mlx5_ib_multiport_mutex);
100 return dev;

--- 24 unchanged lines hidden (view full) ---

125static int get_port_state(struct ib_device *ibdev,
126 u8 port_num,
127 enum ib_port_state *state)
128{
129 struct ib_port_attr attr;
130 int ret;
131
132 memset(&attr, 0, sizeof(attr));
110struct mlx5_ib_dev *mlx5_ib_get_ibdev_from_mpi(struct mlx5_ib_multiport_info *mpi)
111{
112 struct mlx5_ib_dev *dev;
113
114 mutex_lock(&mlx5_ib_multiport_mutex);
115 dev = mpi->ibdev;
116 mutex_unlock(&mlx5_ib_multiport_mutex);
117 return dev;

--- 24 unchanged lines hidden (view full) ---

142static int get_port_state(struct ib_device *ibdev,
143 u8 port_num,
144 enum ib_port_state *state)
145{
146 struct ib_port_attr attr;
147 int ret;
148
149 memset(&attr, 0, sizeof(attr));
133 ret = mlx5_ib_query_port(ibdev, port_num, &attr);
150 ret = ibdev->query_port(ibdev, port_num, &attr);
134 if (!ret)
135 *state = attr.state;
136 return ret;
137}
138
139static int mlx5_netdev_event(struct notifier_block *this,
140 unsigned long event, void *ptr)
141{

--- 7 unchanged lines hidden (view full) ---

149 mdev = mlx5_ib_get_native_port_mdev(ibdev, port_num, NULL);
150 if (!mdev)
151 return NOTIFY_DONE;
152
153 switch (event) {
154 case NETDEV_REGISTER:
155 case NETDEV_UNREGISTER:
156 write_lock(&roce->netdev_lock);
151 if (!ret)
152 *state = attr.state;
153 return ret;
154}
155
156static int mlx5_netdev_event(struct notifier_block *this,
157 unsigned long event, void *ptr)
158{

--- 7 unchanged lines hidden (view full) ---

166 mdev = mlx5_ib_get_native_port_mdev(ibdev, port_num, NULL);
167 if (!mdev)
168 return NOTIFY_DONE;
169
170 switch (event) {
171 case NETDEV_REGISTER:
172 case NETDEV_UNREGISTER:
173 write_lock(&roce->netdev_lock);
174 if (ibdev->rep) {
175 struct mlx5_eswitch *esw = ibdev->mdev->priv.eswitch;
176 struct net_device *rep_ndev;
157
177
158 if (ndev->dev.parent == &mdev->pdev->dev)
159 roce->netdev = (event == NETDEV_UNREGISTER) ?
178 rep_ndev = mlx5_ib_get_rep_netdev(esw,
179 ibdev->rep->vport);
180 if (rep_ndev == ndev)
181 roce->netdev = (event == NETDEV_UNREGISTER) ?
160 NULL : ndev;
182 NULL : ndev;
183 } else if (ndev->dev.parent == &ibdev->mdev->pdev->dev) {
184 roce->netdev = (event == NETDEV_UNREGISTER) ?
185 NULL : ndev;
186 }
161 write_unlock(&roce->netdev_lock);
162 break;
163
164 case NETDEV_CHANGE:
165 case NETDEV_UP:
166 case NETDEV_DOWN: {
167 struct net_device *lag_ndev = mlx5_lag_get_roce_netdev(mdev);
168 struct net_device *upper = NULL;

--- 71 unchanged lines hidden (view full) ---

240 u8 *native_port_num)
241{
242 enum rdma_link_layer ll = mlx5_ib_port_link_layer(&ibdev->ib_dev,
243 ib_port_num);
244 struct mlx5_core_dev *mdev = NULL;
245 struct mlx5_ib_multiport_info *mpi;
246 struct mlx5_ib_port *port;
247
187 write_unlock(&roce->netdev_lock);
188 break;
189
190 case NETDEV_CHANGE:
191 case NETDEV_UP:
192 case NETDEV_DOWN: {
193 struct net_device *lag_ndev = mlx5_lag_get_roce_netdev(mdev);
194 struct net_device *upper = NULL;

--- 71 unchanged lines hidden (view full) ---

266 u8 *native_port_num)
267{
268 enum rdma_link_layer ll = mlx5_ib_port_link_layer(&ibdev->ib_dev,
269 ib_port_num);
270 struct mlx5_core_dev *mdev = NULL;
271 struct mlx5_ib_multiport_info *mpi;
272 struct mlx5_ib_port *port;
273
274 if (!mlx5_core_mp_enabled(ibdev->mdev) ||
275 ll != IB_LINK_LAYER_ETHERNET) {
276 if (native_port_num)
277 *native_port_num = ib_port_num;
278 return ibdev->mdev;
279 }
280
248 if (native_port_num)
249 *native_port_num = 1;
250
281 if (native_port_num)
282 *native_port_num = 1;
283
251 if (!mlx5_core_mp_enabled(ibdev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
252 return ibdev->mdev;
253
254 port = &ibdev->port[ib_port_num - 1];
255 if (!port)
256 return NULL;
257
258 spin_lock(&port->mp.mpi_lock);
259 mpi = ibdev->port[ib_port_num - 1].mp.mpi;
260 if (mpi && !mpi->unaffiliate) {
261 mdev = mpi->mdev;

--- 117 unchanged lines hidden (view full) ---

379 /* Possible bad flows are checked before filling out props so in case
380 * of an error it will still be zeroed out.
381 */
382 err = mlx5_query_port_eth_proto_oper(mdev, &eth_prot_oper,
383 mdev_port_num);
384 if (err)
385 goto out;
386
284 port = &ibdev->port[ib_port_num - 1];
285 if (!port)
286 return NULL;
287
288 spin_lock(&port->mp.mpi_lock);
289 mpi = ibdev->port[ib_port_num - 1].mp.mpi;
290 if (mpi && !mpi->unaffiliate) {
291 mdev = mpi->mdev;

--- 117 unchanged lines hidden (view full) ---

409 /* Possible bad flows are checked before filling out props so in case
410 * of an error it will still be zeroed out.
411 */
412 err = mlx5_query_port_eth_proto_oper(mdev, &eth_prot_oper,
413 mdev_port_num);
414 if (err)
415 goto out;
416
417 props->active_width = IB_WIDTH_4X;
418 props->active_speed = IB_SPEED_QDR;
419
387 translate_eth_proto_oper(eth_prot_oper, &props->active_speed,
388 &props->active_width);
389
390 props->port_cap_flags |= IB_PORT_CM_SUP;
391 props->port_cap_flags |= IB_PORT_IP_BASED_GIDS;
392
393 props->gid_tbl_len = MLX5_CAP_ROCE(dev->mdev,
394 roce_address_table_size);

--- 78 unchanged lines hidden (view full) ---

473 mlx5_ib_warn(dev, "Unexpected GID type %u\n", gid_type);
474 }
475
476 return mlx5_core_roce_gid_set(dev->mdev, index, roce_version,
477 roce_l3_type, gid->raw, mac, vlan,
478 vlan_id, port_num);
479}
480
420 translate_eth_proto_oper(eth_prot_oper, &props->active_speed,
421 &props->active_width);
422
423 props->port_cap_flags |= IB_PORT_CM_SUP;
424 props->port_cap_flags |= IB_PORT_IP_BASED_GIDS;
425
426 props->gid_tbl_len = MLX5_CAP_ROCE(dev->mdev,
427 roce_address_table_size);

--- 78 unchanged lines hidden (view full) ---

506 mlx5_ib_warn(dev, "Unexpected GID type %u\n", gid_type);
507 }
508
509 return mlx5_core_roce_gid_set(dev->mdev, index, roce_version,
510 roce_l3_type, gid->raw, mac, vlan,
511 vlan_id, port_num);
512}
513
481static int mlx5_ib_add_gid(struct ib_device *device, u8 port_num,
482 unsigned int index, const union ib_gid *gid,
514static int mlx5_ib_add_gid(const union ib_gid *gid,
483 const struct ib_gid_attr *attr,
484 __always_unused void **context)
485{
515 const struct ib_gid_attr *attr,
516 __always_unused void **context)
517{
486 return set_roce_addr(to_mdev(device), port_num, index, gid, attr);
518 return set_roce_addr(to_mdev(attr->device), attr->port_num,
519 attr->index, gid, attr);
487}
488
520}
521
489static int mlx5_ib_del_gid(struct ib_device *device, u8 port_num,
490 unsigned int index, __always_unused void **context)
522static int mlx5_ib_del_gid(const struct ib_gid_attr *attr,
523 __always_unused void **context)
491{
524{
492 return set_roce_addr(to_mdev(device), port_num, index, NULL, NULL);
525 return set_roce_addr(to_mdev(attr->device), attr->port_num,
526 attr->index, NULL, NULL);
493}
494
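/*
 * Sketch of the callback convention the rewritten add_gid/del_gid follow
 * (an assumption about the core verbs API change this diff adapts to, not
 * something defined in this file): the device, port number and table index
 * now arrive inside struct ib_gid_attr instead of as separate arguments.
 */
static int example_add_gid(const union ib_gid *gid,
			   const struct ib_gid_attr *attr,
			   void **context)
{
	/* attr->device, attr->port_num and attr->index identify the entry */
	return set_roce_addr(to_mdev(attr->device), attr->port_num,
			     attr->index, gid, attr);
}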
495__be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num,
496 int index)
497{
498 struct ib_gid_attr attr;
499 union ib_gid gid;
500
501 if (ib_get_cached_gid(&dev->ib_dev, port_num, index, &gid, &attr))
502 return 0;
503
527}
528
529__be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num,
530 int index)
531{
532 struct ib_gid_attr attr;
533 union ib_gid gid;
534
535 if (ib_get_cached_gid(&dev->ib_dev, port_num, index, &gid, &attr))
536 return 0;
537
504 if (!attr.ndev)
505 return 0;
506
507 dev_put(attr.ndev);
508
509 if (attr.gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP)
510 return 0;
511
512 return cpu_to_be16(MLX5_CAP_ROCE(dev->mdev, r_roce_min_src_udp_port));
513}
514
515int mlx5_get_roce_gid_type(struct mlx5_ib_dev *dev, u8 port_num,
516 int index, enum ib_gid_type *gid_type)
517{
518 struct ib_gid_attr attr;
519 union ib_gid gid;
520 int ret;
521
522 ret = ib_get_cached_gid(&dev->ib_dev, port_num, index, &gid, &attr);
523 if (ret)
524 return ret;
525
538 dev_put(attr.ndev);
539
540 if (attr.gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP)
541 return 0;
542
543 return cpu_to_be16(MLX5_CAP_ROCE(dev->mdev, r_roce_min_src_udp_port));
544}
545
546int mlx5_get_roce_gid_type(struct mlx5_ib_dev *dev, u8 port_num,
547 int index, enum ib_gid_type *gid_type)
548{
549 struct ib_gid_attr attr;
550 union ib_gid gid;
551 int ret;
552
553 ret = ib_get_cached_gid(&dev->ib_dev, port_num, index, &gid, &attr);
554 if (ret)
555 return ret;
556
526 if (!attr.ndev)
527 return -ENODEV;
528
529 dev_put(attr.ndev);
530
531 *gid_type = attr.gid_type;
532
533 return 0;
534}
535
536static int mlx5_use_mad_ifc(struct mlx5_ib_dev *dev)

--- 287 unchanged lines hidden (view full) ---

824 MLX5_RX_HASH_DST_IPV4 |
825 MLX5_RX_HASH_SRC_IPV6 |
826 MLX5_RX_HASH_DST_IPV6 |
827 MLX5_RX_HASH_SRC_PORT_TCP |
828 MLX5_RX_HASH_DST_PORT_TCP |
829 MLX5_RX_HASH_SRC_PORT_UDP |
830 MLX5_RX_HASH_DST_PORT_UDP |
831 MLX5_RX_HASH_INNER;
557 dev_put(attr.ndev);
558
559 *gid_type = attr.gid_type;
560
561 return 0;
562}
563
564static int mlx5_use_mad_ifc(struct mlx5_ib_dev *dev)

--- 287 unchanged lines hidden (view full) ---

852 MLX5_RX_HASH_DST_IPV4 |
853 MLX5_RX_HASH_SRC_IPV6 |
854 MLX5_RX_HASH_DST_IPV6 |
855 MLX5_RX_HASH_SRC_PORT_TCP |
856 MLX5_RX_HASH_DST_PORT_TCP |
857 MLX5_RX_HASH_SRC_PORT_UDP |
858 MLX5_RX_HASH_DST_PORT_UDP |
859 MLX5_RX_HASH_INNER;
860 if (mlx5_accel_ipsec_device_caps(dev->mdev) &
861 MLX5_ACCEL_IPSEC_CAP_DEVICE)
862 resp.rss_caps.rx_hash_fields_mask |=
863 MLX5_RX_HASH_IPSEC_SPI;
832 resp.response_length += sizeof(resp.rss_caps);
833 }
834 } else {
835 if (field_avail(typeof(resp), tso_caps, uhw->outlen))
836 resp.response_length += sizeof(resp.tso_caps);
837 if (field_avail(typeof(resp), rss_caps, uhw->outlen))
838 resp.response_length += sizeof(resp.rss_caps);
839 }

--- 15 unchanged lines hidden (view full) ---

855 if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) &&
856 MLX5_CAP_ETH(dev->mdev, scatter_fcs) &&
857 raw_support) {
858 /* Legacy bit to support old userspace libraries */
859 props->device_cap_flags |= IB_DEVICE_RAW_SCATTER_FCS;
860 props->raw_packet_caps |= IB_RAW_PACKET_CAP_SCATTER_FCS;
861 }
862
864 resp.response_length += sizeof(resp.rss_caps);
865 }
866 } else {
867 if (field_avail(typeof(resp), tso_caps, uhw->outlen))
868 resp.response_length += sizeof(resp.tso_caps);
869 if (field_avail(typeof(resp), rss_caps, uhw->outlen))
870 resp.response_length += sizeof(resp.rss_caps);
871 }

--- 15 unchanged lines hidden (view full) ---

887 if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) &&
888 MLX5_CAP_ETH(dev->mdev, scatter_fcs) &&
889 raw_support) {
890 /* Legacy bit to support old userspace libraries */
891 props->device_cap_flags |= IB_DEVICE_RAW_SCATTER_FCS;
892 props->raw_packet_caps |= IB_RAW_PACKET_CAP_SCATTER_FCS;
893 }
894
895 if (MLX5_CAP_DEV_MEM(mdev, memic)) {
896 props->max_dm_size =
897 MLX5_CAP_DEV_MEM(mdev, max_memic_size);
898 }
899
863 if (mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_BYPASS))
864 props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;
865
866 if (MLX5_CAP_GEN(mdev, end_pad))
867 props->device_cap_flags |= IB_DEVICE_PCI_WRITE_END_PADDING;
868
869 props->vendor_part_id = mdev->pdev->device;
870 props->hw_ver = mdev->pdev->revision;

--- 89 unchanged lines hidden (view full) ---

960 if (MLX5_CAP_QOS(mdev, packet_pacing) &&
961 MLX5_CAP_GEN(mdev, qos)) {
962 resp.packet_pacing_caps.qp_rate_limit_max =
963 MLX5_CAP_QOS(mdev, packet_pacing_max_rate);
964 resp.packet_pacing_caps.qp_rate_limit_min =
965 MLX5_CAP_QOS(mdev, packet_pacing_min_rate);
966 resp.packet_pacing_caps.supported_qpts |=
967 1 << IB_QPT_RAW_PACKET;
900 if (mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_BYPASS))
901 props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;
902
903 if (MLX5_CAP_GEN(mdev, end_pad))
904 props->device_cap_flags |= IB_DEVICE_PCI_WRITE_END_PADDING;
905
906 props->vendor_part_id = mdev->pdev->device;
907 props->hw_ver = mdev->pdev->revision;

--- 89 unchanged lines hidden (view full) ---

997 if (MLX5_CAP_QOS(mdev, packet_pacing) &&
998 MLX5_CAP_GEN(mdev, qos)) {
999 resp.packet_pacing_caps.qp_rate_limit_max =
1000 MLX5_CAP_QOS(mdev, packet_pacing_max_rate);
1001 resp.packet_pacing_caps.qp_rate_limit_min =
1002 MLX5_CAP_QOS(mdev, packet_pacing_min_rate);
1003 resp.packet_pacing_caps.supported_qpts |=
1004 1 << IB_QPT_RAW_PACKET;
1005 if (MLX5_CAP_QOS(mdev, packet_pacing_burst_bound) &&
1006 MLX5_CAP_QOS(mdev, packet_pacing_typical_size))
1007 resp.packet_pacing_caps.cap_flags |=
1008 MLX5_IB_PP_SUPPORT_BURST;
968 }
969 resp.response_length += sizeof(resp.packet_pacing_caps);
970 }
971
972 if (field_avail(typeof(resp), mlx5_ib_support_multi_pkt_send_wqes,
973 uhw->outlen)) {
974 if (MLX5_CAP_ETH(mdev, multi_pkt_send_wqe))
975 resp.mlx5_ib_support_multi_pkt_send_wqes =

--- 287 unchanged lines hidden (view full) ---

1263 count = mlx5_core_reserved_gids_count(mdev);
1264 if (put_mdev)
1265 mlx5_ib_put_native_port_mdev(dev, port);
1266 props->gid_tbl_len -= count;
1267 }
1268 return ret;
1269}
1270
1009 }
1010 resp.response_length += sizeof(resp.packet_pacing_caps);
1011 }
1012
1013 if (field_avail(typeof(resp), mlx5_ib_support_multi_pkt_send_wqes,
1014 uhw->outlen)) {
1015 if (MLX5_CAP_ETH(mdev, multi_pkt_send_wqe))
1016 resp.mlx5_ib_support_multi_pkt_send_wqes =

--- 287 unchanged lines hidden (view full) ---

1304 count = mlx5_core_reserved_gids_count(mdev);
1305 if (put_mdev)
1306 mlx5_ib_put_native_port_mdev(dev, port);
1307 props->gid_tbl_len -= count;
1308 }
1309 return ret;
1310}
1311
1312static int mlx5_ib_rep_query_port(struct ib_device *ibdev, u8 port,
1313 struct ib_port_attr *props)
1314{
1315 int ret;
1316
1317 /* Only link layer == ethernet is valid for representors */
1318 ret = mlx5_query_port_roce(ibdev, port, props);
1319 if (ret || !props)
1320 return ret;
1321
1322 /* We don't support GIDs */
1323 props->gid_tbl_len = 0;
1324
1325 return ret;
1326}
1327
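/*
 * Illustrative wiring, assumed to happen in the representor profile outside
 * this hunk (the helper below is hypothetical): installing the representor
 * variant as the per-device query_port op is what lets the new
 * ibdev->query_port() indirection in get_port_state() resolve to
 * mlx5_ib_rep_query_port() for switchdev representors, which report an
 * Ethernet-only port with an empty GID table.
 */
static void example_rep_setup_port_query(struct mlx5_ib_dev *dev)
{
	dev->ib_dev.query_port = mlx5_ib_rep_query_port;
}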
1271static int mlx5_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
1272 union ib_gid *gid)
1273{
1274 struct mlx5_ib_dev *dev = to_mdev(ibdev);
1275 struct mlx5_core_dev *mdev = dev->mdev;
1276
1277 switch (mlx5_get_vport_access_method(ibdev)) {
1278 case MLX5_VPORT_ACCESS_METHOD_MAD:

--- 350 unchanged lines hidden (view full) ---

1629 req.max_cqe_version);
1630 resp.log_uar_size = MLX5_CAP_GEN(dev->mdev, uar_4k) ?
1631 MLX5_ADAPTER_PAGE_SHIFT : PAGE_SHIFT;
1632 resp.num_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ?
1633 MLX5_CAP_GEN(dev->mdev, num_of_uars_per_page) : 1;
1634 resp.response_length = min(offsetof(typeof(resp), response_length) +
1635 sizeof(resp.response_length), udata->outlen);
1636
1328static int mlx5_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
1329 union ib_gid *gid)
1330{
1331 struct mlx5_ib_dev *dev = to_mdev(ibdev);
1332 struct mlx5_core_dev *mdev = dev->mdev;
1333
1334 switch (mlx5_get_vport_access_method(ibdev)) {
1335 case MLX5_VPORT_ACCESS_METHOD_MAD:

--- 350 unchanged lines hidden (view full) ---

1686 req.max_cqe_version);
1687 resp.log_uar_size = MLX5_CAP_GEN(dev->mdev, uar_4k) ?
1688 MLX5_ADAPTER_PAGE_SHIFT : PAGE_SHIFT;
1689 resp.num_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ?
1690 MLX5_CAP_GEN(dev->mdev, num_of_uars_per_page) : 1;
1691 resp.response_length = min(offsetof(typeof(resp), response_length) +
1692 sizeof(resp.response_length), udata->outlen);
1693
1694 if (mlx5_accel_ipsec_device_caps(dev->mdev) & MLX5_ACCEL_IPSEC_CAP_DEVICE) {
1695 if (mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_EGRESS))
1696 resp.flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM;
1697 if (mlx5_accel_ipsec_device_caps(dev->mdev) & MLX5_ACCEL_IPSEC_CAP_REQUIRED_METADATA)
1698 resp.flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_REQ_METADATA;
1699 if (MLX5_CAP_FLOWTABLE(dev->mdev, flow_table_properties_nic_receive.ft_field_support.outer_esp_spi))
1700 resp.flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_SPI_STEERING;
1701 if (mlx5_accel_ipsec_device_caps(dev->mdev) & MLX5_ACCEL_IPSEC_CAP_TX_IV_IS_ESN)
1702 resp.flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_TX_IV_IS_ESN;
1703 /* MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_FULL_OFFLOAD is currently always 0 */
1704 }
1705
1637 context = kzalloc(sizeof(*context), GFP_KERNEL);
1638 if (!context)
1639 return ERR_PTR(-ENOMEM);
1640
1641 lib_uar_4k = req.lib_caps & MLX5_LIB_CAP_4K_UAR;
1642 bfregi = &context->bfregi;
1643
1644 /* updates req->total_num_bfregs */

--- 21 unchanged lines hidden (view full) ---

1666 err = allocate_uars(dev, context);
1667 if (err)
1668 goto out_sys_pages;
1669
1670#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
1671 context->ibucontext.invalidate_range = &mlx5_ib_invalidate_range;
1672#endif
1673
1706 context = kzalloc(sizeof(*context), GFP_KERNEL);
1707 if (!context)
1708 return ERR_PTR(-ENOMEM);
1709
1710 lib_uar_4k = req.lib_caps & MLX5_LIB_CAP_4K_UAR;
1711 bfregi = &context->bfregi;
1712
1713 /* updates req->total_num_bfregs */

--- 21 unchanged lines hidden (view full) ---

1735 err = allocate_uars(dev, context);
1736 if (err)
1737 goto out_sys_pages;
1738
1739#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
1740 context->ibucontext.invalidate_range = &mlx5_ib_invalidate_range;
1741#endif
1742
1674 context->upd_xlt_page = __get_free_page(GFP_KERNEL);
1675 if (!context->upd_xlt_page) {
1676 err = -ENOMEM;
1677 goto out_uars;
1678 }
1679 mutex_init(&context->upd_xlt_page_mutex);
1680
1681 if (MLX5_CAP_GEN(dev->mdev, log_max_transport_domain)) {
1682 err = mlx5_ib_alloc_transport_domain(dev, &context->tdn);
1683 if (err)
1743 if (MLX5_CAP_GEN(dev->mdev, log_max_transport_domain)) {
1744 err = mlx5_ib_alloc_transport_domain(dev, &context->tdn);
1745 if (err)
1684 goto out_page;
1746 goto out_uars;
1685 }
1686
1687 INIT_LIST_HEAD(&context->vma_private_list);
1688 mutex_init(&context->vma_private_list_mutex);
1689 INIT_LIST_HEAD(&context->db_page_list);
1690 mutex_init(&context->db_page_mutex);
1691
1692 resp.tot_bfregs = req.total_num_bfregs;

--- 60 unchanged lines hidden (view full) ---

1753 print_lib_caps(dev, context->lib_caps);
1754
1755 return &context->ibucontext;
1756
1757out_td:
1758 if (MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
1759 mlx5_ib_dealloc_transport_domain(dev, context->tdn);
1760
1747 }
1748
1749 INIT_LIST_HEAD(&context->vma_private_list);
1750 mutex_init(&context->vma_private_list_mutex);
1751 INIT_LIST_HEAD(&context->db_page_list);
1752 mutex_init(&context->db_page_mutex);
1753
1754 resp.tot_bfregs = req.total_num_bfregs;

--- 60 unchanged lines hidden (view full) ---

1815 print_lib_caps(dev, context->lib_caps);
1816
1817 return &context->ibucontext;
1818
1819out_td:
1820 if (MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
1821 mlx5_ib_dealloc_transport_domain(dev, context->tdn);
1822
1761out_page:
1762 free_page(context->upd_xlt_page);
1763
1764out_uars:
1765 deallocate_uars(dev, context);
1766
1767out_sys_pages:
1768 kfree(bfregi->sys_pages);
1769
1770out_count:
1771 kfree(bfregi->count);

--- 9 unchanged lines hidden (view full) ---

1781 struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
1782 struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
1783 struct mlx5_bfreg_info *bfregi;
1784
1785 bfregi = &context->bfregi;
1786 if (MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
1787 mlx5_ib_dealloc_transport_domain(dev, context->tdn);
1788
1823out_uars:
1824 deallocate_uars(dev, context);
1825
1826out_sys_pages:
1827 kfree(bfregi->sys_pages);
1828
1829out_count:
1830 kfree(bfregi->count);

--- 9 unchanged lines hidden (view full) ---

1840 struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
1841 struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
1842 struct mlx5_bfreg_info *bfregi;
1843
1844 bfregi = &context->bfregi;
1845 if (MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
1846 mlx5_ib_dealloc_transport_domain(dev, context->tdn);
1847
1789 free_page(context->upd_xlt_page);
1790 deallocate_uars(dev, context);
1791 kfree(bfregi->sys_pages);
1792 kfree(bfregi->count);
1793 kfree(context);
1794
1795 return 0;
1796}
1797

--- 159 unchanged lines hidden (view full) ---

1957{
1958 switch (cmd) {
1959 case MLX5_IB_MMAP_WC_PAGE:
1960 return "WC";
1961 case MLX5_IB_MMAP_REGULAR_PAGE:
1962 return "best effort WC";
1963 case MLX5_IB_MMAP_NC_PAGE:
1964 return "NC";
1848 deallocate_uars(dev, context);
1849 kfree(bfregi->sys_pages);
1850 kfree(bfregi->count);
1851 kfree(context);
1852
1853 return 0;
1854}
1855

--- 159 unchanged lines hidden (view full) ---

2015{
2016 switch (cmd) {
2017 case MLX5_IB_MMAP_WC_PAGE:
2018 return "WC";
2019 case MLX5_IB_MMAP_REGULAR_PAGE:
2020 return "best effort WC";
2021 case MLX5_IB_MMAP_NC_PAGE:
2022 return "NC";
2023 case MLX5_IB_MMAP_DEVICE_MEM:
2024 return "Device Memory";
1965 default:
1966 return NULL;
1967 }
1968}
1969
1970static int mlx5_ib_mmap_clock_info_page(struct mlx5_ib_dev *dev,
1971 struct vm_area_struct *vma,
1972 struct mlx5_ib_ucontext *context)

--- 142 unchanged lines hidden (view full) ---

2115 mlx5_cmd_free_uar(dev->mdev, idx);
2116
2117free_bfreg:
2118 mlx5_ib_free_bfreg(dev, bfregi, bfreg_dyn_idx);
2119
2120 return err;
2121}
2122
2025 default:
2026 return NULL;
2027 }
2028}
2029
2030static int mlx5_ib_mmap_clock_info_page(struct mlx5_ib_dev *dev,
2031 struct vm_area_struct *vma,
2032 struct mlx5_ib_ucontext *context)

--- 142 unchanged lines hidden (view full) ---

2175 mlx5_cmd_free_uar(dev->mdev, idx);
2176
2177free_bfreg:
2178 mlx5_ib_free_bfreg(dev, bfregi, bfreg_dyn_idx);
2179
2180 return err;
2181}
2182
2183static int dm_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
2184{
2185 struct mlx5_ib_ucontext *mctx = to_mucontext(context);
2186 struct mlx5_ib_dev *dev = to_mdev(context->device);
2187 u16 page_idx = get_extended_index(vma->vm_pgoff);
2188 size_t map_size = vma->vm_end - vma->vm_start;
2189 u32 npages = map_size >> PAGE_SHIFT;
2190 phys_addr_t pfn;
2191 pgprot_t prot;
2192
2193 if (find_next_zero_bit(mctx->dm_pages, page_idx + npages, page_idx) !=
2194 page_idx + npages)
2195 return -EINVAL;
2196
2197 pfn = ((pci_resource_start(dev->mdev->pdev, 0) +
2198 MLX5_CAP64_DEV_MEM(dev->mdev, memic_bar_start_addr)) >>
2199 PAGE_SHIFT) +
2200 page_idx;
2201 prot = pgprot_writecombine(vma->vm_page_prot);
2202 vma->vm_page_prot = prot;
2203
2204 if (io_remap_pfn_range(vma, vma->vm_start, pfn, map_size,
2205 vma->vm_page_prot))
2206 return -EAGAIN;
2207
2208 return mlx5_ib_set_vma_data(vma, mctx);
2209}
2210
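/*
 * Sketch of the ownership test used by dm_mmap() above: the allocation path
 * sets a bit in dm_pages for every MEMIC page handed to this context, so the
 * mmap succeeds only if no zero bit exists in [page_idx, page_idx + npages)
 * (i.e. find_next_zero_bit() runs off the end of the requested range).
 */
static bool example_ctx_owns_dm_range(struct mlx5_ib_ucontext *mctx,
				      u32 page_idx, u32 npages)
{
	return find_next_zero_bit(mctx->dm_pages, page_idx + npages,
				  page_idx) == page_idx + npages;
}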
2123static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
2124{
2125 struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
2126 struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
2127 unsigned long command;
2128 phys_addr_t pfn;
2129
2130 command = get_command(vma->vm_pgoff);

--- 28 unchanged lines hidden (view full) ---

2159
2160 mlx5_ib_dbg(dev, "mapped internal timer at 0x%lx, PA 0x%llx\n",
2161 vma->vm_start,
2162 (unsigned long long)pfn << PAGE_SHIFT);
2163 break;
2164 case MLX5_IB_MMAP_CLOCK_INFO:
2165 return mlx5_ib_mmap_clock_info_page(dev, vma, context);
2166
2211static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
2212{
2213 struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
2214 struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
2215 unsigned long command;
2216 phys_addr_t pfn;
2217
2218 command = get_command(vma->vm_pgoff);

--- 28 unchanged lines hidden (view full) ---

2247
2248 mlx5_ib_dbg(dev, "mapped internal timer at 0x%lx, PA 0x%llx\n",
2249 vma->vm_start,
2250 (unsigned long long)pfn << PAGE_SHIFT);
2251 break;
2252 case MLX5_IB_MMAP_CLOCK_INFO:
2253 return mlx5_ib_mmap_clock_info_page(dev, vma, context);
2254
2255 case MLX5_IB_MMAP_DEVICE_MEM:
2256 return dm_mmap(ibcontext, vma);
2257
2167 default:
2168 return -EINVAL;
2169 }
2170
2171 return 0;
2172}
2173
2258 default:
2259 return -EINVAL;
2260 }
2261
2262 return 0;
2263}
2264
2265struct ib_dm *mlx5_ib_alloc_dm(struct ib_device *ibdev,
2266 struct ib_ucontext *context,
2267 struct ib_dm_alloc_attr *attr,
2268 struct uverbs_attr_bundle *attrs)
2269{
2270 u64 act_size = roundup(attr->length, MLX5_MEMIC_BASE_SIZE);
2271 struct mlx5_memic *memic = &to_mdev(ibdev)->memic;
2272 phys_addr_t memic_addr;
2273 struct mlx5_ib_dm *dm;
2274 u64 start_offset;
2275 u32 page_idx;
2276 int err;
2277
2278 dm = kzalloc(sizeof(*dm), GFP_KERNEL);
2279 if (!dm)
2280 return ERR_PTR(-ENOMEM);
2281
2282 mlx5_ib_dbg(to_mdev(ibdev), "alloc_memic req: user_length=0x%llx act_length=0x%llx log_alignment=%d\n",
2283 attr->length, act_size, attr->alignment);
2284
2285 err = mlx5_cmd_alloc_memic(memic, &memic_addr,
2286 act_size, attr->alignment);
2287 if (err)
2288 goto err_free;
2289
2290 start_offset = memic_addr & ~PAGE_MASK;
2291 page_idx = (memic_addr - pci_resource_start(memic->dev->pdev, 0) -
2292 MLX5_CAP64_DEV_MEM(memic->dev, memic_bar_start_addr)) >>
2293 PAGE_SHIFT;
2294
2295 err = uverbs_copy_to(attrs,
2296 MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
2297 &start_offset, sizeof(start_offset));
2298 if (err)
2299 goto err_dealloc;
2300
2301 err = uverbs_copy_to(attrs,
2302 MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX,
2303 &page_idx, sizeof(page_idx));
2304 if (err)
2305 goto err_dealloc;
2306
2307 bitmap_set(to_mucontext(context)->dm_pages, page_idx,
2308 DIV_ROUND_UP(act_size, PAGE_SIZE));
2309
2310 dm->dev_addr = memic_addr;
2311
2312 return &dm->ibdm;
2313
2314err_dealloc:
2315 mlx5_cmd_dealloc_memic(memic, memic_addr,
2316 act_size);
2317err_free:
2318 kfree(dm);
2319 return ERR_PTR(err);
2320}
2321
2322int mlx5_ib_dealloc_dm(struct ib_dm *ibdm)
2323{
2324 struct mlx5_memic *memic = &to_mdev(ibdm->device)->memic;
2325 struct mlx5_ib_dm *dm = to_mdm(ibdm);
2326 u64 act_size = roundup(dm->ibdm.length, MLX5_MEMIC_BASE_SIZE);
2327 u32 page_idx;
2328 int ret;
2329
2330 ret = mlx5_cmd_dealloc_memic(memic, dm->dev_addr, act_size);
2331 if (ret)
2332 return ret;
2333
2334 page_idx = (dm->dev_addr - pci_resource_start(memic->dev->pdev, 0) -
2335 MLX5_CAP64_DEV_MEM(memic->dev, memic_bar_start_addr)) >>
2336 PAGE_SHIFT;
2337 bitmap_clear(to_mucontext(ibdm->uobject->context)->dm_pages,
2338 page_idx,
2339 DIV_ROUND_UP(act_size, PAGE_SIZE));
2340
2341 kfree(dm);
2342
2343 return 0;
2344}
2345
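/*
 * Sketch of the address bookkeeping shared by mlx5_ib_alloc_dm(),
 * mlx5_ib_dealloc_dm() and dm_mmap() above: page_idx locates the allocation
 * inside the MEMIC window of BAR 0 in PAGE_SIZE units, and start_offset is
 * the remainder inside the first page (the helper name is illustrative).
 */
static void example_memic_addr_to_user(struct mlx5_core_dev *mdev,
				       phys_addr_t memic_addr,
				       u64 *start_offset, u32 *page_idx)
{
	phys_addr_t memic_base = pci_resource_start(mdev->pdev, 0) +
				 MLX5_CAP64_DEV_MEM(mdev, memic_bar_start_addr);

	*start_offset = memic_addr & ~PAGE_MASK;
	*page_idx = (memic_addr - memic_base) >> PAGE_SHIFT;
}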
2174static struct ib_pd *mlx5_ib_alloc_pd(struct ib_device *ibdev,
2175 struct ib_ucontext *context,
2176 struct ib_udata *udata)
2177{
2178 struct mlx5_ib_alloc_pd_resp resp;
2179 struct mlx5_ib_pd *pd;
2180 int err;
2181

--- 99 unchanged lines hidden (view full) ---

2281/* Field is the last supported field */
2282#define FIELDS_NOT_SUPPORTED(filter, field)\
2283 memchr_inv((void *)&filter.field +\
2284 sizeof(filter.field), 0,\
2285 sizeof(filter) -\
2286 offsetof(typeof(filter), field) -\
2287 sizeof(filter.field))
2288
2346static struct ib_pd *mlx5_ib_alloc_pd(struct ib_device *ibdev,
2347 struct ib_ucontext *context,
2348 struct ib_udata *udata)
2349{
2350 struct mlx5_ib_alloc_pd_resp resp;
2351 struct mlx5_ib_pd *pd;
2352 int err;
2353

--- 99 unchanged lines hidden (view full) ---

2453/* Field is the last supported field */
2454#define FIELDS_NOT_SUPPORTED(filter, field)\
2455 memchr_inv((void *)&filter.field +\
2456 sizeof(filter.field), 0,\
2457 sizeof(filter) -\
2458 offsetof(typeof(filter), field) -\
2459 sizeof(filter.field))
2460
2289#define IPV4_VERSION 4
2290#define IPV6_VERSION 6
2461static int parse_flow_flow_action(const union ib_flow_spec *ib_spec,
2462 const struct ib_flow_attr *flow_attr,
2463 struct mlx5_flow_act *action)
2464{
2465 struct mlx5_ib_flow_action *maction = to_mflow_act(ib_spec->action.act);
2466
2467 switch (maction->ib_action.type) {
2468 case IB_FLOW_ACTION_ESP:
2469 /* Currently only AES_GCM keymat is supported by the driver */
2470 action->esp_id = (uintptr_t)maction->esp_aes_gcm.ctx;
2471 action->action |= flow_attr->flags & IB_FLOW_ATTR_FLAGS_EGRESS ?
2472 MLX5_FLOW_CONTEXT_ACTION_ENCRYPT :
2473 MLX5_FLOW_CONTEXT_ACTION_DECRYPT;
2474 return 0;
2475 default:
2476 return -EOPNOTSUPP;
2477 }
2478}
2479
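/*
 * Summary of parse_flow_flow_action() above, restated from the code:
 *
 *   IB_FLOW_ACTION_ESP + IB_FLOW_ATTR_FLAGS_EGRESS -> MLX5_FLOW_CONTEXT_ACTION_ENCRYPT
 *   IB_FLOW_ACTION_ESP, ingress                    -> MLX5_FLOW_CONTEXT_ACTION_DECRYPT
 *   any other verbs action type                    -> -EOPNOTSUPP
 *
 * In both ESP cases the AES-GCM xfrm context is carried in flow_act->esp_id.
 */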
2291static int parse_flow_attr(struct mlx5_core_dev *mdev, u32 *match_c,
2292 u32 *match_v, const union ib_flow_spec *ib_spec,
2480static int parse_flow_attr(struct mlx5_core_dev *mdev, u32 *match_c,
2481 u32 *match_v, const union ib_flow_spec *ib_spec,
2293 u32 *tag_id, bool *is_drop)
2482 const struct ib_flow_attr *flow_attr,
2483 struct mlx5_flow_act *action)
2294{
2295 void *misc_params_c = MLX5_ADDR_OF(fte_match_param, match_c,
2296 misc_parameters);
2297 void *misc_params_v = MLX5_ADDR_OF(fte_match_param, match_v,
2298 misc_parameters);
2299 void *headers_c;
2300 void *headers_v;
2301 int match_ipv;
2484{
2485 void *misc_params_c = MLX5_ADDR_OF(fte_match_param, match_c,
2486 misc_parameters);
2487 void *misc_params_v = MLX5_ADDR_OF(fte_match_param, match_v,
2488 misc_parameters);
2489 void *headers_c;
2490 void *headers_v;
2491 int match_ipv;
2492 int ret;
2302
2303 if (ib_spec->type & IB_FLOW_SPEC_INNER) {
2304 headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
2305 inner_headers);
2306 headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
2307 inner_headers);
2308 match_ipv = MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
2309 ft_field_support.inner_ip_version);

--- 58 unchanged lines hidden (view full) ---

2368 case IB_FLOW_SPEC_IPV4:
2369 if (FIELDS_NOT_SUPPORTED(ib_spec->ipv4.mask, LAST_IPV4_FIELD))
2370 return -EOPNOTSUPP;
2371
2372 if (match_ipv) {
2373 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2374 ip_version, 0xf);
2375 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2493
2494 if (ib_spec->type & IB_FLOW_SPEC_INNER) {
2495 headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
2496 inner_headers);
2497 headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
2498 inner_headers);
2499 match_ipv = MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
2500 ft_field_support.inner_ip_version);

--- 58 unchanged lines hidden (view full) ---

2559 case IB_FLOW_SPEC_IPV4:
2560 if (FIELDS_NOT_SUPPORTED(ib_spec->ipv4.mask, LAST_IPV4_FIELD))
2561 return -EOPNOTSUPP;
2562
2563 if (match_ipv) {
2564 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2565 ip_version, 0xf);
2566 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2376 ip_version, IPV4_VERSION);
2567 ip_version, MLX5_FS_IPV4_VERSION);
2377 } else {
2378 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2379 ethertype, 0xffff);
2380 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2381 ethertype, ETH_P_IP);
2382 }
2383
2384 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,

--- 22 unchanged lines hidden (view full) ---

2407 case IB_FLOW_SPEC_IPV6:
2408 if (FIELDS_NOT_SUPPORTED(ib_spec->ipv6.mask, LAST_IPV6_FIELD))
2409 return -EOPNOTSUPP;
2410
2411 if (match_ipv) {
2412 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2413 ip_version, 0xf);
2414 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2568 } else {
2569 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2570 ethertype, 0xffff);
2571 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2572 ethertype, ETH_P_IP);
2573 }
2574
2575 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,

--- 22 unchanged lines hidden (view full) ---

2598 case IB_FLOW_SPEC_IPV6:
2599 if (FIELDS_NOT_SUPPORTED(ib_spec->ipv6.mask, LAST_IPV6_FIELD))
2600 return -EOPNOTSUPP;
2601
2602 if (match_ipv) {
2603 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2604 ip_version, 0xf);
2605 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2415 ip_version, IPV6_VERSION);
2606 ip_version, MLX5_FS_IPV6_VERSION);
2416 } else {
2417 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2418 ethertype, 0xffff);
2419 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2420 ethertype, ETH_P_IPV6);
2421 }
2422
2423 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,

--- 20 unchanged lines hidden (view full) ---

2444 set_proto(headers_c, headers_v,
2445 ib_spec->ipv6.mask.next_hdr,
2446 ib_spec->ipv6.val.next_hdr);
2447
2448 set_flow_label(misc_params_c, misc_params_v,
2449 ntohl(ib_spec->ipv6.mask.flow_label),
2450 ntohl(ib_spec->ipv6.val.flow_label),
2451 ib_spec->type & IB_FLOW_SPEC_INNER);
2607 } else {
2608 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2609 ethertype, 0xffff);
2610 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2611 ethertype, ETH_P_IPV6);
2612 }
2613
2614 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,

--- 20 unchanged lines hidden (view full) ---

2635 set_proto(headers_c, headers_v,
2636 ib_spec->ipv6.mask.next_hdr,
2637 ib_spec->ipv6.val.next_hdr);
2638
2639 set_flow_label(misc_params_c, misc_params_v,
2640 ntohl(ib_spec->ipv6.mask.flow_label),
2641 ntohl(ib_spec->ipv6.val.flow_label),
2642 ib_spec->type & IB_FLOW_SPEC_INNER);
2643 break;
2644 case IB_FLOW_SPEC_ESP:
2645 if (ib_spec->esp.mask.seq)
2646 return -EOPNOTSUPP;
2452
2647
2648 MLX5_SET(fte_match_set_misc, misc_params_c, outer_esp_spi,
2649 ntohl(ib_spec->esp.mask.spi));
2650 MLX5_SET(fte_match_set_misc, misc_params_v, outer_esp_spi,
2651 ntohl(ib_spec->esp.val.spi));
2453 break;
2454 case IB_FLOW_SPEC_TCP:
2455 if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask,
2456 LAST_TCP_UDP_FIELD))
2457 return -EOPNOTSUPP;
2458
2459 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
2460 0xff);

--- 42 unchanged lines hidden (view full) ---

2503 break;
2504 case IB_FLOW_SPEC_ACTION_TAG:
2505 if (FIELDS_NOT_SUPPORTED(ib_spec->flow_tag,
2506 LAST_FLOW_TAG_FIELD))
2507 return -EOPNOTSUPP;
2508 if (ib_spec->flow_tag.tag_id >= BIT(24))
2509 return -EINVAL;
2510
2652 break;
2653 case IB_FLOW_SPEC_TCP:
2654 if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask,
2655 LAST_TCP_UDP_FIELD))
2656 return -EOPNOTSUPP;
2657
2658 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
2659 0xff);

--- 42 unchanged lines hidden (view full) ---

2702 break;
2703 case IB_FLOW_SPEC_ACTION_TAG:
2704 if (FIELDS_NOT_SUPPORTED(ib_spec->flow_tag,
2705 LAST_FLOW_TAG_FIELD))
2706 return -EOPNOTSUPP;
2707 if (ib_spec->flow_tag.tag_id >= BIT(24))
2708 return -EINVAL;
2709
2511 *tag_id = ib_spec->flow_tag.tag_id;
2710 action->flow_tag = ib_spec->flow_tag.tag_id;
2711 action->has_flow_tag = true;
2512 break;
2513 case IB_FLOW_SPEC_ACTION_DROP:
2514 if (FIELDS_NOT_SUPPORTED(ib_spec->drop,
2515 LAST_DROP_FIELD))
2516 return -EOPNOTSUPP;
2712 break;
2713 case IB_FLOW_SPEC_ACTION_DROP:
2714 if (FIELDS_NOT_SUPPORTED(ib_spec->drop,
2715 LAST_DROP_FIELD))
2716 return -EOPNOTSUPP;
2517 *is_drop = true;
2717 action->action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
2518 break;
2718 break;
2719 case IB_FLOW_SPEC_ACTION_HANDLE:
2720 ret = parse_flow_flow_action(ib_spec, flow_attr, action);
2721 if (ret)
2722 return ret;
2723 break;
2519 default:
2520 return -EINVAL;
2521 }
2522
2523 return 0;
2524}
2525
2526/* If a flow could catch both multicast and unicast packets,

--- 25 unchanged lines hidden (view full) ---

2552 eth_spec = (struct ib_flow_spec_eth *)flow_spec;
2553 return is_multicast_ether_addr(eth_spec->mask.dst_mac) &&
2554 is_multicast_ether_addr(eth_spec->val.dst_mac);
2555 }
2556
2557 return false;
2558}
2559
2724 default:
2725 return -EINVAL;
2726 }
2727
2728 return 0;
2729}
2730
2731/* If a flow could catch both multicast and unicast packets,

--- 25 unchanged lines hidden (view full) ---

2757 eth_spec = (struct ib_flow_spec_eth *)flow_spec;
2758 return is_multicast_ether_addr(eth_spec->mask.dst_mac) &&
2759 is_multicast_ether_addr(eth_spec->val.dst_mac);
2760 }
2761
2762 return false;
2763}
2764
2765enum valid_spec {
2766 VALID_SPEC_INVALID,
2767 VALID_SPEC_VALID,
2768 VALID_SPEC_NA,
2769};
2770
2771static enum valid_spec
2772is_valid_esp_aes_gcm(struct mlx5_core_dev *mdev,
2773 const struct mlx5_flow_spec *spec,
2774 const struct mlx5_flow_act *flow_act,
2775 bool egress)
2776{
2777 const u32 *match_c = spec->match_criteria;
2778 bool is_crypto =
2779 (flow_act->action & (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
2780 MLX5_FLOW_CONTEXT_ACTION_DECRYPT));
2781 bool is_ipsec = mlx5_fs_is_ipsec_flow(match_c);
2782 bool is_drop = flow_act->action & MLX5_FLOW_CONTEXT_ACTION_DROP;
2783
2784 /*
2785 * Currently only crypto is supported in egress; once regular egress
2786 * rules are supported, always return VALID_SPEC_NA.
2787 */
2788 if (!is_crypto)
2789 return egress ? VALID_SPEC_INVALID : VALID_SPEC_NA;
2790
2791 return is_crypto && is_ipsec &&
2792 (!egress || (!is_drop && !flow_act->has_flow_tag)) ?
2793 VALID_SPEC_VALID : VALID_SPEC_INVALID;
2794}
2795
2796static bool is_valid_spec(struct mlx5_core_dev *mdev,
2797 const struct mlx5_flow_spec *spec,
2798 const struct mlx5_flow_act *flow_act,
2799 bool egress)
2800{
2801 /* We currently only support IPsec egress flows */
2802 return is_valid_esp_aes_gcm(mdev, spec, flow_act, egress) != VALID_SPEC_INVALID;
2803}
2804
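/*
 * Decision table for is_valid_esp_aes_gcm() above, restated from the code:
 *
 *   crypto action  egress  IPsec match  drop/flow_tag  result
 *   -------------  ------  -----------  -------------  ------------------
 *   no             no      -            -              VALID_SPEC_NA
 *   no             yes     -            -              VALID_SPEC_INVALID
 *   yes            no      yes          -              VALID_SPEC_VALID
 *   yes            yes     yes          none           VALID_SPEC_VALID
 *   yes            yes     yes          drop or tag    VALID_SPEC_INVALID
 *   yes            any     no           -              VALID_SPEC_INVALID
 */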
2560static bool is_valid_ethertype(struct mlx5_core_dev *mdev,
2561 const struct ib_flow_attr *flow_attr,
2562 bool check_inner)
2563{
2564 union ib_flow_spec *ib_spec = (union ib_flow_spec *)(flow_attr + 1);
2565 int match_ipv = check_inner ?
2566 MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
2567 ft_field_support.inner_ip_version) :

--- 58 unchanged lines hidden (view full) ---

2626static int mlx5_ib_destroy_flow(struct ib_flow *flow_id)
2627{
2628 struct mlx5_ib_dev *dev = to_mdev(flow_id->qp->device);
2629 struct mlx5_ib_flow_handler *handler = container_of(flow_id,
2630 struct mlx5_ib_flow_handler,
2631 ibflow);
2632 struct mlx5_ib_flow_handler *iter, *tmp;
2633
2805static bool is_valid_ethertype(struct mlx5_core_dev *mdev,
2806 const struct ib_flow_attr *flow_attr,
2807 bool check_inner)
2808{
2809 union ib_flow_spec *ib_spec = (union ib_flow_spec *)(flow_attr + 1);
2810 int match_ipv = check_inner ?
2811 MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
2812 ft_field_support.inner_ip_version) :

--- 58 unchanged lines hidden (view full) ---

2871static int mlx5_ib_destroy_flow(struct ib_flow *flow_id)
2872{
2873 struct mlx5_ib_dev *dev = to_mdev(flow_id->qp->device);
2874 struct mlx5_ib_flow_handler *handler = container_of(flow_id,
2875 struct mlx5_ib_flow_handler,
2876 ibflow);
2877 struct mlx5_ib_flow_handler *iter, *tmp;
2878
2634 mutex_lock(&dev->flow_db.lock);
2879 mutex_lock(&dev->flow_db->lock);
2635
2636 list_for_each_entry_safe(iter, tmp, &handler->list, list) {
2637 mlx5_del_flow_rules(iter->rule);
2638 put_flow_table(dev, iter->prio, true);
2639 list_del(&iter->list);
2640 kfree(iter);
2641 }
2642
2643 mlx5_del_flow_rules(handler->rule);
2644 put_flow_table(dev, handler->prio, true);
2880
2881 list_for_each_entry_safe(iter, tmp, &handler->list, list) {
2882 mlx5_del_flow_rules(iter->rule);
2883 put_flow_table(dev, iter->prio, true);
2884 list_del(&iter->list);
2885 kfree(iter);
2886 }
2887
2888 mlx5_del_flow_rules(handler->rule);
2889 put_flow_table(dev, handler->prio, true);
2645 mutex_unlock(&dev->flow_db.lock);
2890 mutex_unlock(&dev->flow_db->lock);
2646
2647 kfree(handler);
2648
2649 return 0;
2650}
2651
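/*
 * Note on the flow_db change visible above and below: dev->flow_db is now
 * dereferenced as a pointer (dev->flow_db->lock, dev->flow_db->prios[...])
 * rather than as an embedded structure. The allocation is outside this hunk;
 * a minimal init would presumably look like the sketch below (an assumption,
 * not taken from this diff), and a pointer also makes it possible for a
 * representor device to reuse another device's flow database.
 */
static int example_init_flow_db(struct mlx5_ib_dev *dev)
{
	dev->flow_db = kzalloc(sizeof(*dev->flow_db), GFP_KERNEL);
	if (!dev->flow_db)
		return -ENOMEM;

	mutex_init(&dev->flow_db->lock);
	return 0;
}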
2652static int ib_prio_to_core_prio(unsigned int priority, bool dont_trap)
2653{

--- 22 unchanged lines hidden (view full) ---

2676 int num_entries;
2677 int num_groups;
2678 int priority;
2679 int err = 0;
2680
2681 max_table_size = BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
2682 log_max_ft_size));
2683 if (flow_attr->type == IB_FLOW_ATTR_NORMAL) {
2891
2892 kfree(handler);
2893
2894 return 0;
2895}
2896
2897static int ib_prio_to_core_prio(unsigned int priority, bool dont_trap)
2898{

--- 22 unchanged lines hidden (view full) ---

2921 int num_entries;
2922 int num_groups;
2923 int priority;
2924 int err = 0;
2925
2926 max_table_size = BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
2927 log_max_ft_size));
2928 if (flow_attr->type == IB_FLOW_ATTR_NORMAL) {
2684 if (flow_is_multicast_only(flow_attr) &&
2685 !dont_trap)
2929 if (ft_type == MLX5_IB_FT_TX)
2930 priority = 0;
2931 else if (flow_is_multicast_only(flow_attr) &&
2932 !dont_trap)
2686 priority = MLX5_IB_FLOW_MCAST_PRIO;
2687 else
2688 priority = ib_prio_to_core_prio(flow_attr->priority,
2689 dont_trap);
2690 ns = mlx5_get_flow_namespace(dev->mdev,
2933 priority = MLX5_IB_FLOW_MCAST_PRIO;
2934 else
2935 priority = ib_prio_to_core_prio(flow_attr->priority,
2936 dont_trap);
2937 ns = mlx5_get_flow_namespace(dev->mdev,
2938 ft_type == MLX5_IB_FT_TX ?
2939 MLX5_FLOW_NAMESPACE_EGRESS :
2691 MLX5_FLOW_NAMESPACE_BYPASS);
2692 num_entries = MLX5_FS_MAX_ENTRIES;
2693 num_groups = MLX5_FS_MAX_TYPES;
2940 MLX5_FLOW_NAMESPACE_BYPASS);
2941 num_entries = MLX5_FS_MAX_ENTRIES;
2942 num_groups = MLX5_FS_MAX_TYPES;
2694 prio = &dev->flow_db.prios[priority];
2943 prio = &dev->flow_db->prios[priority];
2695 } else if (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
2696 flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT) {
2697 ns = mlx5_get_flow_namespace(dev->mdev,
2698 MLX5_FLOW_NAMESPACE_LEFTOVERS);
2699 build_leftovers_ft_param(&priority,
2700 &num_entries,
2701 &num_groups);
2944 } else if (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
2945 flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT) {
2946 ns = mlx5_get_flow_namespace(dev->mdev,
2947 MLX5_FLOW_NAMESPACE_LEFTOVERS);
2948 build_leftovers_ft_param(&priority,
2949 &num_entries,
2950 &num_groups);
2702 prio = &dev->flow_db.prios[MLX5_IB_FLOW_LEFTOVERS_PRIO];
2951 prio = &dev->flow_db->prios[MLX5_IB_FLOW_LEFTOVERS_PRIO];
2703 } else if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) {
2704 if (!MLX5_CAP_FLOWTABLE(dev->mdev,
2705 allow_sniffer_and_nic_rx_shared_tir))
2706 return ERR_PTR(-ENOTSUPP);
2707
2708 ns = mlx5_get_flow_namespace(dev->mdev, ft_type == MLX5_IB_FT_RX ?
2709 MLX5_FLOW_NAMESPACE_SNIFFER_RX :
2710 MLX5_FLOW_NAMESPACE_SNIFFER_TX);
2711
2952 } else if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) {
2953 if (!MLX5_CAP_FLOWTABLE(dev->mdev,
2954 allow_sniffer_and_nic_rx_shared_tir))
2955 return ERR_PTR(-ENOTSUPP);
2956
2957 ns = mlx5_get_flow_namespace(dev->mdev, ft_type == MLX5_IB_FT_RX ?
2958 MLX5_FLOW_NAMESPACE_SNIFFER_RX :
2959 MLX5_FLOW_NAMESPACE_SNIFFER_TX);
2960
2712 prio = &dev->flow_db.sniffer[ft_type];
2961 prio = &dev->flow_db->sniffer[ft_type];
2713 priority = 0;
2714 num_entries = 1;
2715 num_groups = 1;
2716 }
2717
2718 if (!ns)
2719 return ERR_PTR(-ENOTSUPP);
2720

--- 41 unchanged lines hidden (view full) ---

2762static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
2763 struct mlx5_ib_flow_prio *ft_prio,
2764 const struct ib_flow_attr *flow_attr,
2765 struct mlx5_flow_destination *dst,
2766 u32 underlay_qpn)
2767{
2768 struct mlx5_flow_table *ft = ft_prio->flow_table;
2769 struct mlx5_ib_flow_handler *handler;
2962 priority = 0;
2963 num_entries = 1;
2964 num_groups = 1;
2965 }
2966
2967 if (!ns)
2968 return ERR_PTR(-ENOTSUPP);
2969

--- 41 unchanged lines hidden (view full) ---

3011static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
3012 struct mlx5_ib_flow_prio *ft_prio,
3013 const struct ib_flow_attr *flow_attr,
3014 struct mlx5_flow_destination *dst,
3015 u32 underlay_qpn)
3016{
3017 struct mlx5_flow_table *ft = ft_prio->flow_table;
3018 struct mlx5_ib_flow_handler *handler;
2770 struct mlx5_flow_act flow_act = {0};
3019 struct mlx5_flow_act flow_act = {.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG};
2771 struct mlx5_flow_spec *spec;
2772 struct mlx5_flow_destination *rule_dst = dst;
2773 const void *ib_flow = (const void *)flow_attr + sizeof(*flow_attr);
2774 unsigned int spec_index;
3020 struct mlx5_flow_spec *spec;
3021 struct mlx5_flow_destination *rule_dst = dst;
3022 const void *ib_flow = (const void *)flow_attr + sizeof(*flow_attr);
3023 unsigned int spec_index;
2775 u32 flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
2776 bool is_drop = false;
2777 int err = 0;
2778 int dest_num = 1;
3024 int err = 0;
3025 int dest_num = 1;
3026 bool is_egress = flow_attr->flags & IB_FLOW_ATTR_FLAGS_EGRESS;
2779
2780 if (!is_valid_attr(dev->mdev, flow_attr))
2781 return ERR_PTR(-EINVAL);
2782
2783 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
2784 handler = kzalloc(sizeof(*handler), GFP_KERNEL);
2785 if (!handler || !spec) {
2786 err = -ENOMEM;
2787 goto free;
2788 }
2789
2790 INIT_LIST_HEAD(&handler->list);
2791
2792 for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) {
2793 err = parse_flow_attr(dev->mdev, spec->match_criteria,
2794 spec->match_value,
3027
3028 if (!is_valid_attr(dev->mdev, flow_attr))
3029 return ERR_PTR(-EINVAL);
3030
3031 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
3032 handler = kzalloc(sizeof(*handler), GFP_KERNEL);
3033 if (!handler || !spec) {
3034 err = -ENOMEM;
3035 goto free;
3036 }
3037
3038 INIT_LIST_HEAD(&handler->list);
3039
3040 for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) {
3041 err = parse_flow_attr(dev->mdev, spec->match_criteria,
3042 spec->match_value,
2795 ib_flow, &flow_tag, &is_drop);
3043 ib_flow, flow_attr, &flow_act);
2796 if (err < 0)
2797 goto free;
2798
2799 ib_flow += ((union ib_flow_spec *)ib_flow)->size;
2800 }
2801
2802 if (!flow_is_multicast_only(flow_attr))
2803 set_underlay_qp(dev, spec, underlay_qpn);
2804
3044 if (err < 0)
3045 goto free;
3046
3047 ib_flow += ((union ib_flow_spec *)ib_flow)->size;
3048 }
3049
3050 if (!flow_is_multicast_only(flow_attr))
3051 set_underlay_qp(dev, spec, underlay_qpn);
3052
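	/*
	 * For switchdev representor devices, the block below additionally
	 * matches on the eswitch source_port so the rule only applies to
	 * traffic arriving from this representor's own vport.
	 */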
3053 if (dev->rep) {
3054 void *misc;
3055
3056 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
3057 misc_parameters);
3058 MLX5_SET(fte_match_set_misc, misc, source_port,
3059 dev->rep->vport);
3060 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
3061 misc_parameters);
3062 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
3063 }
3064
2805 spec->match_criteria_enable = get_match_criteria_enable(spec->match_criteria);
3065 spec->match_criteria_enable = get_match_criteria_enable(spec->match_criteria);
2806 if (is_drop) {
2807 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
3066
3067 if (is_egress &&
3068 !is_valid_spec(dev->mdev, spec, &flow_act, is_egress)) {
3069 err = -EINVAL;
3070 goto free;
3071 }
3072
3073 if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DROP) {
2808 rule_dst = NULL;
2809 dest_num = 0;
2810 } else {
3074 rule_dst = NULL;
3075 dest_num = 0;
3076 } else {
2811 flow_act.action = dst ? MLX5_FLOW_CONTEXT_ACTION_FWD_DEST :
2812 MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
3077 if (is_egress)
3078 flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_ALLOW;
3079 else
3080 flow_act.action |=
3081 dst ? MLX5_FLOW_CONTEXT_ACTION_FWD_DEST :
3082 MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
2813 }
2814
3083 }
3084
2815 if (flow_tag != MLX5_FS_DEFAULT_FLOW_TAG &&
3085 if (flow_act.has_flow_tag &&
2816 (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
2817 flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT)) {
2818 mlx5_ib_warn(dev, "Flow tag %u and attribute type %x isn't allowed in leftovers\n",
3086 (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
3087 flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT)) {
3088 mlx5_ib_warn(dev, "Flow tag %u and attribute type %x isn't allowed in leftovers\n",
2819 flow_tag, flow_attr->type);
3089 flow_act.flow_tag, flow_attr->type);
2820 err = -EINVAL;
2821 goto free;
2822 }
3090 err = -EINVAL;
3091 goto free;
3092 }
2823 flow_act.flow_tag = flow_tag;
2824 handler->rule = mlx5_add_flow_rules(ft, spec,
2825 &flow_act,
2826 rule_dst, dest_num);
2827
2828 if (IS_ERR(handler->rule)) {
2829 err = PTR_ERR(handler->rule);
2830 goto free;
2831 }

--- 147 unchanged lines hidden (view full) ---

2979 int domain)
2980{
2981 struct mlx5_ib_dev *dev = to_mdev(qp->device);
2982 struct mlx5_ib_qp *mqp = to_mqp(qp);
2983 struct mlx5_ib_flow_handler *handler = NULL;
2984 struct mlx5_flow_destination *dst = NULL;
2985 struct mlx5_ib_flow_prio *ft_prio_tx = NULL;
2986 struct mlx5_ib_flow_prio *ft_prio;
3093 handler->rule = mlx5_add_flow_rules(ft, spec,
3094 &flow_act,
3095 rule_dst, dest_num);
3096
3097 if (IS_ERR(handler->rule)) {
3098 err = PTR_ERR(handler->rule);
3099 goto free;
3100 }

--- 147 unchanged lines hidden (view full) ---

3248 int domain)
3249{
3250 struct mlx5_ib_dev *dev = to_mdev(qp->device);
3251 struct mlx5_ib_qp *mqp = to_mqp(qp);
3252 struct mlx5_ib_flow_handler *handler = NULL;
3253 struct mlx5_flow_destination *dst = NULL;
3254 struct mlx5_ib_flow_prio *ft_prio_tx = NULL;
3255 struct mlx5_ib_flow_prio *ft_prio;
3256 bool is_egress = flow_attr->flags & IB_FLOW_ATTR_FLAGS_EGRESS;
2987 int err;
2988 int underlay_qpn;
2989
2990 if (flow_attr->priority > MLX5_IB_FLOW_LAST_PRIO)
2991 return ERR_PTR(-ENOMEM);
2992
2993 if (domain != IB_FLOW_DOMAIN_USER ||
2994 flow_attr->port > dev->num_ports ||
3257 int err;
3258 int underlay_qpn;
3259
3260 if (flow_attr->priority > MLX5_IB_FLOW_LAST_PRIO)
3261 return ERR_PTR(-ENOMEM);
3262
3263 if (domain != IB_FLOW_DOMAIN_USER ||
3264 flow_attr->port > dev->num_ports ||
2995 (flow_attr->flags & ~IB_FLOW_ATTR_FLAGS_DONT_TRAP))
3265 (flow_attr->flags & ~(IB_FLOW_ATTR_FLAGS_DONT_TRAP |
3266 IB_FLOW_ATTR_FLAGS_EGRESS)))
2996 return ERR_PTR(-EINVAL);
2997
3267 return ERR_PTR(-EINVAL);
3268
3269 if (is_egress &&
3270 (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
3271 flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT))
3272 return ERR_PTR(-EINVAL);
3273
2998 dst = kzalloc(sizeof(*dst), GFP_KERNEL);
2999 if (!dst)
3000 return ERR_PTR(-ENOMEM);
3001
3274 dst = kzalloc(sizeof(*dst), GFP_KERNEL);
3275 if (!dst)
3276 return ERR_PTR(-ENOMEM);
3277
3002 mutex_lock(&dev->flow_db.lock);
3278 mutex_lock(&dev->flow_db->lock);
3003
3279
3004 ft_prio = get_flow_table(dev, flow_attr, MLX5_IB_FT_RX);
3280 ft_prio = get_flow_table(dev, flow_attr,
3281 is_egress ? MLX5_IB_FT_TX : MLX5_IB_FT_RX);
3005 if (IS_ERR(ft_prio)) {
3006 err = PTR_ERR(ft_prio);
3007 goto unlock;
3008 }
3009 if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) {
3010 ft_prio_tx = get_flow_table(dev, flow_attr, MLX5_IB_FT_TX);
3011 if (IS_ERR(ft_prio_tx)) {
3012 err = PTR_ERR(ft_prio_tx);
3013 ft_prio_tx = NULL;
3014 goto destroy_ft;
3015 }
3016 }
3017
3282 if (IS_ERR(ft_prio)) {
3283 err = PTR_ERR(ft_prio);
3284 goto unlock;
3285 }
3286 if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) {
3287 ft_prio_tx = get_flow_table(dev, flow_attr, MLX5_IB_FT_TX);
3288 if (IS_ERR(ft_prio_tx)) {
3289 err = PTR_ERR(ft_prio_tx);
3290 ft_prio_tx = NULL;
3291 goto destroy_ft;
3292 }
3293 }
3294
3018 dst->type = MLX5_FLOW_DESTINATION_TYPE_TIR;
3019 if (mqp->flags & MLX5_IB_QP_RSS)
3020 dst->tir_num = mqp->rss_qp.tirn;
3021 else
3022 dst->tir_num = mqp->raw_packet_qp.rq.tirn;
3295 if (is_egress) {
3296 dst->type = MLX5_FLOW_DESTINATION_TYPE_PORT;
3297 } else {
3298 dst->type = MLX5_FLOW_DESTINATION_TYPE_TIR;
3299 if (mqp->flags & MLX5_IB_QP_RSS)
3300 dst->tir_num = mqp->rss_qp.tirn;
3301 else
3302 dst->tir_num = mqp->raw_packet_qp.rq.tirn;
3303 }
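	/*
	 * Egress rules have no receive object to steer to: the destination is
	 * the port itself (the packet goes to the wire, paired with
	 * MLX5_FLOW_CONTEXT_ACTION_ALLOW in _create_flow_rule()), while only
	 * ingress rules target a TIR.
	 */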
3023
3024 if (flow_attr->type == IB_FLOW_ATTR_NORMAL) {
3025 if (flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) {
3026 handler = create_dont_trap_rule(dev, ft_prio,
3027 flow_attr, dst);
3028 } else {
3029 underlay_qpn = (mqp->flags & MLX5_IB_QP_UNDERLAY) ?
3030 mqp->underlay_qpn : 0;

--- 12 unchanged lines hidden (view full) ---

3043 }
3044
3045 if (IS_ERR(handler)) {
3046 err = PTR_ERR(handler);
3047 handler = NULL;
3048 goto destroy_ft;
3049 }
3050
3304
3305 if (flow_attr->type == IB_FLOW_ATTR_NORMAL) {
3306 if (flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) {
3307 handler = create_dont_trap_rule(dev, ft_prio,
3308 flow_attr, dst);
3309 } else {
3310 underlay_qpn = (mqp->flags & MLX5_IB_QP_UNDERLAY) ?
3311 mqp->underlay_qpn : 0;

--- 12 unchanged lines hidden (view full) ---

3324 }
3325
3326 if (IS_ERR(handler)) {
3327 err = PTR_ERR(handler);
3328 handler = NULL;
3329 goto destroy_ft;
3330 }
3331
3051 mutex_unlock(&dev->flow_db.lock);
3332 mutex_unlock(&dev->flow_db->lock);
3052 kfree(dst);
3053
3054 return &handler->ibflow;
3055
3056destroy_ft:
3057 put_flow_table(dev, ft_prio, false);
3058 if (ft_prio_tx)
3059 put_flow_table(dev, ft_prio_tx, false);
3060unlock:
3333 kfree(dst);
3334
3335 return &handler->ibflow;
3336
3337destroy_ft:
3338 put_flow_table(dev, ft_prio, false);
3339 if (ft_prio_tx)
3340 put_flow_table(dev, ft_prio_tx, false);
3341unlock:
3061 mutex_unlock(&dev->flow_db.lock);
3342 mutex_unlock(&dev->flow_db->lock);
3062 kfree(dst);
3063 kfree(handler);
3064 return ERR_PTR(err);
3065}
3066
3343 kfree(dst);
3344 kfree(handler);
3345 return ERR_PTR(err);
3346}
3347
3348static u32 mlx5_ib_flow_action_flags_to_accel_xfrm_flags(u32 mlx5_flags)
3349{
3350 u32 flags = 0;
3351
3352 if (mlx5_flags & MLX5_IB_UAPI_FLOW_ACTION_FLAGS_REQUIRE_METADATA)
3353 flags |= MLX5_ACCEL_XFRM_FLAG_REQUIRE_METADATA;
3354
3355 return flags;
3356}
3357
3358#define MLX5_FLOW_ACTION_ESP_CREATE_LAST_SUPPORTED MLX5_IB_UAPI_FLOW_ACTION_FLAGS_REQUIRE_METADATA
3359static struct ib_flow_action *
3360mlx5_ib_create_flow_action_esp(struct ib_device *device,
3361 const struct ib_flow_action_attrs_esp *attr,
3362 struct uverbs_attr_bundle *attrs)
3363{
3364 struct mlx5_ib_dev *mdev = to_mdev(device);
3365 struct ib_uverbs_flow_action_esp_keymat_aes_gcm *aes_gcm;
3366 struct mlx5_accel_esp_xfrm_attrs accel_attrs = {};
3367 struct mlx5_ib_flow_action *action;
3368 u64 action_flags;
3369 u64 flags;
3370 int err = 0;
3371
3372 if (IS_UVERBS_COPY_ERR(uverbs_copy_from(&action_flags, attrs,
3373 MLX5_IB_ATTR_CREATE_FLOW_ACTION_FLAGS)))
3374 return ERR_PTR(-EFAULT);
3375
3376 if (action_flags >= (MLX5_FLOW_ACTION_ESP_CREATE_LAST_SUPPORTED << 1))
3377 return ERR_PTR(-EOPNOTSUPP);
3378
3379 flags = mlx5_ib_flow_action_flags_to_accel_xfrm_flags(action_flags);
3380
3381	/* We currently support only a subset of the standard features: a
3382	 * keymat of type AES_GCM with icv_len == 16, iv_algo == SEQ, and ESN
3383	 * (with overlap). Full offload mode isn't supported.
3384	 */
3385 if (!attr->keymat || attr->replay || attr->encap ||
3386 attr->spi || attr->seq || attr->tfc_pad ||
3387 attr->hard_limit_pkts ||
3388 (attr->flags & ~(IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED |
3389 IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ENCRYPT)))
3390 return ERR_PTR(-EOPNOTSUPP);
3391
3392 if (attr->keymat->protocol !=
3393 IB_UVERBS_FLOW_ACTION_ESP_KEYMAT_AES_GCM)
3394 return ERR_PTR(-EOPNOTSUPP);
3395
3396 aes_gcm = &attr->keymat->keymat.aes_gcm;
3397
3398 if (aes_gcm->icv_len != 16 ||
3399 aes_gcm->iv_algo != IB_UVERBS_FLOW_ACTION_IV_ALGO_SEQ)
3400 return ERR_PTR(-EOPNOTSUPP);
3401
3402 action = kmalloc(sizeof(*action), GFP_KERNEL);
3403 if (!action)
3404 return ERR_PTR(-ENOMEM);
3405
3406 action->esp_aes_gcm.ib_flags = attr->flags;
3407 memcpy(&accel_attrs.keymat.aes_gcm.aes_key, &aes_gcm->aes_key,
3408 sizeof(accel_attrs.keymat.aes_gcm.aes_key));
3409 accel_attrs.keymat.aes_gcm.key_len = aes_gcm->key_len * 8;
3410 memcpy(&accel_attrs.keymat.aes_gcm.salt, &aes_gcm->salt,
3411 sizeof(accel_attrs.keymat.aes_gcm.salt));
3412 memcpy(&accel_attrs.keymat.aes_gcm.seq_iv, &aes_gcm->iv,
3413 sizeof(accel_attrs.keymat.aes_gcm.seq_iv));
3414 accel_attrs.keymat.aes_gcm.icv_len = aes_gcm->icv_len * 8;
3415 accel_attrs.keymat.aes_gcm.iv_algo = MLX5_ACCEL_ESP_AES_GCM_IV_ALGO_SEQ;
3416 accel_attrs.keymat_type = MLX5_ACCEL_ESP_KEYMAT_AES_GCM;
3417
3418 accel_attrs.esn = attr->esn;
3419 if (attr->flags & IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED)
3420 accel_attrs.flags |= MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED;
3421 if (attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW)
3422 accel_attrs.flags |= MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP;
3423
3424 if (attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ENCRYPT)
3425 accel_attrs.action |= MLX5_ACCEL_ESP_ACTION_ENCRYPT;
3426
3427 action->esp_aes_gcm.ctx =
3428 mlx5_accel_esp_create_xfrm(mdev->mdev, &accel_attrs, flags);
3429 if (IS_ERR(action->esp_aes_gcm.ctx)) {
3430 err = PTR_ERR(action->esp_aes_gcm.ctx);
3431 goto err_parse;
3432 }
3433
3434 action->esp_aes_gcm.ib_flags = attr->flags;
3435
3436 return &action->ib_action;
3437
3438err_parse:
3439 kfree(action);
3440 return ERR_PTR(err);
3441}
3442
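	/*
	 * Modify an existing ESP flow action. Only the ESN state (value and
	 * new-window/overlap flag) may be changed; any other attribute is
	 * rejected.
	 */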
3443static int
3444mlx5_ib_modify_flow_action_esp(struct ib_flow_action *action,
3445 const struct ib_flow_action_attrs_esp *attr,
3446 struct uverbs_attr_bundle *attrs)
3447{
3448 struct mlx5_ib_flow_action *maction = to_mflow_act(action);
3449 struct mlx5_accel_esp_xfrm_attrs accel_attrs;
3450 int err = 0;
3451
3452 if (attr->keymat || attr->replay || attr->encap ||
3453 attr->spi || attr->seq || attr->tfc_pad ||
3454 attr->hard_limit_pkts ||
3455 (attr->flags & ~(IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED |
3456 IB_FLOW_ACTION_ESP_FLAGS_MOD_ESP_ATTRS |
3457 IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW)))
3458 return -EOPNOTSUPP;
3459
3460 /* Only the ESN value or the MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP can
3461 * be modified.
3462 */
3463 if (!(maction->esp_aes_gcm.ib_flags &
3464 IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED) &&
3465 attr->flags & (IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED |
3466 IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW))
3467 return -EINVAL;
3468
3469 memcpy(&accel_attrs, &maction->esp_aes_gcm.ctx->attrs,
3470 sizeof(accel_attrs));
3471
3472 accel_attrs.esn = attr->esn;
3473 if (attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW)
3474 accel_attrs.flags |= MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP;
3475 else
3476 accel_attrs.flags &= ~MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP;
3477
3478 err = mlx5_accel_esp_modify_xfrm(maction->esp_aes_gcm.ctx,
3479 &accel_attrs);
3480 if (err)
3481 return err;
3482
3483 maction->esp_aes_gcm.ib_flags &=
3484 ~IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW;
3485 maction->esp_aes_gcm.ib_flags |=
3486 attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW;
3487
3488 return 0;
3489}
3490
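	/* Tear down a flow action and release its acceleration context. */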
3491static int mlx5_ib_destroy_flow_action(struct ib_flow_action *action)
3492{
3493 struct mlx5_ib_flow_action *maction = to_mflow_act(action);
3494
3495 switch (action->type) {
3496 case IB_FLOW_ACTION_ESP:
3497 /*
3498		 * We only support aes_gcm for now, so we implicitly know this is
3499		 * the underlying crypto.
3500 */
3501 mlx5_accel_esp_destroy_xfrm(maction->esp_aes_gcm.ctx);
3502 break;
3503 default:
3504 WARN_ON(true);
3505 break;
3506 }
3507
3508 kfree(maction);
3509 return 0;
3510}
3511
3067static int mlx5_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
3068{
3069 struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
3070 struct mlx5_ib_qp *mqp = to_mqp(ibqp);
3071 int err;
3072
3073 if (mqp->flags & MLX5_IB_QP_UNDERLAY) {
3074		mlx5_ib_dbg(dev, "Attaching a multicast group to an underlay QP is not supported\n");

--- 183 unchanged lines hidden (view full) ---

3258
3259static void mlx5_ib_handle_event(struct work_struct *_work)
3260{
3261 struct mlx5_ib_event_work *work =
3262 container_of(_work, struct mlx5_ib_event_work, work);
3263 struct mlx5_ib_dev *ibdev;
3264 struct ib_event ibev;
3265 bool fatal = false;
3512static int mlx5_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
3513{
3514 struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
3515 struct mlx5_ib_qp *mqp = to_mqp(ibqp);
3516 int err;
3517
3518 if (mqp->flags & MLX5_IB_QP_UNDERLAY) {
3519		mlx5_ib_dbg(dev, "Attaching a multicast group to an underlay QP is not supported\n");

--- 183 unchanged lines hidden (view full) ---

3703
3704static void mlx5_ib_handle_event(struct work_struct *_work)
3705{
3706 struct mlx5_ib_event_work *work =
3707 container_of(_work, struct mlx5_ib_event_work, work);
3708 struct mlx5_ib_dev *ibdev;
3709 struct ib_event ibev;
3710 bool fatal = false;
3266 u8 port = 0;
3711 u8 port = (u8)work->param;
3267
3268 if (mlx5_core_is_mp_slave(work->dev)) {
3269 ibdev = mlx5_ib_get_ibdev_from_mpi(work->context);
3270 if (!ibdev)
3271 goto out;
3272 } else {
3273 ibdev = work->context;
3274 }
3275
3276 switch (work->event) {
3277 case MLX5_DEV_EVENT_SYS_ERROR:
3278 ibev.event = IB_EVENT_DEVICE_FATAL;
3279 mlx5_ib_handle_internal_error(ibdev);
3280 fatal = true;
3281 break;
3282
3283 case MLX5_DEV_EVENT_PORT_UP:
3284 case MLX5_DEV_EVENT_PORT_DOWN:
3285 case MLX5_DEV_EVENT_PORT_INITIALIZED:
3712
3713 if (mlx5_core_is_mp_slave(work->dev)) {
3714 ibdev = mlx5_ib_get_ibdev_from_mpi(work->context);
3715 if (!ibdev)
3716 goto out;
3717 } else {
3718 ibdev = work->context;
3719 }
3720
3721 switch (work->event) {
3722 case MLX5_DEV_EVENT_SYS_ERROR:
3723 ibev.event = IB_EVENT_DEVICE_FATAL;
3724 mlx5_ib_handle_internal_error(ibdev);
3725 fatal = true;
3726 break;
3727
3728 case MLX5_DEV_EVENT_PORT_UP:
3729 case MLX5_DEV_EVENT_PORT_DOWN:
3730 case MLX5_DEV_EVENT_PORT_INITIALIZED:
3286 port = (u8)work->param;
3287
3288 /* In RoCE, port up/down events are handled in
3289 * mlx5_netdev_event().
3290 */
3291 if (mlx5_ib_port_link_layer(&ibdev->ib_dev, port) ==
3292 IB_LINK_LAYER_ETHERNET)
3293 goto out;
3294
3295 ibev.event = (work->event == MLX5_DEV_EVENT_PORT_UP) ?
3296 IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
3297 break;
3298
3299 case MLX5_DEV_EVENT_LID_CHANGE:
3300 ibev.event = IB_EVENT_LID_CHANGE;
3731 /* In RoCE, port up/down events are handled in
3732 * mlx5_netdev_event().
3733 */
3734 if (mlx5_ib_port_link_layer(&ibdev->ib_dev, port) ==
3735 IB_LINK_LAYER_ETHERNET)
3736 goto out;
3737
3738 ibev.event = (work->event == MLX5_DEV_EVENT_PORT_UP) ?
3739 IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
3740 break;
3741
3742 case MLX5_DEV_EVENT_LID_CHANGE:
3743 ibev.event = IB_EVENT_LID_CHANGE;
3301 port = (u8)work->param;
3302 break;
3303
3304 case MLX5_DEV_EVENT_PKEY_CHANGE:
3305 ibev.event = IB_EVENT_PKEY_CHANGE;
3744 break;
3745
3746 case MLX5_DEV_EVENT_PKEY_CHANGE:
3747 ibev.event = IB_EVENT_PKEY_CHANGE;
3306 port = (u8)work->param;
3307
3308 schedule_work(&ibdev->devr.ports[port - 1].pkey_change_work);
3309 break;
3310
3311 case MLX5_DEV_EVENT_GUID_CHANGE:
3312 ibev.event = IB_EVENT_GID_CHANGE;
3748 schedule_work(&ibdev->devr.ports[port - 1].pkey_change_work);
3749 break;
3750
3751 case MLX5_DEV_EVENT_GUID_CHANGE:
3752 ibev.event = IB_EVENT_GID_CHANGE;
3313 port = (u8)work->param;
3314 break;
3315
3316 case MLX5_DEV_EVENT_CLIENT_REREG:
3317 ibev.event = IB_EVENT_CLIENT_REREGISTER;
3753 break;
3754
3755 case MLX5_DEV_EVENT_CLIENT_REREG:
3756 ibev.event = IB_EVENT_CLIENT_REREGISTER;
3318 port = (u8)work->param;
3319 break;
3320 case MLX5_DEV_EVENT_DELAY_DROP_TIMEOUT:
3321 schedule_work(&ibdev->delay_drop.delay_drop_work);
3322 goto out;
3323 default:
3324 goto out;
3325 }
3326
3327 ibev.device = &ibdev->ib_dev;
3328 ibev.element.port_num = port;
3329
3757 break;
3758 case MLX5_DEV_EVENT_DELAY_DROP_TIMEOUT:
3759 schedule_work(&ibdev->delay_drop.delay_drop_work);
3760 goto out;
3761 default:
3762 goto out;
3763 }
3764
3765 ibev.device = &ibdev->ib_dev;
3766 ibev.element.port_num = port;
3767
3330 if (port < 1 || port > ibdev->num_ports) {
3768 if (!rdma_is_port_valid(&ibdev->ib_dev, port)) {
3331 mlx5_ib_warn(ibdev, "warning: event on port %d\n", port);
3332 goto out;
3333 }
3334
3335 if (ibdev->ib_active)
3336 ib_dispatch_event(&ibev);
3337
3338 if (fatal)

--- 428 unchanged lines hidden (view full) ---

3767 immutable->gid_tbl_len = attr.gid_tbl_len;
3768 immutable->core_cap_flags = get_core_cap_flags(ibdev);
3769 if ((ll == IB_LINK_LAYER_INFINIBAND) || MLX5_CAP_GEN(dev->mdev, roce))
3770 immutable->max_mad_size = IB_MGMT_MAD_SIZE;
3771
3772 return 0;
3773}
3774
3769 mlx5_ib_warn(ibdev, "warning: event on port %d\n", port);
3770 goto out;
3771 }
3772
3773 if (ibdev->ib_active)
3774 ib_dispatch_event(&ibev);
3775
3776 if (fatal)

--- 428 unchanged lines hidden (view full) ---

4205 immutable->gid_tbl_len = attr.gid_tbl_len;
4206 immutable->core_cap_flags = get_core_cap_flags(ibdev);
4207 if ((ll == IB_LINK_LAYER_INFINIBAND) || MLX5_CAP_GEN(dev->mdev, roce))
4208 immutable->max_mad_size = IB_MGMT_MAD_SIZE;
4209
4210 return 0;
4211}
4212
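	/*
	 * Port immutable data for the switchdev representor profile:
	 * representor ports expose only raw packet (Ethernet) support.
	 */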
4213static int mlx5_port_rep_immutable(struct ib_device *ibdev, u8 port_num,
4214 struct ib_port_immutable *immutable)
4215{
4216 struct ib_port_attr attr;
4217 int err;
4218
4219 immutable->core_cap_flags = RDMA_CORE_PORT_RAW_PACKET;
4220
4221 err = ib_query_port(ibdev, port_num, &attr);
4222 if (err)
4223 return err;
4224
4225 immutable->pkey_tbl_len = attr.pkey_tbl_len;
4226 immutable->gid_tbl_len = attr.gid_tbl_len;
4227 immutable->core_cap_flags = RDMA_CORE_PORT_RAW_PACKET;
4228
4229 return 0;
4230}
4231
3775static void get_dev_fw_str(struct ib_device *ibdev, char *str)
3776{
3777 struct mlx5_ib_dev *dev =
3778 container_of(ibdev, struct mlx5_ib_dev, ib_dev);
3779 snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%04d",
3780 fw_rev_maj(dev->mdev), fw_rev_min(dev->mdev),
3781 fw_rev_sub(dev->mdev));
3782}

--- 14 unchanged lines hidden (view full) ---

3797 return err;
3798
3799 ft = mlx5_create_lag_demux_flow_table(ns, 0, 0);
3800 if (IS_ERR(ft)) {
3801 err = PTR_ERR(ft);
3802 goto err_destroy_vport_lag;
3803 }
3804
4232static void get_dev_fw_str(struct ib_device *ibdev, char *str)
4233{
4234 struct mlx5_ib_dev *dev =
4235 container_of(ibdev, struct mlx5_ib_dev, ib_dev);
4236 snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%04d",
4237 fw_rev_maj(dev->mdev), fw_rev_min(dev->mdev),
4238 fw_rev_sub(dev->mdev));
4239}

--- 14 unchanged lines hidden (view full) ---

4254 return err;
4255
4256 ft = mlx5_create_lag_demux_flow_table(ns, 0, 0);
4257 if (IS_ERR(ft)) {
4258 err = PTR_ERR(ft);
4259 goto err_destroy_vport_lag;
4260 }
4261
3805 dev->flow_db.lag_demux_ft = ft;
4262 dev->flow_db->lag_demux_ft = ft;
3806 return 0;
3807
3808err_destroy_vport_lag:
3809 mlx5_cmd_destroy_vport_lag(mdev);
3810 return err;
3811}
3812
3813static void mlx5_eth_lag_cleanup(struct mlx5_ib_dev *dev)
3814{
3815 struct mlx5_core_dev *mdev = dev->mdev;
3816
4263 return 0;
4264
4265err_destroy_vport_lag:
4266 mlx5_cmd_destroy_vport_lag(mdev);
4267 return err;
4268}
4269
4270static void mlx5_eth_lag_cleanup(struct mlx5_ib_dev *dev)
4271{
4272 struct mlx5_core_dev *mdev = dev->mdev;
4273
3817 if (dev->flow_db.lag_demux_ft) {
3818 mlx5_destroy_flow_table(dev->flow_db.lag_demux_ft);
3819 dev->flow_db.lag_demux_ft = NULL;
4274 if (dev->flow_db->lag_demux_ft) {
4275 mlx5_destroy_flow_table(dev->flow_db->lag_demux_ft);
4276 dev->flow_db->lag_demux_ft = NULL;
3820
3821 mlx5_cmd_destroy_vport_lag(mdev);
3822 }
3823}
3824
3825static int mlx5_add_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
3826{
3827 int err;

--- 15 unchanged lines hidden (view full) ---

3843 dev->roce[port_num].nb.notifier_call = NULL;
3844 }
3845}
3846
3847static int mlx5_enable_eth(struct mlx5_ib_dev *dev, u8 port_num)
3848{
3849 int err;
3850
4277
4278 mlx5_cmd_destroy_vport_lag(mdev);
4279 }
4280}
4281
4282static int mlx5_add_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
4283{
4284 int err;

--- 15 unchanged lines hidden (view full) ---

4300 dev->roce[port_num].nb.notifier_call = NULL;
4301 }
4302}
4303
4304static int mlx5_enable_eth(struct mlx5_ib_dev *dev, u8 port_num)
4305{
4306 int err;
4307
3851 err = mlx5_add_netdev_notifier(dev, port_num);
3852 if (err)
3853 return err;
3854
3855 if (MLX5_CAP_GEN(dev->mdev, roce)) {
3856 err = mlx5_nic_vport_enable_roce(dev->mdev);
3857 if (err)
4308 if (MLX5_CAP_GEN(dev->mdev, roce)) {
4309 err = mlx5_nic_vport_enable_roce(dev->mdev);
4310 if (err)
3858 goto err_unregister_netdevice_notifier;
4311 return err;
3859 }
3860
3861 err = mlx5_eth_lag_init(dev);
3862 if (err)
3863 goto err_disable_roce;
3864
3865 return 0;
3866
3867err_disable_roce:
3868 if (MLX5_CAP_GEN(dev->mdev, roce))
3869 mlx5_nic_vport_disable_roce(dev->mdev);
3870
4312 }
4313
4314 err = mlx5_eth_lag_init(dev);
4315 if (err)
4316 goto err_disable_roce;
4317
4318 return 0;
4319
4320err_disable_roce:
4321 if (MLX5_CAP_GEN(dev->mdev, roce))
4322 mlx5_nic_vport_disable_roce(dev->mdev);
4323
3871err_unregister_netdevice_notifier:
3872 mlx5_remove_netdev_notifier(dev, port_num);
3873 return err;
3874}
3875
3876static void mlx5_disable_eth(struct mlx5_ib_dev *dev)
3877{
3878 mlx5_eth_lag_cleanup(dev);
3879 if (MLX5_CAP_GEN(dev->mdev, roce))
3880 mlx5_nic_vport_disable_roce(dev->mdev);

--- 617 unchanged lines hidden (view full) ---

4498
4499 mlx5_ib_dbg(dev, "removing from devlist\n");
4500 list_del(&dev->ib_dev_list);
4501 mutex_unlock(&mlx5_ib_multiport_mutex);
4502
4503 mlx5_nic_vport_disable_roce(dev->mdev);
4504}
4505
4324 return err;
4325}
4326
4327static void mlx5_disable_eth(struct mlx5_ib_dev *dev)
4328{
4329 mlx5_eth_lag_cleanup(dev);
4330 if (MLX5_CAP_GEN(dev->mdev, roce))
4331 mlx5_nic_vport_disable_roce(dev->mdev);

--- 617 unchanged lines hidden (view full) ---

4949
4950 mlx5_ib_dbg(dev, "removing from devlist\n");
4951 list_del(&dev->ib_dev_list);
4952 mutex_unlock(&mlx5_ib_multiport_mutex);
4953
4954 mlx5_nic_vport_disable_roce(dev->mdev);
4955}
4956
4506static void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev)
4957ADD_UVERBS_ATTRIBUTES_SIMPLE(mlx5_ib_dm, UVERBS_OBJECT_DM,
4958 UVERBS_METHOD_DM_ALLOC,
4959 &UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
4960 UVERBS_ATTR_TYPE(u64),
4961 UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)),
4962 &UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX,
4963 UVERBS_ATTR_TYPE(u16),
4964 UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)));
4965
4966ADD_UVERBS_ATTRIBUTES_SIMPLE(mlx5_ib_flow_action, UVERBS_OBJECT_FLOW_ACTION,
4967 UVERBS_METHOD_FLOW_ACTION_ESP_CREATE,
4968 &UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_CREATE_FLOW_ACTION_FLAGS,
4969 UVERBS_ATTR_TYPE(u64),
4970 UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)));
4971
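	/*
	 * Build the uverbs specification tree from the default objects plus
	 * the optional mlx5 trees (flow action, device memory), depending on
	 * device capabilities.
	 */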
4972#define NUM_TREES 2
4973static int populate_specs_root(struct mlx5_ib_dev *dev)
4507{
4974{
4975 const struct uverbs_object_tree_def *default_root[NUM_TREES + 1] = {
4976 uverbs_default_get_objects()};
4977 size_t num_trees = 1;
4978
4979 if (mlx5_accel_ipsec_device_caps(dev->mdev) & MLX5_ACCEL_IPSEC_CAP_DEVICE &&
4980 !WARN_ON(num_trees >= ARRAY_SIZE(default_root)))
4981 default_root[num_trees++] = &mlx5_ib_flow_action;
4982
4983 if (MLX5_CAP_DEV_MEM(dev->mdev, memic) &&
4984 !WARN_ON(num_trees >= ARRAY_SIZE(default_root)))
4985 default_root[num_trees++] = &mlx5_ib_dm;
4986
4987 dev->ib_dev.specs_root =
4988 uverbs_alloc_spec_tree(num_trees, default_root);
4989
4990 return PTR_ERR_OR_ZERO(dev->ib_dev.specs_root);
4991}
4992
4993static void depopulate_specs_root(struct mlx5_ib_dev *dev)
4994{
4995 uverbs_free_spec_tree(dev->ib_dev.specs_root);
4996}
4997
4998void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev)
4999{
4508 mlx5_ib_cleanup_multiport_master(dev);
4509#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
4510 cleanup_srcu_struct(&dev->mr_srcu);
4511#endif
4512 kfree(dev->port);
4513}
4514
5000 mlx5_ib_cleanup_multiport_master(dev);
5001#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
5002 cleanup_srcu_struct(&dev->mr_srcu);
5003#endif
5004 kfree(dev->port);
5005}
5006
4515static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
5007int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
4516{
4517 struct mlx5_core_dev *mdev = dev->mdev;
4518 const char *name;
4519 int err;
4520 int i;
4521
4522 dev->port = kcalloc(dev->num_ports, sizeof(*dev->port),
4523 GFP_KERNEL);

--- 5 unchanged lines hidden (view full) ---

4529 rwlock_init(&dev->roce[i].netdev_lock);
4530 }
4531
4532 err = mlx5_ib_init_multiport_master(dev);
4533 if (err)
4534 goto err_free_port;
4535
4536 if (!mlx5_core_mp_enabled(mdev)) {
5008{
5009 struct mlx5_core_dev *mdev = dev->mdev;
5010 const char *name;
5011 int err;
5012 int i;
5013
5014 dev->port = kcalloc(dev->num_ports, sizeof(*dev->port),
5015 GFP_KERNEL);

--- 5 unchanged lines hidden (view full) ---

5021 rwlock_init(&dev->roce[i].netdev_lock);
5022 }
5023
5024 err = mlx5_ib_init_multiport_master(dev);
5025 if (err)
5026 goto err_free_port;
5027
5028 if (!mlx5_core_mp_enabled(mdev)) {
4537 int i;
4538
4539 for (i = 1; i <= dev->num_ports; i++) {
4540 err = get_port_caps(dev, i);
4541 if (err)
4542 break;
4543 }
4544 } else {
4545 err = get_port_caps(dev, mlx5_core_native_port_num(mdev));
4546 }

--- 12 unchanged lines hidden (view full) ---

4559 dev->ib_dev.owner = THIS_MODULE;
4560 dev->ib_dev.node_type = RDMA_NODE_IB_CA;
4561 dev->ib_dev.local_dma_lkey = 0 /* not supported for now */;
4562 dev->ib_dev.phys_port_cnt = dev->num_ports;
4563 dev->ib_dev.num_comp_vectors =
4564 dev->mdev->priv.eq_table.num_comp_vectors;
4565 dev->ib_dev.dev.parent = &mdev->pdev->dev;
4566
5029 for (i = 1; i <= dev->num_ports; i++) {
5030 err = get_port_caps(dev, i);
5031 if (err)
5032 break;
5033 }
5034 } else {
5035 err = get_port_caps(dev, mlx5_core_native_port_num(mdev));
5036 }

--- 12 unchanged lines hidden (view full) ---

5049 dev->ib_dev.owner = THIS_MODULE;
5050 dev->ib_dev.node_type = RDMA_NODE_IB_CA;
5051 dev->ib_dev.local_dma_lkey = 0 /* not supported for now */;
5052 dev->ib_dev.phys_port_cnt = dev->num_ports;
5053 dev->ib_dev.num_comp_vectors =
5054 dev->mdev->priv.eq_table.num_comp_vectors;
5055 dev->ib_dev.dev.parent = &mdev->pdev->dev;
5056
4567 mutex_init(&dev->flow_db.lock);
4568 mutex_init(&dev->cap_mask_mutex);
4569 INIT_LIST_HEAD(&dev->qp_list);
4570 spin_lock_init(&dev->reset_flow_resource_lock);
4571
5057 mutex_init(&dev->cap_mask_mutex);
5058 INIT_LIST_HEAD(&dev->qp_list);
5059 spin_lock_init(&dev->reset_flow_resource_lock);
5060
5061 spin_lock_init(&dev->memic.memic_lock);
5062 dev->memic.dev = mdev;
5063
4572#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
4573 err = init_srcu_struct(&dev->mr_srcu);
4574 if (err)
4575 goto err_free_port;
4576#endif
4577
4578 return 0;
4579err_mp:
4580 mlx5_ib_cleanup_multiport_master(dev);
4581
4582err_free_port:
4583 kfree(dev->port);
4584
4585 return -ENOMEM;
4586}
4587
5064#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
5065 err = init_srcu_struct(&dev->mr_srcu);
5066 if (err)
5067 goto err_free_port;
5068#endif
5069
5070 return 0;
5071err_mp:
5072 mlx5_ib_cleanup_multiport_master(dev);
5073
5074err_free_port:
5075 kfree(dev->port);
5076
5077 return -ENOMEM;
5078}
5079
4588static int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
5080static int mlx5_ib_stage_flow_db_init(struct mlx5_ib_dev *dev)
4589{
5081{
5082 dev->flow_db = kzalloc(sizeof(*dev->flow_db), GFP_KERNEL);
5083
5084 if (!dev->flow_db)
5085 return -ENOMEM;
5086
5087 mutex_init(&dev->flow_db->lock);
5088
5089 return 0;
5090}
5091
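	/*
	 * Representor devices do not allocate their own flow steering
	 * database; they share the one owned by the uplink (NIC) device.
	 */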
5092int mlx5_ib_stage_rep_flow_db_init(struct mlx5_ib_dev *dev)
5093{
5094 struct mlx5_ib_dev *nic_dev;
5095
5096 nic_dev = mlx5_ib_get_uplink_ibdev(dev->mdev->priv.eswitch);
5097
5098 if (!nic_dev)
5099 return -EINVAL;
5100
5101 dev->flow_db = nic_dev->flow_db;
5102
5103 return 0;
5104}
5105
5106static void mlx5_ib_stage_flow_db_cleanup(struct mlx5_ib_dev *dev)
5107{
5108 kfree(dev->flow_db);
5109}
5110
5111int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
5112{
4590 struct mlx5_core_dev *mdev = dev->mdev;
4591 int err;
4592
4593 dev->ib_dev.uverbs_abi_ver = MLX5_IB_UVERBS_ABI_VERSION;
4594 dev->ib_dev.uverbs_cmd_mask =
4595 (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
4596 (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
4597 (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |

--- 23 unchanged lines hidden (view full) ---

4621 dev->ib_dev.uverbs_ex_cmd_mask =
4622 (1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE) |
4623 (1ull << IB_USER_VERBS_EX_CMD_CREATE_CQ) |
4624 (1ull << IB_USER_VERBS_EX_CMD_CREATE_QP) |
4625 (1ull << IB_USER_VERBS_EX_CMD_MODIFY_QP) |
4626 (1ull << IB_USER_VERBS_EX_CMD_MODIFY_CQ);
4627
4628 dev->ib_dev.query_device = mlx5_ib_query_device;
5113 struct mlx5_core_dev *mdev = dev->mdev;
5114 int err;
5115
5116 dev->ib_dev.uverbs_abi_ver = MLX5_IB_UVERBS_ABI_VERSION;
5117 dev->ib_dev.uverbs_cmd_mask =
5118 (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
5119 (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
5120 (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |

--- 23 unchanged lines hidden (view full) ---

5144 dev->ib_dev.uverbs_ex_cmd_mask =
5145 (1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE) |
5146 (1ull << IB_USER_VERBS_EX_CMD_CREATE_CQ) |
5147 (1ull << IB_USER_VERBS_EX_CMD_CREATE_QP) |
5148 (1ull << IB_USER_VERBS_EX_CMD_MODIFY_QP) |
5149 (1ull << IB_USER_VERBS_EX_CMD_MODIFY_CQ);
5150
5151 dev->ib_dev.query_device = mlx5_ib_query_device;
4629 dev->ib_dev.query_port = mlx5_ib_query_port;
4630 dev->ib_dev.get_link_layer = mlx5_ib_port_link_layer;
4631 dev->ib_dev.query_gid = mlx5_ib_query_gid;
4632 dev->ib_dev.add_gid = mlx5_ib_add_gid;
4633 dev->ib_dev.del_gid = mlx5_ib_del_gid;
4634 dev->ib_dev.query_pkey = mlx5_ib_query_pkey;
4635 dev->ib_dev.modify_device = mlx5_ib_modify_device;
4636 dev->ib_dev.modify_port = mlx5_ib_modify_port;
4637 dev->ib_dev.alloc_ucontext = mlx5_ib_alloc_ucontext;

--- 26 unchanged lines hidden (view full) ---

4664 dev->ib_dev.rereg_user_mr = mlx5_ib_rereg_user_mr;
4665 dev->ib_dev.dereg_mr = mlx5_ib_dereg_mr;
4666 dev->ib_dev.attach_mcast = mlx5_ib_mcg_attach;
4667 dev->ib_dev.detach_mcast = mlx5_ib_mcg_detach;
4668 dev->ib_dev.process_mad = mlx5_ib_process_mad;
4669 dev->ib_dev.alloc_mr = mlx5_ib_alloc_mr;
4670 dev->ib_dev.map_mr_sg = mlx5_ib_map_mr_sg;
4671 dev->ib_dev.check_mr_status = mlx5_ib_check_mr_status;
5152 dev->ib_dev.get_link_layer = mlx5_ib_port_link_layer;
5153 dev->ib_dev.query_gid = mlx5_ib_query_gid;
5154 dev->ib_dev.add_gid = mlx5_ib_add_gid;
5155 dev->ib_dev.del_gid = mlx5_ib_del_gid;
5156 dev->ib_dev.query_pkey = mlx5_ib_query_pkey;
5157 dev->ib_dev.modify_device = mlx5_ib_modify_device;
5158 dev->ib_dev.modify_port = mlx5_ib_modify_port;
5159 dev->ib_dev.alloc_ucontext = mlx5_ib_alloc_ucontext;

--- 26 unchanged lines hidden (view full) ---

5186 dev->ib_dev.rereg_user_mr = mlx5_ib_rereg_user_mr;
5187 dev->ib_dev.dereg_mr = mlx5_ib_dereg_mr;
5188 dev->ib_dev.attach_mcast = mlx5_ib_mcg_attach;
5189 dev->ib_dev.detach_mcast = mlx5_ib_mcg_detach;
5190 dev->ib_dev.process_mad = mlx5_ib_process_mad;
5191 dev->ib_dev.alloc_mr = mlx5_ib_alloc_mr;
5192 dev->ib_dev.map_mr_sg = mlx5_ib_map_mr_sg;
5193 dev->ib_dev.check_mr_status = mlx5_ib_check_mr_status;
4672 dev->ib_dev.get_port_immutable = mlx5_port_immutable;
4673 dev->ib_dev.get_dev_fw_str = get_dev_fw_str;
4674 dev->ib_dev.get_vector_affinity = mlx5_ib_get_vector_affinity;
4675 if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads))
4676 dev->ib_dev.alloc_rdma_netdev = mlx5_ib_alloc_rdma_netdev;
4677
4678 if (mlx5_core_is_pf(mdev)) {
4679 dev->ib_dev.get_vf_config = mlx5_ib_get_vf_config;
4680 dev->ib_dev.set_vf_link_state = mlx5_ib_set_vf_link_state;

--- 16 unchanged lines hidden (view full) ---

4697 if (MLX5_CAP_GEN(mdev, xrc)) {
4698 dev->ib_dev.alloc_xrcd = mlx5_ib_alloc_xrcd;
4699 dev->ib_dev.dealloc_xrcd = mlx5_ib_dealloc_xrcd;
4700 dev->ib_dev.uverbs_cmd_mask |=
4701 (1ull << IB_USER_VERBS_CMD_OPEN_XRCD) |
4702 (1ull << IB_USER_VERBS_CMD_CLOSE_XRCD);
4703 }
4704
5194 dev->ib_dev.get_dev_fw_str = get_dev_fw_str;
5195 dev->ib_dev.get_vector_affinity = mlx5_ib_get_vector_affinity;
5196 if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads))
5197 dev->ib_dev.alloc_rdma_netdev = mlx5_ib_alloc_rdma_netdev;
5198
5199 if (mlx5_core_is_pf(mdev)) {
5200 dev->ib_dev.get_vf_config = mlx5_ib_get_vf_config;
5201 dev->ib_dev.set_vf_link_state = mlx5_ib_set_vf_link_state;

--- 16 unchanged lines hidden (view full) ---

5218 if (MLX5_CAP_GEN(mdev, xrc)) {
5219 dev->ib_dev.alloc_xrcd = mlx5_ib_alloc_xrcd;
5220 dev->ib_dev.dealloc_xrcd = mlx5_ib_dealloc_xrcd;
5221 dev->ib_dev.uverbs_cmd_mask |=
5222 (1ull << IB_USER_VERBS_CMD_OPEN_XRCD) |
5223 (1ull << IB_USER_VERBS_CMD_CLOSE_XRCD);
5224 }
5225
5226 if (MLX5_CAP_DEV_MEM(mdev, memic)) {
5227 dev->ib_dev.alloc_dm = mlx5_ib_alloc_dm;
5228 dev->ib_dev.dealloc_dm = mlx5_ib_dealloc_dm;
5229 }
5230
4705 dev->ib_dev.create_flow = mlx5_ib_create_flow;
4706 dev->ib_dev.destroy_flow = mlx5_ib_destroy_flow;
4707 dev->ib_dev.uverbs_ex_cmd_mask |=
4708 (1ull << IB_USER_VERBS_EX_CMD_CREATE_FLOW) |
4709 (1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW);
5231 dev->ib_dev.create_flow = mlx5_ib_create_flow;
5232 dev->ib_dev.destroy_flow = mlx5_ib_destroy_flow;
5233 dev->ib_dev.uverbs_ex_cmd_mask |=
5234 (1ull << IB_USER_VERBS_EX_CMD_CREATE_FLOW) |
5235 (1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW);
5236 dev->ib_dev.create_flow_action_esp = mlx5_ib_create_flow_action_esp;
5237 dev->ib_dev.destroy_flow_action = mlx5_ib_destroy_flow_action;
5238 dev->ib_dev.modify_flow_action_esp = mlx5_ib_modify_flow_action_esp;
5239 dev->ib_dev.driver_id = RDMA_DRIVER_MLX5;
4710
4711 err = init_node_data(dev);
4712 if (err)
4713 return err;
4714
4715 if ((MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
4716 (MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) ||
4717 MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
4718 mutex_init(&dev->lb_mutex);
4719
4720 return 0;
4721}
4722
5240
5241 err = init_node_data(dev);
5242 if (err)
5243 return err;
5244
5245 if ((MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
5246 (MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) ||
5247 MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
5248 mutex_init(&dev->lb_mutex);
5249
5250 return 0;
5251}
5252
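	/*
	 * The "non default callback" stage installs the verbs callbacks that
	 * differ between the regular (PF) profile and the representor profile.
	 */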
5253static int mlx5_ib_stage_non_default_cb(struct mlx5_ib_dev *dev)
5254{
5255 dev->ib_dev.get_port_immutable = mlx5_port_immutable;
5256 dev->ib_dev.query_port = mlx5_ib_query_port;
5257
5258 return 0;
5259}
5260
5261int mlx5_ib_stage_rep_non_default_cb(struct mlx5_ib_dev *dev)
5262{
5263 dev->ib_dev.get_port_immutable = mlx5_port_rep_immutable;
5264 dev->ib_dev.query_port = mlx5_ib_rep_query_port;
5265
5266 return 0;
5267}
5268
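	/*
	 * RoCE initialization shared by the PF and representor profiles: set
	 * up the per-port RoCE state, the netdev-related callbacks and the
	 * WQ/RWQ indirection table verbs, and register the netdev notifier.
	 */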
5269static int mlx5_ib_stage_common_roce_init(struct mlx5_ib_dev *dev,
5270 u8 port_num)
5271{
5272 int i;
5273
5274 for (i = 0; i < dev->num_ports; i++) {
5275 dev->roce[i].dev = dev;
5276 dev->roce[i].native_port_num = i + 1;
5277 dev->roce[i].last_port_state = IB_PORT_DOWN;
5278 }
5279
5280 dev->ib_dev.get_netdev = mlx5_ib_get_netdev;
5281 dev->ib_dev.create_wq = mlx5_ib_create_wq;
5282 dev->ib_dev.modify_wq = mlx5_ib_modify_wq;
5283 dev->ib_dev.destroy_wq = mlx5_ib_destroy_wq;
5284 dev->ib_dev.create_rwq_ind_table = mlx5_ib_create_rwq_ind_table;
5285 dev->ib_dev.destroy_rwq_ind_table = mlx5_ib_destroy_rwq_ind_table;
5286
5287 dev->ib_dev.uverbs_ex_cmd_mask |=
5288 (1ull << IB_USER_VERBS_EX_CMD_CREATE_WQ) |
5289 (1ull << IB_USER_VERBS_EX_CMD_MODIFY_WQ) |
5290 (1ull << IB_USER_VERBS_EX_CMD_DESTROY_WQ) |
5291 (1ull << IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL) |
5292 (1ull << IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL);
5293
5294 return mlx5_add_netdev_notifier(dev, port_num);
5295}
5296
5297static void mlx5_ib_stage_common_roce_cleanup(struct mlx5_ib_dev *dev)
5298{
5299 u8 port_num = mlx5_core_native_port_num(dev->mdev) - 1;
5300
5301 mlx5_remove_netdev_notifier(dev, port_num);
5302}
5303
5304int mlx5_ib_stage_rep_roce_init(struct mlx5_ib_dev *dev)
5305{
5306 struct mlx5_core_dev *mdev = dev->mdev;
5307 enum rdma_link_layer ll;
5308 int port_type_cap;
5309 int err = 0;
5310 u8 port_num;
5311
5312 port_num = mlx5_core_native_port_num(dev->mdev) - 1;
5313 port_type_cap = MLX5_CAP_GEN(mdev, port_type);
5314 ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
5315
5316 if (ll == IB_LINK_LAYER_ETHERNET)
5317 err = mlx5_ib_stage_common_roce_init(dev, port_num);
5318
5319 return err;
5320}
5321
5322void mlx5_ib_stage_rep_roce_cleanup(struct mlx5_ib_dev *dev)
5323{
5324 mlx5_ib_stage_common_roce_cleanup(dev);
5325}
5326
4723static int mlx5_ib_stage_roce_init(struct mlx5_ib_dev *dev)
4724{
4725 struct mlx5_core_dev *mdev = dev->mdev;
4726 enum rdma_link_layer ll;
4727 int port_type_cap;
4728 u8 port_num;
4729 int err;
5327static int mlx5_ib_stage_roce_init(struct mlx5_ib_dev *dev)
5328{
5329 struct mlx5_core_dev *mdev = dev->mdev;
5330 enum rdma_link_layer ll;
5331 int port_type_cap;
5332 u8 port_num;
5333 int err;
4730 int i;
4731
4732 port_num = mlx5_core_native_port_num(dev->mdev) - 1;
4733 port_type_cap = MLX5_CAP_GEN(mdev, port_type);
4734 ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
4735
4736 if (ll == IB_LINK_LAYER_ETHERNET) {
5334
5335 port_num = mlx5_core_native_port_num(dev->mdev) - 1;
5336 port_type_cap = MLX5_CAP_GEN(mdev, port_type);
5337 ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
5338
5339 if (ll == IB_LINK_LAYER_ETHERNET) {
4737 for (i = 0; i < dev->num_ports; i++) {
4738 dev->roce[i].dev = dev;
4739 dev->roce[i].native_port_num = i + 1;
4740 dev->roce[i].last_port_state = IB_PORT_DOWN;
4741 }
5340 err = mlx5_ib_stage_common_roce_init(dev, port_num);
5341 if (err)
5342 return err;
4742
5343
4743 dev->ib_dev.get_netdev = mlx5_ib_get_netdev;
4744 dev->ib_dev.create_wq = mlx5_ib_create_wq;
4745 dev->ib_dev.modify_wq = mlx5_ib_modify_wq;
4746 dev->ib_dev.destroy_wq = mlx5_ib_destroy_wq;
4747 dev->ib_dev.create_rwq_ind_table = mlx5_ib_create_rwq_ind_table;
4748 dev->ib_dev.destroy_rwq_ind_table = mlx5_ib_destroy_rwq_ind_table;
4749 dev->ib_dev.uverbs_ex_cmd_mask |=
4750 (1ull << IB_USER_VERBS_EX_CMD_CREATE_WQ) |
4751 (1ull << IB_USER_VERBS_EX_CMD_MODIFY_WQ) |
4752 (1ull << IB_USER_VERBS_EX_CMD_DESTROY_WQ) |
4753 (1ull << IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL) |
4754 (1ull << IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL);
4755 err = mlx5_enable_eth(dev, port_num);
4756 if (err)
5344 err = mlx5_enable_eth(dev, port_num);
5345 if (err)
4757 return err;
5346 goto cleanup;
4758 }
4759
4760 return 0;
5347 }
5348
5349 return 0;
5350cleanup:
5351 mlx5_ib_stage_common_roce_cleanup(dev);
5352
5353 return err;
4761}
4762
4763static void mlx5_ib_stage_roce_cleanup(struct mlx5_ib_dev *dev)
4764{
4765 struct mlx5_core_dev *mdev = dev->mdev;
4766 enum rdma_link_layer ll;
4767 int port_type_cap;
4768 u8 port_num;
4769
4770 port_num = mlx5_core_native_port_num(dev->mdev) - 1;
4771 port_type_cap = MLX5_CAP_GEN(mdev, port_type);
4772 ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
4773
4774 if (ll == IB_LINK_LAYER_ETHERNET) {
4775 mlx5_disable_eth(dev);
5354}
5355
5356static void mlx5_ib_stage_roce_cleanup(struct mlx5_ib_dev *dev)
5357{
5358 struct mlx5_core_dev *mdev = dev->mdev;
5359 enum rdma_link_layer ll;
5360 int port_type_cap;
5361 u8 port_num;
5362
5363 port_num = mlx5_core_native_port_num(dev->mdev) - 1;
5364 port_type_cap = MLX5_CAP_GEN(mdev, port_type);
5365 ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
5366
5367 if (ll == IB_LINK_LAYER_ETHERNET) {
5368 mlx5_disable_eth(dev);
4776 mlx5_remove_netdev_notifier(dev, port_num);
5369 mlx5_ib_stage_common_roce_cleanup(dev);
4777 }
4778}
4779
5370 }
5371}
5372
4780static int mlx5_ib_stage_dev_res_init(struct mlx5_ib_dev *dev)
5373int mlx5_ib_stage_dev_res_init(struct mlx5_ib_dev *dev)
4781{
4782 return create_dev_resources(&dev->devr);
4783}
4784
5374{
5375 return create_dev_resources(&dev->devr);
5376}
5377
4785static void mlx5_ib_stage_dev_res_cleanup(struct mlx5_ib_dev *dev)
5378void mlx5_ib_stage_dev_res_cleanup(struct mlx5_ib_dev *dev)
4786{
4787 destroy_dev_resources(&dev->devr);
4788}
4789
4790static int mlx5_ib_stage_odp_init(struct mlx5_ib_dev *dev)
4791{
4792 mlx5_ib_internal_fill_odp_caps(dev);
4793
4794 return mlx5_ib_odp_init_one(dev);
4795}
4796
5379{
5380 destroy_dev_resources(&dev->devr);
5381}
5382
5383static int mlx5_ib_stage_odp_init(struct mlx5_ib_dev *dev)
5384{
5385 mlx5_ib_internal_fill_odp_caps(dev);
5386
5387 return mlx5_ib_odp_init_one(dev);
5388}
5389
4797static int mlx5_ib_stage_counters_init(struct mlx5_ib_dev *dev)
5390int mlx5_ib_stage_counters_init(struct mlx5_ib_dev *dev)
4798{
4799 if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt)) {
4800 dev->ib_dev.get_hw_stats = mlx5_ib_get_hw_stats;
4801 dev->ib_dev.alloc_hw_stats = mlx5_ib_alloc_hw_stats;
4802
4803 return mlx5_ib_alloc_counters(dev);
4804 }
4805
4806 return 0;
4807}
4808
5391{
5392 if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt)) {
5393 dev->ib_dev.get_hw_stats = mlx5_ib_get_hw_stats;
5394 dev->ib_dev.alloc_hw_stats = mlx5_ib_alloc_hw_stats;
5395
5396 return mlx5_ib_alloc_counters(dev);
5397 }
5398
5399 return 0;
5400}
5401
4809static void mlx5_ib_stage_counters_cleanup(struct mlx5_ib_dev *dev)
5402void mlx5_ib_stage_counters_cleanup(struct mlx5_ib_dev *dev)
4810{
4811 if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt))
4812 mlx5_ib_dealloc_counters(dev);
4813}
4814
4815static int mlx5_ib_stage_cong_debugfs_init(struct mlx5_ib_dev *dev)
4816{
4817 return mlx5_ib_init_cong_debugfs(dev,

--- 14 unchanged lines hidden (view full) ---

4832 return 0;
4833}
4834
4835static void mlx5_ib_stage_uar_cleanup(struct mlx5_ib_dev *dev)
4836{
4837 mlx5_put_uars_page(dev->mdev, dev->mdev->priv.uar);
4838}
4839
5403{
5404 if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt))
5405 mlx5_ib_dealloc_counters(dev);
5406}
5407
5408static int mlx5_ib_stage_cong_debugfs_init(struct mlx5_ib_dev *dev)
5409{
5410 return mlx5_ib_init_cong_debugfs(dev,

--- 14 unchanged lines hidden (view full) ---

5425 return 0;
5426}
5427
5428static void mlx5_ib_stage_uar_cleanup(struct mlx5_ib_dev *dev)
5429{
5430 mlx5_put_uars_page(dev->mdev, dev->mdev->priv.uar);
5431}
5432
4840static int mlx5_ib_stage_bfrag_init(struct mlx5_ib_dev *dev)
5433int mlx5_ib_stage_bfrag_init(struct mlx5_ib_dev *dev)
4841{
4842 int err;
4843
4844 err = mlx5_alloc_bfreg(dev->mdev, &dev->bfreg, false, false);
4845 if (err)
4846 return err;
4847
4848 err = mlx5_alloc_bfreg(dev->mdev, &dev->fp_bfreg, false, true);
4849 if (err)
4850 mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg);
4851
4852 return err;
4853}
4854
5434{
5435 int err;
5436
5437 err = mlx5_alloc_bfreg(dev->mdev, &dev->bfreg, false, false);
5438 if (err)
5439 return err;
5440
5441 err = mlx5_alloc_bfreg(dev->mdev, &dev->fp_bfreg, false, true);
5442 if (err)
5443 mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg);
5444
5445 return err;
5446}
5447
4855static void mlx5_ib_stage_bfrag_cleanup(struct mlx5_ib_dev *dev)
5448void mlx5_ib_stage_bfrag_cleanup(struct mlx5_ib_dev *dev)
4856{
4857 mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg);
4858 mlx5_free_bfreg(dev->mdev, &dev->bfreg);
4859}
4860
5449{
5450 mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg);
5451 mlx5_free_bfreg(dev->mdev, &dev->bfreg);
5452}
5453
4861static int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev)
5454static int mlx5_ib_stage_populate_specs(struct mlx5_ib_dev *dev)
4862{
5455{
4863 return ib_register_device(&dev->ib_dev, NULL);
5456 return populate_specs_root(dev);
4864}
4865
5457}
5458
4866static void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev)
5459int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev)
4867{
5460{
4868 ib_unregister_device(&dev->ib_dev);
5461 return ib_register_device(&dev->ib_dev, NULL);
4869}
4870
5462}
5463
4871static int mlx5_ib_stage_umr_res_init(struct mlx5_ib_dev *dev)
5464static void mlx5_ib_stage_depopulate_specs(struct mlx5_ib_dev *dev)
4872{
5465{
4873 return create_umr_res(dev);
5466 depopulate_specs_root(dev);
4874}
4875
5467}
5468
4876static void mlx5_ib_stage_umr_res_cleanup(struct mlx5_ib_dev *dev)
5469void mlx5_ib_stage_pre_ib_reg_umr_cleanup(struct mlx5_ib_dev *dev)
4877{
4878 destroy_umrc_res(dev);
4879}
4880
5470{
5471 destroy_umrc_res(dev);
5472}
5473
5474void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev)
5475{
5476 ib_unregister_device(&dev->ib_dev);
5477}
5478
5479int mlx5_ib_stage_post_ib_reg_umr_init(struct mlx5_ib_dev *dev)
5480{
5481 return create_umr_res(dev);
5482}
5483
4881static int mlx5_ib_stage_delay_drop_init(struct mlx5_ib_dev *dev)
4882{
4883 init_delay_drop(dev);
4884
4885 return 0;
4886}
4887
4888static void mlx5_ib_stage_delay_drop_cleanup(struct mlx5_ib_dev *dev)
4889{
4890 cancel_delay_drop(dev);
4891}
4892
5484static int mlx5_ib_stage_delay_drop_init(struct mlx5_ib_dev *dev)
5485{
5486 init_delay_drop(dev);
5487
5488 return 0;
5489}
5490
5491static void mlx5_ib_stage_delay_drop_cleanup(struct mlx5_ib_dev *dev)
5492{
5493 cancel_delay_drop(dev);
5494}
5495
4893static int mlx5_ib_stage_class_attr_init(struct mlx5_ib_dev *dev)
5496int mlx5_ib_stage_class_attr_init(struct mlx5_ib_dev *dev)
4894{
4895 int err;
4896 int i;
4897
4898 for (i = 0; i < ARRAY_SIZE(mlx5_class_attributes); i++) {
4899 err = device_create_file(&dev->ib_dev.dev,
4900 mlx5_class_attributes[i]);
4901 if (err)
4902 return err;
4903 }
4904
4905 return 0;
4906}
4907
5497{
5498 int err;
5499 int i;
5500
5501 for (i = 0; i < ARRAY_SIZE(mlx5_class_attributes); i++) {
5502 err = device_create_file(&dev->ib_dev.dev,
5503 mlx5_class_attributes[i]);
5504 if (err)
5505 return err;
5506 }
5507
5508 return 0;
5509}
5510
4908static void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
4909 const struct mlx5_ib_profile *profile,
4910 int stage)
5511static int mlx5_ib_stage_rep_reg_init(struct mlx5_ib_dev *dev)
4911{
5512{
5513 mlx5_ib_register_vport_reps(dev);
5514
5515 return 0;
5516}
5517
5518static void mlx5_ib_stage_rep_reg_cleanup(struct mlx5_ib_dev *dev)
5519{
5520 mlx5_ib_unregister_vport_reps(dev);
5521}
5522
5523void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
5524 const struct mlx5_ib_profile *profile,
5525 int stage)
5526{
4912	/* Number of stages to clean up */
4913 while (stage) {
4914 stage--;
4915 if (profile->stage[stage].cleanup)
4916 profile->stage[stage].cleanup(dev);
4917 }
4918
4919 ib_dealloc_device((struct ib_device *)dev);
4920}
4921
4922static void *mlx5_ib_add_slave_port(struct mlx5_core_dev *mdev, u8 port_num);
4923
5527	/* Number of stages to clean up */
5528 while (stage) {
5529 stage--;
5530 if (profile->stage[stage].cleanup)
5531 profile->stage[stage].cleanup(dev);
5532 }
5533
5534 ib_dealloc_device((struct ib_device *)dev);
5535}
5536
5537static void *mlx5_ib_add_slave_port(struct mlx5_core_dev *mdev, u8 port_num);
5538
4924static void *__mlx5_ib_add(struct mlx5_core_dev *mdev,
4925 const struct mlx5_ib_profile *profile)
5539void *__mlx5_ib_add(struct mlx5_ib_dev *dev,
5540 const struct mlx5_ib_profile *profile)
4926{
5541{
4927 struct mlx5_ib_dev *dev;
4928 int err;
4929 int i;
4930
4931 printk_once(KERN_INFO "%s", mlx5_version);
4932
5542 int err;
5543 int i;
5544
5545 printk_once(KERN_INFO "%s", mlx5_version);
5546
4933 dev = (struct mlx5_ib_dev *)ib_alloc_device(sizeof(*dev));
4934 if (!dev)
4935 return NULL;
4936
4937 dev->mdev = mdev;
4938 dev->num_ports = max(MLX5_CAP_GEN(mdev, num_ports),
4939 MLX5_CAP_GEN(mdev, num_vhca_ports));
4940
4941 for (i = 0; i < MLX5_IB_STAGE_MAX; i++) {
4942 if (profile->stage[i].init) {
4943 err = profile->stage[i].init(dev);
4944 if (err)
4945 goto err_out;
4946 }
4947 }
4948

--- 7 unchanged lines hidden (view full) ---

4956
4957 return NULL;
4958}
4959
4960static const struct mlx5_ib_profile pf_profile = {
4961 STAGE_CREATE(MLX5_IB_STAGE_INIT,
4962 mlx5_ib_stage_init_init,
4963 mlx5_ib_stage_init_cleanup),
5547 for (i = 0; i < MLX5_IB_STAGE_MAX; i++) {
5548 if (profile->stage[i].init) {
5549 err = profile->stage[i].init(dev);
5550 if (err)
5551 goto err_out;
5552 }
5553 }
5554

--- 7 unchanged lines hidden (view full) ---

5562
5563 return NULL;
5564}
5565
5566static const struct mlx5_ib_profile pf_profile = {
5567 STAGE_CREATE(MLX5_IB_STAGE_INIT,
5568 mlx5_ib_stage_init_init,
5569 mlx5_ib_stage_init_cleanup),
5570 STAGE_CREATE(MLX5_IB_STAGE_FLOW_DB,
5571 mlx5_ib_stage_flow_db_init,
5572 mlx5_ib_stage_flow_db_cleanup),
4964 STAGE_CREATE(MLX5_IB_STAGE_CAPS,
4965 mlx5_ib_stage_caps_init,
4966 NULL),
5573 STAGE_CREATE(MLX5_IB_STAGE_CAPS,
5574 mlx5_ib_stage_caps_init,
5575 NULL),
5576 STAGE_CREATE(MLX5_IB_STAGE_NON_DEFAULT_CB,
5577 mlx5_ib_stage_non_default_cb,
5578 NULL),
4967 STAGE_CREATE(MLX5_IB_STAGE_ROCE,
4968 mlx5_ib_stage_roce_init,
4969 mlx5_ib_stage_roce_cleanup),
4970 STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES,
4971 mlx5_ib_stage_dev_res_init,
4972 mlx5_ib_stage_dev_res_cleanup),
4973 STAGE_CREATE(MLX5_IB_STAGE_ODP,
4974 mlx5_ib_stage_odp_init,

--- 5 unchanged lines hidden (view full) ---

4980 mlx5_ib_stage_cong_debugfs_init,
4981 mlx5_ib_stage_cong_debugfs_cleanup),
4982 STAGE_CREATE(MLX5_IB_STAGE_UAR,
4983 mlx5_ib_stage_uar_init,
4984 mlx5_ib_stage_uar_cleanup),
4985 STAGE_CREATE(MLX5_IB_STAGE_BFREG,
4986 mlx5_ib_stage_bfrag_init,
4987 mlx5_ib_stage_bfrag_cleanup),
5579 STAGE_CREATE(MLX5_IB_STAGE_ROCE,
5580 mlx5_ib_stage_roce_init,
5581 mlx5_ib_stage_roce_cleanup),
5582 STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES,
5583 mlx5_ib_stage_dev_res_init,
5584 mlx5_ib_stage_dev_res_cleanup),
5585 STAGE_CREATE(MLX5_IB_STAGE_ODP,
5586 mlx5_ib_stage_odp_init,

--- 5 unchanged lines hidden (view full) ---

5592 mlx5_ib_stage_cong_debugfs_init,
5593 mlx5_ib_stage_cong_debugfs_cleanup),
5594 STAGE_CREATE(MLX5_IB_STAGE_UAR,
5595 mlx5_ib_stage_uar_init,
5596 mlx5_ib_stage_uar_cleanup),
5597 STAGE_CREATE(MLX5_IB_STAGE_BFREG,
5598 mlx5_ib_stage_bfrag_init,
5599 mlx5_ib_stage_bfrag_cleanup),
5600 STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR,
5601 NULL,
5602 mlx5_ib_stage_pre_ib_reg_umr_cleanup),
5603 STAGE_CREATE(MLX5_IB_STAGE_SPECS,
5604 mlx5_ib_stage_populate_specs,
5605 mlx5_ib_stage_depopulate_specs),
4988 STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
4989 mlx5_ib_stage_ib_reg_init,
4990 mlx5_ib_stage_ib_reg_cleanup),
5606 STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
5607 mlx5_ib_stage_ib_reg_init,
5608 mlx5_ib_stage_ib_reg_cleanup),
4991 STAGE_CREATE(MLX5_IB_STAGE_UMR_RESOURCES,
4992 mlx5_ib_stage_umr_res_init,
4993 mlx5_ib_stage_umr_res_cleanup),
5609 STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
5610 mlx5_ib_stage_post_ib_reg_umr_init,
5611 NULL),
4994 STAGE_CREATE(MLX5_IB_STAGE_DELAY_DROP,
4995 mlx5_ib_stage_delay_drop_init,
4996 mlx5_ib_stage_delay_drop_cleanup),
4997 STAGE_CREATE(MLX5_IB_STAGE_CLASS_ATTR,
4998 mlx5_ib_stage_class_attr_init,
4999 NULL),
5000};
5001
5612 STAGE_CREATE(MLX5_IB_STAGE_DELAY_DROP,
5613 mlx5_ib_stage_delay_drop_init,
5614 mlx5_ib_stage_delay_drop_cleanup),
5615 STAGE_CREATE(MLX5_IB_STAGE_CLASS_ATTR,
5616 mlx5_ib_stage_class_attr_init,
5617 NULL),
5618};
5619
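/*
 * Profile used when the device operates in switchdev (eswitch offloads)
 * mode. It reuses most of the PF stages but installs the representor
 * callbacks and ends by registering the vport representors; the ODP,
 * congestion debugfs and delay-drop stages are omitted.
 */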
5620static const struct mlx5_ib_profile nic_rep_profile = {
5621 STAGE_CREATE(MLX5_IB_STAGE_INIT,
5622 mlx5_ib_stage_init_init,
5623 mlx5_ib_stage_init_cleanup),
5624 STAGE_CREATE(MLX5_IB_STAGE_FLOW_DB,
5625 mlx5_ib_stage_flow_db_init,
5626 mlx5_ib_stage_flow_db_cleanup),
5627 STAGE_CREATE(MLX5_IB_STAGE_CAPS,
5628 mlx5_ib_stage_caps_init,
5629 NULL),
5630 STAGE_CREATE(MLX5_IB_STAGE_NON_DEFAULT_CB,
5631 mlx5_ib_stage_rep_non_default_cb,
5632 NULL),
5633 STAGE_CREATE(MLX5_IB_STAGE_ROCE,
5634 mlx5_ib_stage_rep_roce_init,
5635 mlx5_ib_stage_rep_roce_cleanup),
5636 STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES,
5637 mlx5_ib_stage_dev_res_init,
5638 mlx5_ib_stage_dev_res_cleanup),
5639 STAGE_CREATE(MLX5_IB_STAGE_COUNTERS,
5640 mlx5_ib_stage_counters_init,
5641 mlx5_ib_stage_counters_cleanup),
5642 STAGE_CREATE(MLX5_IB_STAGE_UAR,
5643 mlx5_ib_stage_uar_init,
5644 mlx5_ib_stage_uar_cleanup),
5645 STAGE_CREATE(MLX5_IB_STAGE_BFREG,
5646 mlx5_ib_stage_bfrag_init,
5647 mlx5_ib_stage_bfrag_cleanup),
5648 STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR,
5649 NULL,
5650 mlx5_ib_stage_pre_ib_reg_umr_cleanup),
5651 STAGE_CREATE(MLX5_IB_STAGE_SPECS,
5652 mlx5_ib_stage_populate_specs,
5653 mlx5_ib_stage_depopulate_specs),
5654 STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
5655 mlx5_ib_stage_ib_reg_init,
5656 mlx5_ib_stage_ib_reg_cleanup),
5657 STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
5658 mlx5_ib_stage_post_ib_reg_umr_init,
5659 NULL),
5660 STAGE_CREATE(MLX5_IB_STAGE_CLASS_ATTR,
5661 mlx5_ib_stage_class_attr_init,
5662 NULL),
5663 STAGE_CREATE(MLX5_IB_STAGE_REP_REG,
5664 mlx5_ib_stage_rep_reg_init,
5665 mlx5_ib_stage_rep_reg_cleanup),
5666};
5667
5002static void *mlx5_ib_add_slave_port(struct mlx5_core_dev *mdev, u8 port_num)
5003{
5004 struct mlx5_ib_multiport_info *mpi;
5005 struct mlx5_ib_dev *dev;
5006 bool bound = false;
5007 int err;
5008
5009 mpi = kzalloc(sizeof(*mpi), GFP_KERNEL);

--- 29 unchanged lines hidden (view full) ---

5039 mutex_unlock(&mlx5_ib_multiport_mutex);
5040
5041 return mpi;
5042}
5043
5044static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
5045{
5046 enum rdma_link_layer ll;
5668static void *mlx5_ib_add_slave_port(struct mlx5_core_dev *mdev, u8 port_num)
5669{
5670 struct mlx5_ib_multiport_info *mpi;
5671 struct mlx5_ib_dev *dev;
5672 bool bound = false;
5673 int err;
5674
5675 mpi = kzalloc(sizeof(*mpi), GFP_KERNEL);

--- 29 unchanged lines hidden (view full) ---

5705 mutex_unlock(&mlx5_ib_multiport_mutex);
5706
5707 return mpi;
5708}
5709
5710static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
5711{
5712 enum rdma_link_layer ll;
5713 struct mlx5_ib_dev *dev;
5047 int port_type_cap;
5048
5714 int port_type_cap;
5715
5716 printk_once(KERN_INFO "%s", mlx5_version);
5717
5049 port_type_cap = MLX5_CAP_GEN(mdev, port_type);
5050 ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
5051
5052 if (mlx5_core_is_mp_slave(mdev) && ll == IB_LINK_LAYER_ETHERNET) {
5053 u8 port_num = mlx5_core_native_port_num(mdev) - 1;
5054
5055 return mlx5_ib_add_slave_port(mdev, port_num);
5056 }
5057
5718 port_type_cap = MLX5_CAP_GEN(mdev, port_type);
5719 ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
5720
5721 if (mlx5_core_is_mp_slave(mdev) && ll == IB_LINK_LAYER_ETHERNET) {
5722 u8 port_num = mlx5_core_native_port_num(mdev) - 1;
5723
5724 return mlx5_ib_add_slave_port(mdev, port_num);
5725 }
5726
5058 return __mlx5_ib_add(mdev, &pf_profile);
5727 dev = (struct mlx5_ib_dev *)ib_alloc_device(sizeof(*dev));
5728 if (!dev)
5729 return NULL;
5730
5731 dev->mdev = mdev;
5732 dev->num_ports = max(MLX5_CAP_GEN(mdev, num_ports),
5733 MLX5_CAP_GEN(mdev, num_vhca_ports));
5734
5735 if (MLX5_VPORT_MANAGER(mdev) &&
5736 mlx5_ib_eswitch_mode(mdev->priv.eswitch) == SRIOV_OFFLOADS) {
5737 dev->rep = mlx5_ib_vport_rep(mdev->priv.eswitch, 0);
5738
5739 return __mlx5_ib_add(dev, &nic_rep_profile);
5740 }
5741
5742 return __mlx5_ib_add(dev, &pf_profile);
5059}
5060
5061static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
5062{
5063 struct mlx5_ib_multiport_info *mpi;
5064 struct mlx5_ib_dev *dev;
5065
5066 if (mlx5_core_is_mp_slave(mdev)) {

--- 15 unchanged lines hidden (view full) ---

5082 .remove = mlx5_ib_remove,
5083 .event = mlx5_ib_event,
5084#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
5085 .pfault = mlx5_ib_pfault,
5086#endif
5087 .protocol = MLX5_INTERFACE_PROTOCOL_IB,
5088};
5089
5743}
5744
5745static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
5746{
5747 struct mlx5_ib_multiport_info *mpi;
5748 struct mlx5_ib_dev *dev;
5749
5750 if (mlx5_core_is_mp_slave(mdev)) {

--- 15 unchanged lines hidden (view full) ---

5766 .remove = mlx5_ib_remove,
5767 .event = mlx5_ib_event,
5768#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
5769 .pfault = mlx5_ib_pfault,
5770#endif
5771 .protocol = MLX5_INTERFACE_PROTOCOL_IB,
5772};
5773
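/*
 * Accessors for the single emergency page allocated at module init for XLT
 * updates: the mutex is taken in get and released in put, so only one caller
 * can use the page at a time.
 */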
5774unsigned long mlx5_ib_get_xlt_emergency_page(void)
5775{
5776 mutex_lock(&xlt_emergency_page_mutex);
5777 return xlt_emergency_page;
5778}
5779
5780void mlx5_ib_put_xlt_emergency_page(void)
5781{
5782 mutex_unlock(&xlt_emergency_page_mutex);
5783}
5784
5090static int __init mlx5_ib_init(void)
5091{
5092 int err;
5093
5785static int __init mlx5_ib_init(void)
5786{
5787 int err;
5788
5789 xlt_emergency_page = __get_free_page(GFP_KERNEL);
5790 if (!xlt_emergency_page)
5791 return -ENOMEM;
5792
5793 mutex_init(&xlt_emergency_page_mutex);
5794
5094 mlx5_ib_event_wq = alloc_ordered_workqueue("mlx5_ib_event_wq", 0);
5795 mlx5_ib_event_wq = alloc_ordered_workqueue("mlx5_ib_event_wq", 0);
5095 if (!mlx5_ib_event_wq)
5796 if (!mlx5_ib_event_wq) {
5797 free_page(xlt_emergency_page);
5096 return -ENOMEM;
5798 return -ENOMEM;
5799 }
5097
5098 mlx5_ib_odp_init();
5099
5100 err = mlx5_register_interface(&mlx5_ib_interface);
5101
5102 return err;
5103}
5104
5105static void __exit mlx5_ib_cleanup(void)
5106{
5107 mlx5_unregister_interface(&mlx5_ib_interface);
5108 destroy_workqueue(mlx5_ib_event_wq);
5800
5801 mlx5_ib_odp_init();
5802
5803 err = mlx5_register_interface(&mlx5_ib_interface);
5804
5805 return err;
5806}
5807
5808static void __exit mlx5_ib_cleanup(void)
5809{
5810 mlx5_unregister_interface(&mlx5_ib_interface);
5811 destroy_workqueue(mlx5_ib_event_wq);
5812 mutex_destroy(&xlt_emergency_page_mutex);
5813 free_page(xlt_emergency_page);
5109}
5110
5111module_init(mlx5_ib_init);
5112module_exit(mlx5_ib_cleanup);
5814}
5815
5816module_init(mlx5_ib_init);
5817module_exit(mlx5_ib_cleanup);