main.c: diff between commits e093111ddb6c786e32b882108c1c08ef83d781f4 (newer) and ec2558796d25e6024071b6bcb8e11392538d57bf (older). Added lines are prefixed with +, removed lines with -.
/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:

--- 16 unchanged lines hidden (view full) ---

 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

+#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#if defined(CONFIG_X86)

--- 12 unchanged lines hidden (view full) ---

#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
#include <linux/in.h>
#include <linux/etherdevice.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/vport.h>
#include "mlx5_ib.h"
#include "cmd.h"
+#include <linux/mlx5/vport.h>

#define DRIVER_NAME "mlx5_ib"
#define DRIVER_VERSION "5.0-0"

MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Connect-IB HCA IB driver");
MODULE_LICENSE("Dual BSD/GPL");
-MODULE_VERSION(DRIVER_VERSION);
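Hunk notes: the newer revision pulls in <linux/debugfs.h> for the delay-drop debugfs knobs added further down, and also picks up a second, redundant #include <linux/mlx5/vport.h>. MODULE_VERSION(DRIVER_VERSION) is dropped from the module boilerplate.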

static char mlx5_version[] =
	DRIVER_NAME ": Mellanox Connect-IB Infiniband driver v"
	DRIVER_VERSION "\n";

enum {
	MLX5_ATOMIC_SIZE_QP_8BYTES = 1 << 3,
};

--- 15 unchanged lines hidden (view full) ---

mlx5_ib_port_link_layer(struct ib_device *device, u8 port_num)
{
	struct mlx5_ib_dev *dev = to_mdev(device);
	int port_type_cap = MLX5_CAP_GEN(dev->mdev, port_type);

	return mlx5_port_type_cap_to_rdma_ll(port_type_cap);
}

+static int get_port_state(struct ib_device *ibdev,
+			  u8 port_num,
+			  enum ib_port_state *state)
+{
+	struct ib_port_attr attr;
+	int ret;
+
+	memset(&attr, 0, sizeof(attr));
+	ret = mlx5_ib_query_port(ibdev, port_num, &attr);
+	if (!ret)
+		*state = attr.state;
+	return ret;
+}

static int mlx5_netdev_event(struct notifier_block *this,
			     unsigned long event, void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct mlx5_ib_dev *ibdev = container_of(this, struct mlx5_ib_dev,
						 roce.nb);

	switch (event) {
	case NETDEV_REGISTER:
	case NETDEV_UNREGISTER:
		write_lock(&ibdev->roce.netdev_lock);
		if (ndev->dev.parent == &ibdev->mdev->pdev->dev)
			ibdev->roce.netdev = (event == NETDEV_UNREGISTER) ?
					     NULL : ndev;
		write_unlock(&ibdev->roce.netdev_lock);
		break;

+	case NETDEV_CHANGE:
	case NETDEV_UP:
	case NETDEV_DOWN: {
		struct net_device *lag_ndev = mlx5_lag_get_roce_netdev(ibdev->mdev);
		struct net_device *upper = NULL;

		if (lag_ndev) {
			upper = netdev_master_upper_dev_get(lag_ndev);
			dev_put(lag_ndev);
		}

		if ((upper == ndev || (!upper && ndev == ibdev->roce.netdev))
		    && ibdev->ib_active) {
			struct ib_event ibev = { };
+			enum ib_port_state port_state;

+			if (get_port_state(&ibdev->ib_dev, 1, &port_state))
+				return NOTIFY_DONE;
+
+			if (ibdev->roce.last_port_state == port_state)
+				return NOTIFY_DONE;
+
+			ibdev->roce.last_port_state = port_state;
			ibev.device = &ibdev->ib_dev;
+			if (port_state == IB_PORT_DOWN)
+				ibev.event = IB_EVENT_PORT_ERR;
+			else if (port_state == IB_PORT_ACTIVE)
+				ibev.event = IB_EVENT_PORT_ACTIVE;
+			else
+				return NOTIFY_DONE;
-			ibev.event = (event == NETDEV_UP) ?
-				     IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;

			ibev.element.port_num = 1;
			ib_dispatch_event(&ibev);
		}
		break;
	}

	default:
		break;
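Hunk notes: instead of inferring the IB port state from the netdev event type, the notifier now queries the real state with get_port_state(), caches it in roce.last_port_state, and only dispatches IB_EVENT_PORT_ACTIVE / IB_EVENT_PORT_ERR on an actual transition; NETDEV_CHANGE (carrier changes) is handled as well.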

--- 521 unchanged lines hidden (view full) ---

		resp.response_length += sizeof(resp.rss_caps);
	}

	if (MLX5_CAP_GEN(mdev, ipoib_basic_offloads)) {
		props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
		props->device_cap_flags |= IB_DEVICE_UD_TSO;
	}

+	if (MLX5_CAP_GEN(dev->mdev, rq_delay_drop) &&
+	    MLX5_CAP_GEN(dev->mdev, general_notification_event))
+		props->raw_packet_caps |= IB_RAW_PACKET_CAP_DELAY_DROP;
+
+	if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads) &&
+	    MLX5_CAP_IPOIB_ENHANCED(mdev, csum_cap))
+		props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;

	if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) &&
	    MLX5_CAP_ETH(dev->mdev, scatter_fcs)) {
		/* Legacy bit to support old userspace libraries */
		props->device_cap_flags |= IB_DEVICE_RAW_SCATTER_FCS;
		props->raw_packet_caps |= IB_RAW_PACKET_CAP_SCATTER_FCS;
	}

	if (mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_BYPASS))
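Hunk notes: query_device now additionally advertises IB_RAW_PACKET_CAP_DELAY_DROP when the firmware exposes rq_delay_drop together with the general notification event, and UD checksum offload for enhanced-IPoIB capable devices.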

--- 401 unchanged lines hidden (view full) ---

	struct ib_port_attr attr;
	u32 tmp;
	int err;
	u32 change_mask;
	u32 value;
	bool is_ib = (mlx5_ib_port_link_layer(ibdev, port) ==
		      IB_LINK_LAYER_INFINIBAND);

-	/* CM layer calls ib_modify_port() regardless of the link layer. For
-	 * Ethernet ports, qkey violation and Port capabilities are meaningless.
-	 */
-	if (!is_ib)
-		return 0;
-
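Hunk notes: the older code bailed out of ib_modify_port() early for non-InfiniBand link layers; the newer revision removes that early return, so Ethernet ports now fall through past the is_ib-gated cap-mask path below.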
	if (MLX5_CAP_GEN(dev->mdev, ib_virt) && is_ib) {
		change_mask = props->clr_port_cap_mask | props->set_port_cap_mask;
		value = ~props->clr_port_cap_mask | props->set_port_cap_mask;
		return set_port_caps_atomic(dev, port, change_mask, value);
	}

	mutex_lock(&dev->cap_mask_mutex);

--- 37 unchanged lines hidden (view full) ---

	uars_per_sys_page = get_uars_per_sys_page(dev, lib_uar_4k);
	bfregs_per_sys_page = uars_per_sys_page * MLX5_NON_FP_BFREGS_PER_UAR;
	req->total_num_bfregs = ALIGN(req->total_num_bfregs, bfregs_per_sys_page);
	*num_sys_pages = req->total_num_bfregs / bfregs_per_sys_page;

	if (req->num_low_latency_bfregs > req->total_num_bfregs - 1)
		return -EINVAL;

-	mlx5_ib_dbg(dev, "uar_4k: fw support %s, lib support %s, user requested %d bfregs, alloated %d, using %d sys pages\n",
+	mlx5_ib_dbg(dev, "uar_4k: fw support %s, lib support %s, user requested %d bfregs, allocated %d, using %d sys pages\n",
		    MLX5_CAP_GEN(dev->mdev, uar_4k) ? "yes" : "no",
		    lib_uar_4k ? "yes" : "no", ref_bfregs,
		    req->total_num_bfregs, *num_sys_pages);

	return 0;
}

static int allocate_uars(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *context)

--- 32 unchanged lines hidden (view full) ---

		if (err) {
			mlx5_ib_warn(dev, "failed to free uar %d\n", i);
			return err;
		}
	}
	return 0;
}

+static int mlx5_ib_alloc_transport_domain(struct mlx5_ib_dev *dev, u32 *tdn)
+{
+	int err;
+
+	err = mlx5_core_alloc_transport_domain(dev->mdev, tdn);
+	if (err)
+		return err;
+
+	if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) ||
+	    !MLX5_CAP_GEN(dev->mdev, disable_local_lb))
+		return err;
+
+	mutex_lock(&dev->lb_mutex);
+	dev->user_td++;
+
+	if (dev->user_td == 2)
+		err = mlx5_nic_vport_update_local_lb(dev->mdev, true);
+
+	mutex_unlock(&dev->lb_mutex);
+	return err;
+}
+
+static void mlx5_ib_dealloc_transport_domain(struct mlx5_ib_dev *dev, u32 tdn)
+{
+	mlx5_core_dealloc_transport_domain(dev->mdev, tdn);
+
+	if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) ||
+	    !MLX5_CAP_GEN(dev->mdev, disable_local_lb))
+		return;
+
+	mutex_lock(&dev->lb_mutex);
+	dev->user_td--;
+
+	if (dev->user_td < 2)
+		mlx5_nic_vport_update_local_lb(dev->mdev, false);
+
+	mutex_unlock(&dev->lb_mutex);
+}

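Hunk notes: these new wrappers refcount user transport domains under lb_mutex and only enable raw-Ethernet local loopback once a second user context appears (user_td == 2), disabling it again when the count drops below two. A minimal userspace C sketch of the same enable-on-second-user pattern, with pthread primitives and a bool standing in for the kernel mutex and mlx5_nic_vport_update_local_lb():

	#include <pthread.h>
	#include <stdbool.h>

	static pthread_mutex_t lb_mutex = PTHREAD_MUTEX_INITIALIZER;
	static int user_td;		/* number of live user transport domains */
	static bool lb_enabled;		/* stand-in for the device loopback state */

	static void td_alloc(void)
	{
		pthread_mutex_lock(&lb_mutex);
		if (++user_td == 2)	/* second user: self-traffic now possible */
			lb_enabled = true;
		pthread_mutex_unlock(&lb_mutex);
	}

	static void td_dealloc(void)
	{
		pthread_mutex_lock(&lb_mutex);
		if (--user_td < 2)	/* back to at most one user */
			lb_enabled = false;
		pthread_mutex_unlock(&lb_mutex);
	}

	int main(void)
	{
		td_alloc();	/* first user: loopback stays off */
		td_alloc();	/* second user: loopback turns on */
		td_dealloc();	/* down to one user: loopback off again */
		return lb_enabled;
	}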
static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
						  struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_ib_alloc_ucontext_req_v2 req = {};
	struct mlx5_ib_alloc_ucontext_resp resp = {};
	struct mlx5_ib_ucontext *context;
	struct mlx5_bfreg_info *bfregi;
	int ver;
	int err;
-	size_t reqlen;
	size_t min_req_v2 = offsetof(struct mlx5_ib_alloc_ucontext_req_v2,
				     max_cqe_version);
	bool lib_uar_4k;

	if (!dev->ib_active)
		return ERR_PTR(-EAGAIN);

-	if (udata->inlen < sizeof(struct ib_uverbs_cmd_hdr))
-		return ERR_PTR(-EINVAL);
-
-	reqlen = udata->inlen - sizeof(struct ib_uverbs_cmd_hdr);
-	if (reqlen == sizeof(struct mlx5_ib_alloc_ucontext_req))
+	if (udata->inlen == sizeof(struct mlx5_ib_alloc_ucontext_req))
		ver = 0;
-	else if (reqlen >= min_req_v2)
+	else if (udata->inlen >= min_req_v2)
		ver = 2;
	else
		return ERR_PTR(-EINVAL);

-	err = ib_copy_from_udata(&req, udata, min(reqlen, sizeof(req)));
+	err = ib_copy_from_udata(&req, udata, min(udata->inlen, sizeof(req)));
	if (err)
		return ERR_PTR(err);

	if (req.flags)
		return ERR_PTR(-EINVAL);

	if (req.comp_mask || req.reserved0 || req.reserved1 || req.reserved2)
		return ERR_PTR(-EOPNOTSUPP);
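Hunk notes: udata->inlen is now taken to already exclude the uverbs command header, so version negotiation compares inlen directly instead of deriving reqlen = inlen - sizeof(struct ib_uverbs_cmd_hdr).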

--- 62 unchanged lines hidden (view full) ---

	context->upd_xlt_page = __get_free_page(GFP_KERNEL);
	if (!context->upd_xlt_page) {
		err = -ENOMEM;
		goto out_uars;
	}
	mutex_init(&context->upd_xlt_page_mutex);

	if (MLX5_CAP_GEN(dev->mdev, log_max_transport_domain)) {
-		err = mlx5_core_alloc_transport_domain(dev->mdev,
-						       &context->tdn);
+		err = mlx5_ib_alloc_transport_domain(dev, &context->tdn);
		if (err)
			goto out_page;
	}

	INIT_LIST_HEAD(&context->vma_private_list);
	INIT_LIST_HEAD(&context->db_page_list);
	mutex_init(&context->db_page_mutex);

--- 49 unchanged lines hidden (view full) ---

	context->cqe_version = resp.cqe_version;
	context->lib_caps = req.lib_caps;
	print_lib_caps(dev, context->lib_caps);

	return &context->ibucontext;

out_td:
	if (MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
-		mlx5_core_dealloc_transport_domain(dev->mdev, context->tdn);
+		mlx5_ib_dealloc_transport_domain(dev, context->tdn);

out_page:
	free_page(context->upd_xlt_page);

out_uars:
	deallocate_uars(dev, context);

out_sys_pages:

--- 11 unchanged lines hidden (view full) ---

static int mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
	struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
	struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
	struct mlx5_bfreg_info *bfregi;

	bfregi = &context->bfregi;
	if (MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
-		mlx5_core_dealloc_transport_domain(dev->mdev, context->tdn);
+		mlx5_ib_dealloc_transport_domain(dev, context->tdn);

	free_page(context->upd_xlt_page);
	deallocate_uars(dev, context);
	kfree(bfregi->sys_pages);
	kfree(bfregi->count);
	kfree(context);

	return 0;

--- 623 unchanged lines hidden (view full) ---

}

/* If a flow could catch both multicast and unicast packets,
 * it won't fall into the multicast flow steering table and this rule
 * could steal other multicast packets.
 */
static bool flow_is_multicast_only(struct ib_flow_attr *ib_attr)
{
-	struct ib_flow_spec_eth *eth_spec;
+	union ib_flow_spec *flow_spec;

	if (ib_attr->type != IB_FLOW_ATTR_NORMAL ||
-	    ib_attr->size < sizeof(struct ib_flow_attr) +
-	    sizeof(struct ib_flow_spec_eth) ||
	    ib_attr->num_of_specs < 1)
		return false;

-	eth_spec = (struct ib_flow_spec_eth *)(ib_attr + 1);
-	if (eth_spec->type != IB_FLOW_SPEC_ETH ||
-	    eth_spec->size != sizeof(*eth_spec))
-		return false;
-
-	return is_multicast_ether_addr(eth_spec->mask.dst_mac) &&
-	       is_multicast_ether_addr(eth_spec->val.dst_mac);
+	flow_spec = (union ib_flow_spec *)(ib_attr + 1);
+	if (flow_spec->type == IB_FLOW_SPEC_IPV4) {
+		struct ib_flow_spec_ipv4 *ipv4_spec;
+
+		ipv4_spec = (struct ib_flow_spec_ipv4 *)flow_spec;
+		if (ipv4_is_multicast(ipv4_spec->val.dst_ip))
+			return true;
+
+		return false;
+	}
+
+	if (flow_spec->type == IB_FLOW_SPEC_ETH) {
+		struct ib_flow_spec_eth *eth_spec;
+
+		eth_spec = (struct ib_flow_spec_eth *)flow_spec;
+		return is_multicast_ether_addr(eth_spec->mask.dst_mac) &&
+		       is_multicast_ether_addr(eth_spec->val.dst_mac);
+	}
+
+	return false;
}

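Hunk notes: flow_is_multicast_only() now parses the first spec as a union ib_flow_spec, so IPv4 multicast destinations are recognized in addition to Ethernet multicast MACs, and the old eth-only size/type checks are gone. For reference, a userspace C sketch of the IPv4 multicast test the kernel's ipv4_is_multicast() performs (224.0.0.0/4, i.e. a 0xe top nibble):

	#include <arpa/inet.h>
	#include <stdbool.h>
	#include <stdint.h>

	/* True for addresses in 224.0.0.0/4; 'be_addr' is in network byte
	 * order, as in ib_flow_spec_ipv4.val.dst_ip.
	 */
	static bool ipv4_is_mcast(uint32_t be_addr)
	{
		return (ntohl(be_addr) & 0xf0000000u) == 0xe0000000u;
	}

	int main(void)
	{
		return ipv4_is_mcast(inet_addr("239.1.1.1")) ? 0 : 1;
	}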
static bool is_valid_ethertype(struct mlx5_core_dev *mdev,
			       const struct ib_flow_attr *flow_attr,
			       bool check_inner)
{
	union ib_flow_spec *ib_spec = (union ib_flow_spec *)(flow_attr + 1);
	int match_ipv = check_inner ?

--- 461 unchanged lines hidden (view full) ---

	kfree(dst);
	kfree(handler);
	return ERR_PTR(err);
}

static int mlx5_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
+	struct mlx5_ib_qp *mqp = to_mqp(ibqp);
	int err;

+	if (mqp->flags & MLX5_IB_QP_UNDERLAY) {
+		mlx5_ib_dbg(dev, "Attaching a multi cast group to underlay QP is not supported\n");
+		return -EOPNOTSUPP;
+	}
+
	err = mlx5_core_attach_mcg(dev->mdev, gid, ibqp->qp_num);
	if (err)
		mlx5_ib_warn(dev, "failed attaching QPN 0x%x, MGID %pI6\n",
			     ibqp->qp_num, gid->raw);

	return err;
}

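Hunk notes: multicast attach is now rejected with -EOPNOTSUPP for underlay QPs (MLX5_IB_QP_UNDERLAY), the QP type used by the enhanced-IPoIB RDMA netdev path.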

--- 145 unchanged lines hidden (view full) ---

	 * lock/unlock above locks Now need to arm all involved CQs.
	 */
	list_for_each_entry(mcq, &cq_armed_list, reset_notify) {
		mcq->comp(mcq);
	}
	spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags);
}

+static void delay_drop_handler(struct work_struct *work)
+{
+	int err;
+	struct mlx5_ib_delay_drop *delay_drop =
+		container_of(work, struct mlx5_ib_delay_drop,
+			     delay_drop_work);
+
+	atomic_inc(&delay_drop->events_cnt);
+
+	mutex_lock(&delay_drop->lock);
+	err = mlx5_core_set_delay_drop(delay_drop->dev->mdev,
+				       delay_drop->timeout);
+	if (err) {
+		mlx5_ib_warn(delay_drop->dev, "Failed to set delay drop, timeout=%u\n",
+			     delay_drop->timeout);
+		delay_drop->activate = false;
+	}
+	mutex_unlock(&delay_drop->lock);
+}

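Hunk notes: this new worker runs off the MLX5_DEV_EVENT_DELAY_DROP_TIMEOUT event (dispatched from mlx5_ib_event() below); it counts the event and re-programs the configured timeout via mlx5_core_set_delay_drop(), clearing 'activate' if the command fails.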
static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
			  enum mlx5_dev_event event, unsigned long param)
{
	struct mlx5_ib_dev *ibdev = (struct mlx5_ib_dev *)context;
	struct ib_event ibev;
	bool fatal = false;
	u8 port = 0;

--- 36 unchanged lines hidden (view full) ---

		ibev.event = IB_EVENT_GID_CHANGE;
		port = (u8)param;
		break;

	case MLX5_DEV_EVENT_CLIENT_REREG:
		ibev.event = IB_EVENT_CLIENT_REREGISTER;
		port = (u8)param;
		break;
+	case MLX5_DEV_EVENT_DELAY_DROP_TIMEOUT:
+		schedule_work(&ibdev->delay_drop.delay_drop_work);
+		goto out;
	default:
-		return;
+		goto out;
	}

	ibev.device = &ibdev->ib_dev;
	ibev.element.port_num = port;

	if (port < 1 || port > ibdev->num_ports) {
		mlx5_ib_warn(ibdev, "warning: event on port %d\n", port);
-		return;
+		goto out;
	}

	if (ibdev->ib_active)
		ib_dispatch_event(&ibev);

	if (fatal)
		ibdev->ib_active = false;
+
+out:
+	return;
}

static int set_has_smi_cap(struct mlx5_ib_dev *dev)
{
	struct mlx5_hca_vport_context vport_ctx;
	int err;
	int port;

--- 397 unchanged lines hidden (view full) ---

	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->core_cap_flags = get_core_cap_flags(ibdev);
	if ((ll == IB_LINK_LAYER_INFINIBAND) || MLX5_CAP_GEN(dev->mdev, roce))
		immutable->max_mad_size = IB_MGMT_MAD_SIZE;

	return 0;
}

-static void get_dev_fw_str(struct ib_device *ibdev, char *str,
-			   size_t str_len)
+static void get_dev_fw_str(struct ib_device *ibdev, char *str)
{
	struct mlx5_ib_dev *dev =
		container_of(ibdev, struct mlx5_ib_dev, ib_dev);
-	snprintf(str, str_len, "%d.%d.%04d", fw_rev_maj(dev->mdev),
-		 fw_rev_min(dev->mdev), fw_rev_sub(dev->mdev));
+	snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%04d",
+		 fw_rev_maj(dev->mdev), fw_rev_min(dev->mdev),
+		 fw_rev_sub(dev->mdev));
}

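Hunk notes: the get_dev_fw_str() callback lost its str_len parameter; the IB core now hands every driver a buffer of IB_FW_VERSION_NAME_MAX bytes.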
static int mlx5_eth_lag_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_flow_namespace *ns = mlx5_get_flow_namespace(mdev,
								 MLX5_FLOW_NAMESPACE_LAG);
	struct mlx5_flow_table *ft;

--- 123 unchanged lines hidden (view full) ---

static const struct mlx5_ib_counter cong_cnts[] = {
	INIT_CONG_COUNTER(rp_cnp_ignored),
	INIT_CONG_COUNTER(rp_cnp_handled),
	INIT_CONG_COUNTER(np_ecn_marked_roce_packets),
	INIT_CONG_COUNTER(np_cnp_sent),
};

+static const struct mlx5_ib_counter extended_err_cnts[] = {
+	INIT_Q_COUNTER(resp_local_length_error),
+	INIT_Q_COUNTER(resp_cqe_error),
+	INIT_Q_COUNTER(req_cqe_error),
+	INIT_Q_COUNTER(req_remote_invalid_request),
+	INIT_Q_COUNTER(req_remote_access_errors),
+	INIT_Q_COUNTER(resp_remote_access_errors),
+	INIT_Q_COUNTER(resp_cqe_flush_error),
+	INIT_Q_COUNTER(req_cqe_flush_error),
+};

static void mlx5_ib_dealloc_counters(struct mlx5_ib_dev *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_ports; i++) {
		mlx5_core_dealloc_q_counter(dev->mdev,
					    dev->port[i].cnts.set_id);
		kfree(dev->port[i].cnts.names);

--- 8 unchanged lines hidden (view full) ---

	num_counters = ARRAY_SIZE(basic_q_cnts);

	if (MLX5_CAP_GEN(dev->mdev, out_of_seq_cnt))
		num_counters += ARRAY_SIZE(out_of_seq_q_cnts);

	if (MLX5_CAP_GEN(dev->mdev, retransmission_q_counters))
		num_counters += ARRAY_SIZE(retrans_q_cnts);

+	if (MLX5_CAP_GEN(dev->mdev, enhanced_error_q_counters))
+		num_counters += ARRAY_SIZE(extended_err_cnts);

	cnts->num_q_counters = num_counters;

	if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) {
		cnts->num_cong_counters = ARRAY_SIZE(cong_cnts);
		num_counters += ARRAY_SIZE(cong_cnts);
	}

	cnts->names = kcalloc(num_counters, sizeof(cnts->names), GFP_KERNEL);

--- 33 unchanged lines hidden (view full) ---

	if (MLX5_CAP_GEN(dev->mdev, retransmission_q_counters)) {
		for (i = 0; i < ARRAY_SIZE(retrans_q_cnts); i++, j++) {
			names[j] = retrans_q_cnts[i].name;
			offsets[j] = retrans_q_cnts[i].offset;
		}
	}

+	if (MLX5_CAP_GEN(dev->mdev, enhanced_error_q_counters)) {
+		for (i = 0; i < ARRAY_SIZE(extended_err_cnts); i++, j++) {
+			names[j] = extended_err_cnts[i].name;
+			offsets[j] = extended_err_cnts[i].offset;
+		}
+	}
+
	if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) {
		for (i = 0; i < ARRAY_SIZE(cong_cnts); i++, j++) {
			names[j] = cong_cnts[i].name;
			offsets[j] = cong_cnts[i].offset;
		}
	}
}

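Hunk notes: when the enhanced_error_q_counters capability is set, the per-port counter list grows by the extended_err_cnts block (CQE and remote-access error counters) on top of the basic, out-of-sequence, retransmission and congestion counters.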

--- 154 unchanged lines hidden (view full) ---

			      name, setup);
	if (likely(!IS_ERR_OR_NULL(netdev))) {
		rn = netdev_priv(netdev);
		rn->free_rdma_netdev = mlx5_ib_free_rdma_netdev;
	}
	return netdev;
}

+static void delay_drop_debugfs_cleanup(struct mlx5_ib_dev *dev)
+{
+	if (!dev->delay_drop.dbg)
+		return;
+	debugfs_remove_recursive(dev->delay_drop.dbg->dir_debugfs);
+	kfree(dev->delay_drop.dbg);
+	dev->delay_drop.dbg = NULL;
+}
+
+static void cancel_delay_drop(struct mlx5_ib_dev *dev)
+{
+	if (!(dev->ib_dev.attrs.raw_packet_caps & IB_RAW_PACKET_CAP_DELAY_DROP))
+		return;
+
+	cancel_work_sync(&dev->delay_drop.delay_drop_work);
+	delay_drop_debugfs_cleanup(dev);
+}
+
+static ssize_t delay_drop_timeout_read(struct file *filp, char __user *buf,
+				       size_t count, loff_t *pos)
+{
+	struct mlx5_ib_delay_drop *delay_drop = filp->private_data;
+	char lbuf[20];
+	int len;
+
+	len = snprintf(lbuf, sizeof(lbuf), "%u\n", delay_drop->timeout);
+	return simple_read_from_buffer(buf, count, pos, lbuf, len);
+}
+
+static ssize_t delay_drop_timeout_write(struct file *filp, const char __user *buf,
+					size_t count, loff_t *pos)
+{
+	struct mlx5_ib_delay_drop *delay_drop = filp->private_data;
+	u32 timeout;
+	u32 var;
+
+	if (kstrtouint_from_user(buf, count, 0, &var))
+		return -EFAULT;
+
+	timeout = min_t(u32, roundup(var, 100), MLX5_MAX_DELAY_DROP_TIMEOUT_MS *
+			1000);
+	if (timeout != var)
+		mlx5_ib_dbg(delay_drop->dev, "Round delay drop timeout to %u usec\n",
+			    timeout);
+
+	delay_drop->timeout = timeout;
+
+	return count;
+}
+
+static const struct file_operations fops_delay_drop_timeout = {
+	.owner	= THIS_MODULE,
+	.open	= simple_open,
+	.write	= delay_drop_timeout_write,
+	.read	= delay_drop_timeout_read,
+};
+
+static int delay_drop_debugfs_init(struct mlx5_ib_dev *dev)
+{
+	struct mlx5_ib_dbg_delay_drop *dbg;
+
+	if (!mlx5_debugfs_root)
+		return 0;
+
+	dbg = kzalloc(sizeof(*dbg), GFP_KERNEL);
+	if (!dbg)
+		return -ENOMEM;
+
+	dbg->dir_debugfs =
+		debugfs_create_dir("delay_drop",
+				   dev->mdev->priv.dbg_root);
+	if (!dbg->dir_debugfs)
+		return -ENOMEM;
+
+	dbg->events_cnt_debugfs =
+		debugfs_create_atomic_t("num_timeout_events", 0400,
+					dbg->dir_debugfs,
+					&dev->delay_drop.events_cnt);
+	if (!dbg->events_cnt_debugfs)
+		goto out_debugfs;
+
+	dbg->rqs_cnt_debugfs =
+		debugfs_create_atomic_t("num_rqs", 0400,
+					dbg->dir_debugfs,
+					&dev->delay_drop.rqs_cnt);
+	if (!dbg->rqs_cnt_debugfs)
+		goto out_debugfs;
+
+	dbg->timeout_debugfs =
+		debugfs_create_file("timeout", 0600,
+				    dbg->dir_debugfs,
+				    &dev->delay_drop,
+				    &fops_delay_drop_timeout);
+	if (!dbg->timeout_debugfs)
+		goto out_debugfs;
+
+	return 0;
+
+out_debugfs:
+	delay_drop_debugfs_cleanup(dev);
+	return -ENOMEM;
+}
+
+static void init_delay_drop(struct mlx5_ib_dev *dev)
+{
+	if (!(dev->ib_dev.attrs.raw_packet_caps & IB_RAW_PACKET_CAP_DELAY_DROP))
+		return;
+
+	mutex_init(&dev->delay_drop.lock);
+	dev->delay_drop.dev = dev;
+	dev->delay_drop.activate = false;
+	dev->delay_drop.timeout = MLX5_MAX_DELAY_DROP_TIMEOUT_MS * 1000;
+	INIT_WORK(&dev->delay_drop.delay_drop_work, delay_drop_handler);
+	atomic_set(&dev->delay_drop.rqs_cnt, 0);
+	atomic_set(&dev->delay_drop.events_cnt, 0);
+
+	if (delay_drop_debugfs_init(dev))
+		mlx5_ib_warn(dev, "Failed to init delay drop debugfs\n");
+}
+
+const struct cpumask *mlx5_ib_get_vector_affinity(struct ib_device *ibdev,
+						  int comp_vector)
+{
+	struct mlx5_ib_dev *dev = to_mdev(ibdev);
+
+	return mlx5_get_vector_affinity(dev->mdev, comp_vector);
+}

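Hunk notes: everything above is new infrastructure for delay drop: a debugfs directory ("delay_drop" with read-only num_timeout_events / num_rqs atomics and a read-write timeout file), the init/cancel helpers wired into probe/remove below, plus the new get_vector_affinity() callback mapping completion vectors to CPU masks. A minimal userspace sketch for poking the timeout knob; the debugfs mount point and device directory name are hypothetical, only the delay_drop/timeout leaf comes from the code above:

	#include <stdio.h>

	int main(void)
	{
		/* hypothetical path; depends on where debugfs is mounted and
		 * on the mlx5 core's per-device debugfs directory name
		 */
		const char *path =
			"/sys/kernel/debug/mlx5/0000:81:00.0/delay_drop/timeout";
		FILE *f = fopen(path, "r+");
		unsigned int usec;

		if (!f)
			return 1;
		if (fscanf(f, "%u", &usec) == 1)
			printf("current timeout: %u usec\n", usec);
		rewind(f);
		/* the driver rounds the value up to a multiple of 100 usec
		 * and caps it at MLX5_MAX_DELAY_DROP_TIMEOUT_MS * 1000
		 */
		fprintf(f, "%u\n", 12345);
		fclose(f);
		return 0;
	}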
static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
{
	struct mlx5_ib_dev *dev;
	enum rdma_link_layer ll;
	int port_type_cap;
	const char *name;
	int err;
	int i;

--- 114 unchanged lines hidden (view full) ---

	dev->ib_dev.attach_mcast	= mlx5_ib_mcg_attach;
	dev->ib_dev.detach_mcast	= mlx5_ib_mcg_detach;
	dev->ib_dev.process_mad		= mlx5_ib_process_mad;
	dev->ib_dev.alloc_mr		= mlx5_ib_alloc_mr;
	dev->ib_dev.map_mr_sg		= mlx5_ib_map_mr_sg;
	dev->ib_dev.check_mr_status	= mlx5_ib_check_mr_status;
	dev->ib_dev.get_port_immutable	= mlx5_port_immutable;
	dev->ib_dev.get_dev_fw_str	= get_dev_fw_str;
+	dev->ib_dev.get_vector_affinity	= mlx5_ib_get_vector_affinity;
	if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads))
		dev->ib_dev.alloc_rdma_netdev	= mlx5_ib_alloc_rdma_netdev;

	if (mlx5_core_is_pf(mdev)) {
		dev->ib_dev.get_vf_config	= mlx5_ib_get_vf_config;
		dev->ib_dev.set_vf_link_state	= mlx5_ib_set_vf_link_state;
		dev->ib_dev.get_vf_stats	= mlx5_ib_get_vf_stats;
		dev->ib_dev.set_vf_guid		= mlx5_ib_set_vf_guid;

--- 21 unchanged lines hidden (view full) ---

	if (MLX5_CAP_GEN(mdev, xrc)) {
		dev->ib_dev.alloc_xrcd = mlx5_ib_alloc_xrcd;
		dev->ib_dev.dealloc_xrcd = mlx5_ib_dealloc_xrcd;
		dev->ib_dev.uverbs_cmd_mask |=
			(1ull << IB_USER_VERBS_CMD_OPEN_XRCD) |
			(1ull << IB_USER_VERBS_CMD_CLOSE_XRCD);
	}

+	dev->ib_dev.create_flow	= mlx5_ib_create_flow;
+	dev->ib_dev.destroy_flow = mlx5_ib_destroy_flow;
+	dev->ib_dev.uverbs_ex_cmd_mask |=
+			(1ull << IB_USER_VERBS_EX_CMD_CREATE_FLOW) |
+			(1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW);
+
	if (mlx5_ib_port_link_layer(&dev->ib_dev, 1) ==
	    IB_LINK_LAYER_ETHERNET) {
-		dev->ib_dev.create_flow	= mlx5_ib_create_flow;
-		dev->ib_dev.destroy_flow = mlx5_ib_destroy_flow;
		dev->ib_dev.create_wq	 = mlx5_ib_create_wq;
		dev->ib_dev.modify_wq	 = mlx5_ib_modify_wq;
		dev->ib_dev.destroy_wq	 = mlx5_ib_destroy_wq;
		dev->ib_dev.create_rwq_ind_table = mlx5_ib_create_rwq_ind_table;
		dev->ib_dev.destroy_rwq_ind_table = mlx5_ib_destroy_rwq_ind_table;
		dev->ib_dev.uverbs_ex_cmd_mask |=
-			(1ull << IB_USER_VERBS_EX_CMD_CREATE_FLOW) |
-			(1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW) |
			(1ull << IB_USER_VERBS_EX_CMD_CREATE_WQ) |
			(1ull << IB_USER_VERBS_EX_CMD_MODIFY_WQ) |
			(1ull << IB_USER_VERBS_EX_CMD_DESTROY_WQ) |
			(1ull << IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL) |
			(1ull << IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL);
	}
	err = init_node_data(dev);
	if (err)
		goto err_free_port;

	mutex_init(&dev->flow_db.lock);
	mutex_init(&dev->cap_mask_mutex);
	INIT_LIST_HEAD(&dev->qp_list);
	spin_lock_init(&dev->reset_flow_resource_lock);

	if (ll == IB_LINK_LAYER_ETHERNET) {
		err = mlx5_enable_eth(dev);
		if (err)
			goto err_free_port;
+		dev->roce.last_port_state = IB_PORT_DOWN;
	}

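Hunk notes: create_flow/destroy_flow registration (and the matching uverbs_ex_cmd_mask bits) moves out of the Ethernet-only branch, so flow steering is exposed regardless of the port link layer, and roce.last_port_state is seeded to IB_PORT_DOWN right after mlx5_enable_eth() for the netdev notifier above.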
	err = create_dev_resources(&dev->devr);
	if (err)
		goto err_disable_eth;

	err = mlx5_ib_odp_init_one(dev);
	if (err)
		goto err_rsrc;

	if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt)) {
		err = mlx5_ib_alloc_counters(dev);
		if (err)
			goto err_odp;
	}

+	err = mlx5_ib_init_cong_debugfs(dev);
+	if (err)
+		goto err_cnt;
+
	dev->mdev->priv.uar = mlx5_get_uars_page(dev->mdev);
	if (!dev->mdev->priv.uar)
-		goto err_cnt;
+		goto err_cong;

	err = mlx5_alloc_bfreg(dev->mdev, &dev->bfreg, false, false);
	if (err)
		goto err_uar_page;

	err = mlx5_alloc_bfreg(dev->mdev, &dev->fp_bfreg, false, true);
	if (err)
		goto err_bfreg;

	err = ib_register_device(&dev->ib_dev, NULL);
	if (err)
		goto err_fp_bfreg;

	err = create_umr_res(dev);
	if (err)
		goto err_dev;

+	init_delay_drop(dev);
+
	for (i = 0; i < ARRAY_SIZE(mlx5_class_attributes); i++) {
		err = device_create_file(&dev->ib_dev.dev,
					 mlx5_class_attributes[i]);
		if (err)
-			goto err_umrc;
+			goto err_delay_drop;
	}

+	if ((MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
+	    MLX5_CAP_GEN(mdev, disable_local_lb))
+		mutex_init(&dev->lb_mutex);
+
	dev->ib_active = true;

	return dev;

+err_delay_drop:
+	cancel_delay_drop(dev);
-err_umrc:
	destroy_umrc_res(dev);

err_dev:
	ib_unregister_device(&dev->ib_dev);

err_fp_bfreg:
	mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg);

err_bfreg:
	mlx5_free_bfreg(dev->mdev, &dev->bfreg);

err_uar_page:
	mlx5_put_uars_page(dev->mdev, dev->mdev->priv.uar);

err_cnt:
+	mlx5_ib_cleanup_cong_debugfs(dev);
+err_cong:
	if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt))
		mlx5_ib_dealloc_counters(dev);

err_odp:
	mlx5_ib_odp_remove_one(dev);

err_rsrc:
	destroy_dev_resources(&dev->devr);

--- 13 unchanged lines hidden (view full) ---

	return NULL;
}

static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
{
	struct mlx5_ib_dev *dev = context;
	enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev, 1);

+	cancel_delay_drop(dev);
	mlx5_remove_netdev_notifier(dev);
	ib_unregister_device(&dev->ib_dev);
	mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg);
	mlx5_free_bfreg(dev->mdev, &dev->bfreg);
	mlx5_put_uars_page(dev->mdev, mdev->priv.uar);
+	mlx5_ib_cleanup_cong_debugfs(dev);
	if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt))
		mlx5_ib_dealloc_counters(dev);
	destroy_umrc_res(dev);
	mlx5_ib_odp_remove_one(dev);
	destroy_dev_resources(&dev->devr);
	if (ll == IB_LINK_LAYER_ETHERNET)
		mlx5_disable_eth(dev);
	kfree(dev->port);
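Hunk notes: teardown gains the mirror-image steps of the new init work: cancel_delay_drop() runs before the device is unregistered, and the congestion-control debugfs tree is cleaned up after the UAR page is released.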

--- 31 unchanged lines hidden ---