xref: /openbmc/linux/drivers/infiniband/hw/mlx5/main.c (revision c47ac6aee6df4dc17460f56dd66af0a271e8392c)
1e126ba97SEli Cohen /*
26cf0a15fSSaeed Mahameed  * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
3e126ba97SEli Cohen  *
4e126ba97SEli Cohen  * This software is available to you under a choice of one of two
5e126ba97SEli Cohen  * licenses.  You may choose to be licensed under the terms of the GNU
6e126ba97SEli Cohen  * General Public License (GPL) Version 2, available from the file
7e126ba97SEli Cohen  * COPYING in the main directory of this source tree, or the
8e126ba97SEli Cohen  * OpenIB.org BSD license below:
9e126ba97SEli Cohen  *
10e126ba97SEli Cohen  *     Redistribution and use in source and binary forms, with or
11e126ba97SEli Cohen  *     without modification, are permitted provided that the following
12e126ba97SEli Cohen  *     conditions are met:
13e126ba97SEli Cohen  *
14e126ba97SEli Cohen  *      - Redistributions of source code must retain the above
15e126ba97SEli Cohen  *        copyright notice, this list of conditions and the following
16e126ba97SEli Cohen  *        disclaimer.
17e126ba97SEli Cohen  *
18e126ba97SEli Cohen  *      - Redistributions in binary form must reproduce the above
19e126ba97SEli Cohen  *        copyright notice, this list of conditions and the following
20e126ba97SEli Cohen  *        disclaimer in the documentation and/or other materials
21e126ba97SEli Cohen  *        provided with the distribution.
22e126ba97SEli Cohen  *
23e126ba97SEli Cohen  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24e126ba97SEli Cohen  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25e126ba97SEli Cohen  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26e126ba97SEli Cohen  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27e126ba97SEli Cohen  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28e126ba97SEli Cohen  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29e126ba97SEli Cohen  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30e126ba97SEli Cohen  * SOFTWARE.
31e126ba97SEli Cohen  */
32e126ba97SEli Cohen 
33adec640eSChristoph Hellwig #include <linux/highmem.h>
34e126ba97SEli Cohen #include <linux/module.h>
35e126ba97SEli Cohen #include <linux/init.h>
36e126ba97SEli Cohen #include <linux/errno.h>
37e126ba97SEli Cohen #include <linux/pci.h>
38e126ba97SEli Cohen #include <linux/dma-mapping.h>
39e126ba97SEli Cohen #include <linux/slab.h>
40e126ba97SEli Cohen #include <linux/io-mapping.h>
4137aa5c36SGuy Levi #if defined(CONFIG_X86)
4237aa5c36SGuy Levi #include <asm/pat.h>
4337aa5c36SGuy Levi #endif
44e126ba97SEli Cohen #include <linux/sched.h>
457c2344c3SMaor Gottlieb #include <linux/delay.h>
46e126ba97SEli Cohen #include <rdma/ib_user_verbs.h>
473f89a643SAchiad Shochat #include <rdma/ib_addr.h>
482811ba51SAchiad Shochat #include <rdma/ib_cache.h>
49ada68c31SAchiad Shochat #include <linux/mlx5/port.h>
501b5daf11SMajd Dibbiny #include <linux/mlx5/vport.h>
517c2344c3SMaor Gottlieb #include <linux/list.h>
52e126ba97SEli Cohen #include <rdma/ib_smi.h>
53e126ba97SEli Cohen #include <rdma/ib_umem.h>
54038d2ef8SMaor Gottlieb #include <linux/in.h>
55038d2ef8SMaor Gottlieb #include <linux/etherdevice.h>
56038d2ef8SMaor Gottlieb #include <linux/mlx5/fs.h>
57e126ba97SEli Cohen #include "user.h"
58e126ba97SEli Cohen #include "mlx5_ib.h"
59e126ba97SEli Cohen 
60e126ba97SEli Cohen #define DRIVER_NAME "mlx5_ib"
61169a1d85SAmir Vadai #define DRIVER_VERSION "2.2-1"
62169a1d85SAmir Vadai #define DRIVER_RELDATE	"Feb 2014"
63e126ba97SEli Cohen 
64e126ba97SEli Cohen MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
65e126ba97SEli Cohen MODULE_DESCRIPTION("Mellanox Connect-IB HCA IB driver");
66e126ba97SEli Cohen MODULE_LICENSE("Dual BSD/GPL");
67e126ba97SEli Cohen MODULE_VERSION(DRIVER_VERSION);
68e126ba97SEli Cohen 
699603b61dSJack Morgenstein static int deprecated_prof_sel = 2;
709603b61dSJack Morgenstein module_param_named(prof_sel, deprecated_prof_sel, int, 0444);
719603b61dSJack Morgenstein MODULE_PARM_DESC(prof_sel, "profile selector. Deprecated here. Moved to module mlx5_core");
72e126ba97SEli Cohen 
73e126ba97SEli Cohen static char mlx5_version[] =
74e126ba97SEli Cohen 	DRIVER_NAME ": Mellanox Connect-IB InfiniBand driver v"
75e126ba97SEli Cohen 	DRIVER_VERSION " (" DRIVER_RELDATE ")\n";
76e126ba97SEli Cohen 
77da7525d2SEran Ben Elisha enum {
78da7525d2SEran Ben Elisha 	MLX5_ATOMIC_SIZE_QP_8BYTES = 1 << 3,
79da7525d2SEran Ben Elisha };
801b5daf11SMajd Dibbiny 
811b5daf11SMajd Dibbiny static enum rdma_link_layer
82ebd61f68SAchiad Shochat mlx5_port_type_cap_to_rdma_ll(int port_type_cap)
831b5daf11SMajd Dibbiny {
84ebd61f68SAchiad Shochat 	switch (port_type_cap) {
851b5daf11SMajd Dibbiny 	case MLX5_CAP_PORT_TYPE_IB:
861b5daf11SMajd Dibbiny 		return IB_LINK_LAYER_INFINIBAND;
871b5daf11SMajd Dibbiny 	case MLX5_CAP_PORT_TYPE_ETH:
881b5daf11SMajd Dibbiny 		return IB_LINK_LAYER_ETHERNET;
891b5daf11SMajd Dibbiny 	default:
901b5daf11SMajd Dibbiny 		return IB_LINK_LAYER_UNSPECIFIED;
911b5daf11SMajd Dibbiny 	}
921b5daf11SMajd Dibbiny }
931b5daf11SMajd Dibbiny 
94ebd61f68SAchiad Shochat static enum rdma_link_layer
95ebd61f68SAchiad Shochat mlx5_ib_port_link_layer(struct ib_device *device, u8 port_num)
96ebd61f68SAchiad Shochat {
97ebd61f68SAchiad Shochat 	struct mlx5_ib_dev *dev = to_mdev(device);
98ebd61f68SAchiad Shochat 	int port_type_cap = MLX5_CAP_GEN(dev->mdev, port_type);
99ebd61f68SAchiad Shochat 
100ebd61f68SAchiad Shochat 	return mlx5_port_type_cap_to_rdma_ll(port_type_cap);
101ebd61f68SAchiad Shochat }
102ebd61f68SAchiad Shochat 
103fc24fc5eSAchiad Shochat static int mlx5_netdev_event(struct notifier_block *this,
104fc24fc5eSAchiad Shochat 			     unsigned long event, void *ptr)
105fc24fc5eSAchiad Shochat {
106fc24fc5eSAchiad Shochat 	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
107fc24fc5eSAchiad Shochat 	struct mlx5_ib_dev *ibdev = container_of(this, struct mlx5_ib_dev,
108fc24fc5eSAchiad Shochat 						 roce.nb);
109fc24fc5eSAchiad Shochat 
110fc24fc5eSAchiad Shochat 	if ((event != NETDEV_UNREGISTER) && (event != NETDEV_REGISTER))
111fc24fc5eSAchiad Shochat 		return NOTIFY_DONE;
112fc24fc5eSAchiad Shochat 
113fc24fc5eSAchiad Shochat 	write_lock(&ibdev->roce.netdev_lock);
114fc24fc5eSAchiad Shochat 	if (ndev->dev.parent == &ibdev->mdev->pdev->dev)
115fc24fc5eSAchiad Shochat 		ibdev->roce.netdev = (event == NETDEV_UNREGISTER) ? NULL : ndev;
116fc24fc5eSAchiad Shochat 	write_unlock(&ibdev->roce.netdev_lock);
117fc24fc5eSAchiad Shochat 
118fc24fc5eSAchiad Shochat 	return NOTIFY_DONE;
119fc24fc5eSAchiad Shochat }
120fc24fc5eSAchiad Shochat 
121fc24fc5eSAchiad Shochat static struct net_device *mlx5_ib_get_netdev(struct ib_device *device,
122fc24fc5eSAchiad Shochat 					     u8 port_num)
123fc24fc5eSAchiad Shochat {
124fc24fc5eSAchiad Shochat 	struct mlx5_ib_dev *ibdev = to_mdev(device);
125fc24fc5eSAchiad Shochat 	struct net_device *ndev;
126fc24fc5eSAchiad Shochat 
127fc24fc5eSAchiad Shochat 	/* Ensure ndev does not disappear before we invoke dev_hold()
128fc24fc5eSAchiad Shochat 	 */
129fc24fc5eSAchiad Shochat 	read_lock(&ibdev->roce.netdev_lock);
130fc24fc5eSAchiad Shochat 	ndev = ibdev->roce.netdev;
131fc24fc5eSAchiad Shochat 	if (ndev)
132fc24fc5eSAchiad Shochat 		dev_hold(ndev);
133fc24fc5eSAchiad Shochat 	read_unlock(&ibdev->roce.netdev_lock);
134fc24fc5eSAchiad Shochat 
135fc24fc5eSAchiad Shochat 	return ndev;
136fc24fc5eSAchiad Shochat }
137fc24fc5eSAchiad Shochat 
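/*
 * For RoCE ports most of the attributes below are derived from the
 * associated netdev rather than from firmware: the port state follows
 * the netdev carrier, active_mtu follows the netdev MTU, while the
 * active width/speed are still hard-coded (see the TODOs below).
 */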
1383f89a643SAchiad Shochat static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
1393f89a643SAchiad Shochat 				struct ib_port_attr *props)
1403f89a643SAchiad Shochat {
1413f89a643SAchiad Shochat 	struct mlx5_ib_dev *dev = to_mdev(device);
1423f89a643SAchiad Shochat 	struct net_device *ndev;
1433f89a643SAchiad Shochat 	enum ib_mtu ndev_ib_mtu;
144c876a1b7SLeon Romanovsky 	u16 qkey_viol_cntr;
1453f89a643SAchiad Shochat 
1463f89a643SAchiad Shochat 	memset(props, 0, sizeof(*props));
1473f89a643SAchiad Shochat 
1483f89a643SAchiad Shochat 	props->port_cap_flags  |= IB_PORT_CM_SUP;
1493f89a643SAchiad Shochat 	props->port_cap_flags  |= IB_PORT_IP_BASED_GIDS;
1503f89a643SAchiad Shochat 
1513f89a643SAchiad Shochat 	props->gid_tbl_len      = MLX5_CAP_ROCE(dev->mdev,
1523f89a643SAchiad Shochat 						roce_address_table_size);
1533f89a643SAchiad Shochat 	props->max_mtu          = IB_MTU_4096;
1543f89a643SAchiad Shochat 	props->max_msg_sz       = 1 << MLX5_CAP_GEN(dev->mdev, log_max_msg);
1553f89a643SAchiad Shochat 	props->pkey_tbl_len     = 1;
1563f89a643SAchiad Shochat 	props->state            = IB_PORT_DOWN;
1573f89a643SAchiad Shochat 	props->phys_state       = 3; /* Disabled */
1583f89a643SAchiad Shochat 
159c876a1b7SLeon Romanovsky 	mlx5_query_nic_vport_qkey_viol_cntr(dev->mdev, &qkey_viol_cntr);
160c876a1b7SLeon Romanovsky 	props->qkey_viol_cntr = qkey_viol_cntr;
1613f89a643SAchiad Shochat 
1623f89a643SAchiad Shochat 	ndev = mlx5_ib_get_netdev(device, port_num);
1633f89a643SAchiad Shochat 	if (!ndev)
1643f89a643SAchiad Shochat 		return 0;
1653f89a643SAchiad Shochat 
1663f89a643SAchiad Shochat 	if (netif_running(ndev) && netif_carrier_ok(ndev)) {
1673f89a643SAchiad Shochat 		props->state      = IB_PORT_ACTIVE;
1683f89a643SAchiad Shochat 		props->phys_state = 5; /* LinkUp */
1693f89a643SAchiad Shochat 	}
1703f89a643SAchiad Shochat 
1713f89a643SAchiad Shochat 	ndev_ib_mtu = iboe_get_mtu(ndev->mtu);
1723f89a643SAchiad Shochat 
1733f89a643SAchiad Shochat 	dev_put(ndev);
1743f89a643SAchiad Shochat 
1753f89a643SAchiad Shochat 	props->active_mtu	= min(props->max_mtu, ndev_ib_mtu);
1763f89a643SAchiad Shochat 
1773f89a643SAchiad Shochat 	props->active_width	= IB_WIDTH_4X;  /* TODO */
1783f89a643SAchiad Shochat 	props->active_speed	= IB_SPEED_QDR; /* TODO */
1793f89a643SAchiad Shochat 
1803f89a643SAchiad Shochat 	return 0;
1813f89a643SAchiad Shochat }
1823f89a643SAchiad Shochat 
1833cca2606SAchiad Shochat static void ib_gid_to_mlx5_roce_addr(const union ib_gid *gid,
1843cca2606SAchiad Shochat 				     const struct ib_gid_attr *attr,
1853cca2606SAchiad Shochat 				     void *mlx5_addr)
1863cca2606SAchiad Shochat {
1873cca2606SAchiad Shochat #define MLX5_SET_RA(p, f, v) MLX5_SET(roce_addr_layout, p, f, v)
1883cca2606SAchiad Shochat 	char *mlx5_addr_l3_addr	= MLX5_ADDR_OF(roce_addr_layout, mlx5_addr,
1893cca2606SAchiad Shochat 					       source_l3_address);
1903cca2606SAchiad Shochat 	void *mlx5_addr_mac	= MLX5_ADDR_OF(roce_addr_layout, mlx5_addr,
1913cca2606SAchiad Shochat 					       source_mac_47_32);
1923cca2606SAchiad Shochat 
1933cca2606SAchiad Shochat 	if (!gid)
1943cca2606SAchiad Shochat 		return;
1953cca2606SAchiad Shochat 
1963cca2606SAchiad Shochat 	ether_addr_copy(mlx5_addr_mac, attr->ndev->dev_addr);
1973cca2606SAchiad Shochat 
1983cca2606SAchiad Shochat 	if (is_vlan_dev(attr->ndev)) {
1993cca2606SAchiad Shochat 		MLX5_SET_RA(mlx5_addr, vlan_valid, 1);
2003cca2606SAchiad Shochat 		MLX5_SET_RA(mlx5_addr, vlan_id, vlan_dev_vlan_id(attr->ndev));
2013cca2606SAchiad Shochat 	}
2023cca2606SAchiad Shochat 
2033cca2606SAchiad Shochat 	switch (attr->gid_type) {
2043cca2606SAchiad Shochat 	case IB_GID_TYPE_IB:
2053cca2606SAchiad Shochat 		MLX5_SET_RA(mlx5_addr, roce_version, MLX5_ROCE_VERSION_1);
2063cca2606SAchiad Shochat 		break;
2073cca2606SAchiad Shochat 	case IB_GID_TYPE_ROCE_UDP_ENCAP:
2083cca2606SAchiad Shochat 		MLX5_SET_RA(mlx5_addr, roce_version, MLX5_ROCE_VERSION_2);
2093cca2606SAchiad Shochat 		break;
2103cca2606SAchiad Shochat 
2113cca2606SAchiad Shochat 	default:
2123cca2606SAchiad Shochat 		WARN_ON(true);
2133cca2606SAchiad Shochat 	}
2143cca2606SAchiad Shochat 
2153cca2606SAchiad Shochat 	if (attr->gid_type != IB_GID_TYPE_IB) {
2163cca2606SAchiad Shochat 		if (ipv6_addr_v4mapped((void *)gid))
2173cca2606SAchiad Shochat 			MLX5_SET_RA(mlx5_addr, roce_l3_type,
2183cca2606SAchiad Shochat 				    MLX5_ROCE_L3_TYPE_IPV4);
2193cca2606SAchiad Shochat 		else
2203cca2606SAchiad Shochat 			MLX5_SET_RA(mlx5_addr, roce_l3_type,
2213cca2606SAchiad Shochat 				    MLX5_ROCE_L3_TYPE_IPV6);
2223cca2606SAchiad Shochat 	}
2233cca2606SAchiad Shochat 
2243cca2606SAchiad Shochat 	if ((attr->gid_type == IB_GID_TYPE_IB) ||
2253cca2606SAchiad Shochat 	    !ipv6_addr_v4mapped((void *)gid))
2263cca2606SAchiad Shochat 		memcpy(mlx5_addr_l3_addr, gid, sizeof(*gid));
2273cca2606SAchiad Shochat 	else
2283cca2606SAchiad Shochat 		memcpy(&mlx5_addr_l3_addr[12], &gid->raw[12], 4);
2293cca2606SAchiad Shochat }
2303cca2606SAchiad Shochat 
2313cca2606SAchiad Shochat static int set_roce_addr(struct ib_device *device, u8 port_num,
2323cca2606SAchiad Shochat 			 unsigned int index,
2333cca2606SAchiad Shochat 			 const union ib_gid *gid,
2343cca2606SAchiad Shochat 			 const struct ib_gid_attr *attr)
2353cca2606SAchiad Shochat {
2363cca2606SAchiad Shochat 	struct mlx5_ib_dev *dev = to_mdev(device);
237c4f287c4SSaeed Mahameed 	u32  in[MLX5_ST_SZ_DW(set_roce_address_in)]  = {0};
238c4f287c4SSaeed Mahameed 	u32 out[MLX5_ST_SZ_DW(set_roce_address_out)] = {0};
2393cca2606SAchiad Shochat 	void *in_addr = MLX5_ADDR_OF(set_roce_address_in, in, roce_address);
2403cca2606SAchiad Shochat 	enum rdma_link_layer ll = mlx5_ib_port_link_layer(device, port_num);
2413cca2606SAchiad Shochat 
2423cca2606SAchiad Shochat 	if (ll != IB_LINK_LAYER_ETHERNET)
2433cca2606SAchiad Shochat 		return -EINVAL;
2443cca2606SAchiad Shochat 
2453cca2606SAchiad Shochat 	ib_gid_to_mlx5_roce_addr(gid, attr, in_addr);
2463cca2606SAchiad Shochat 
2473cca2606SAchiad Shochat 	MLX5_SET(set_roce_address_in, in, roce_address_index, index);
2483cca2606SAchiad Shochat 	MLX5_SET(set_roce_address_in, in, opcode, MLX5_CMD_OP_SET_ROCE_ADDRESS);
2493cca2606SAchiad Shochat 	return mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
2503cca2606SAchiad Shochat }
2513cca2606SAchiad Shochat 
2523cca2606SAchiad Shochat static int mlx5_ib_add_gid(struct ib_device *device, u8 port_num,
2533cca2606SAchiad Shochat 			   unsigned int index, const union ib_gid *gid,
2543cca2606SAchiad Shochat 			   const struct ib_gid_attr *attr,
2553cca2606SAchiad Shochat 			   __always_unused void **context)
2563cca2606SAchiad Shochat {
2573cca2606SAchiad Shochat 	return set_roce_addr(device, port_num, index, gid, attr);
2583cca2606SAchiad Shochat }
2593cca2606SAchiad Shochat 
2603cca2606SAchiad Shochat static int mlx5_ib_del_gid(struct ib_device *device, u8 port_num,
2613cca2606SAchiad Shochat 			   unsigned int index, __always_unused void **context)
2623cca2606SAchiad Shochat {
2633cca2606SAchiad Shochat 	return set_roce_addr(device, port_num, index, NULL, NULL);
2643cca2606SAchiad Shochat }
2653cca2606SAchiad Shochat 
2662811ba51SAchiad Shochat __be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num,
2672811ba51SAchiad Shochat 			       int index)
2682811ba51SAchiad Shochat {
2692811ba51SAchiad Shochat 	struct ib_gid_attr attr;
2702811ba51SAchiad Shochat 	union ib_gid gid;
2712811ba51SAchiad Shochat 
2722811ba51SAchiad Shochat 	if (ib_get_cached_gid(&dev->ib_dev, port_num, index, &gid, &attr))
2732811ba51SAchiad Shochat 		return 0;
2742811ba51SAchiad Shochat 
2752811ba51SAchiad Shochat 	if (!attr.ndev)
2762811ba51SAchiad Shochat 		return 0;
2772811ba51SAchiad Shochat 
2782811ba51SAchiad Shochat 	dev_put(attr.ndev);
2792811ba51SAchiad Shochat 
2802811ba51SAchiad Shochat 	if (attr.gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP)
2812811ba51SAchiad Shochat 		return 0;
2822811ba51SAchiad Shochat 
2832811ba51SAchiad Shochat 	return cpu_to_be16(MLX5_CAP_ROCE(dev->mdev, r_roce_min_src_udp_port));
2842811ba51SAchiad Shochat }
2852811ba51SAchiad Shochat 
2861b5daf11SMajd Dibbiny static int mlx5_use_mad_ifc(struct mlx5_ib_dev *dev)
2871b5daf11SMajd Dibbiny {
288d603c809SEli Cohen 	return !MLX5_CAP_GEN(dev->mdev, ib_virt);
2891b5daf11SMajd Dibbiny }
2901b5daf11SMajd Dibbiny 
2911b5daf11SMajd Dibbiny enum {
2921b5daf11SMajd Dibbiny 	MLX5_VPORT_ACCESS_METHOD_MAD,
2931b5daf11SMajd Dibbiny 	MLX5_VPORT_ACCESS_METHOD_HCA,
2941b5daf11SMajd Dibbiny 	MLX5_VPORT_ACCESS_METHOD_NIC,
2951b5daf11SMajd Dibbiny };
2961b5daf11SMajd Dibbiny 
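/*
 * Port/vport attributes can be read either through MADs or through
 * vport commands.  Roughly: MADs are used when the device does not
 * report ib_virt support, NIC vport commands are used when port 1 is
 * Ethernet (RoCE), and HCA vport commands are used otherwise.
 */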
2971b5daf11SMajd Dibbiny static int mlx5_get_vport_access_method(struct ib_device *ibdev)
2981b5daf11SMajd Dibbiny {
2991b5daf11SMajd Dibbiny 	if (mlx5_use_mad_ifc(to_mdev(ibdev)))
3001b5daf11SMajd Dibbiny 		return MLX5_VPORT_ACCESS_METHOD_MAD;
3011b5daf11SMajd Dibbiny 
302ebd61f68SAchiad Shochat 	if (mlx5_ib_port_link_layer(ibdev, 1) ==
3031b5daf11SMajd Dibbiny 	    IB_LINK_LAYER_ETHERNET)
3041b5daf11SMajd Dibbiny 		return MLX5_VPORT_ACCESS_METHOD_NIC;
3051b5daf11SMajd Dibbiny 
3061b5daf11SMajd Dibbiny 	return MLX5_VPORT_ACCESS_METHOD_HCA;
3071b5daf11SMajd Dibbiny }
3081b5daf11SMajd Dibbiny 
309da7525d2SEran Ben Elisha static void get_atomic_caps(struct mlx5_ib_dev *dev,
310da7525d2SEran Ben Elisha 			    struct ib_device_attr *props)
311da7525d2SEran Ben Elisha {
312da7525d2SEran Ben Elisha 	u8 tmp;
313da7525d2SEran Ben Elisha 	u8 atomic_operations = MLX5_CAP_ATOMIC(dev->mdev, atomic_operations);
314da7525d2SEran Ben Elisha 	u8 atomic_size_qp = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_qp);
315da7525d2SEran Ben Elisha 	u8 atomic_req_8B_endianness_mode =
316da7525d2SEran Ben Elisha 		MLX5_CAP_ATOMIC(dev->mdev, atomic_req_8B_endianess_mode);
317da7525d2SEran Ben Elisha 
318da7525d2SEran Ben Elisha 	/* Check if HW supports 8 byte standard atomic operations and is
319da7525d2SEran Ben Elisha 	 * capable of responding in host endianness
320da7525d2SEran Ben Elisha 	 */
321da7525d2SEran Ben Elisha 	tmp = MLX5_ATOMIC_OPS_CMP_SWAP | MLX5_ATOMIC_OPS_FETCH_ADD;
322da7525d2SEran Ben Elisha 	if (((atomic_operations & tmp) == tmp) &&
323da7525d2SEran Ben Elisha 	    (atomic_size_qp & MLX5_ATOMIC_SIZE_QP_8BYTES) &&
324da7525d2SEran Ben Elisha 	    (atomic_req_8B_endianness_mode)) {
325da7525d2SEran Ben Elisha 		props->atomic_cap = IB_ATOMIC_HCA;
326da7525d2SEran Ben Elisha 	} else {
327da7525d2SEran Ben Elisha 		props->atomic_cap = IB_ATOMIC_NONE;
328da7525d2SEran Ben Elisha 	}
329da7525d2SEran Ben Elisha }
330da7525d2SEran Ben Elisha 
3311b5daf11SMajd Dibbiny static int mlx5_query_system_image_guid(struct ib_device *ibdev,
3321b5daf11SMajd Dibbiny 					__be64 *sys_image_guid)
3331b5daf11SMajd Dibbiny {
3341b5daf11SMajd Dibbiny 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
3351b5daf11SMajd Dibbiny 	struct mlx5_core_dev *mdev = dev->mdev;
3361b5daf11SMajd Dibbiny 	u64 tmp;
3371b5daf11SMajd Dibbiny 	int err;
3381b5daf11SMajd Dibbiny 
3391b5daf11SMajd Dibbiny 	switch (mlx5_get_vport_access_method(ibdev)) {
3401b5daf11SMajd Dibbiny 	case MLX5_VPORT_ACCESS_METHOD_MAD:
3411b5daf11SMajd Dibbiny 		return mlx5_query_mad_ifc_system_image_guid(ibdev,
3421b5daf11SMajd Dibbiny 							    sys_image_guid);
3431b5daf11SMajd Dibbiny 
3441b5daf11SMajd Dibbiny 	case MLX5_VPORT_ACCESS_METHOD_HCA:
3451b5daf11SMajd Dibbiny 		err = mlx5_query_hca_vport_system_image_guid(mdev, &tmp);
3463f89a643SAchiad Shochat 		break;
3473f89a643SAchiad Shochat 
3483f89a643SAchiad Shochat 	case MLX5_VPORT_ACCESS_METHOD_NIC:
3493f89a643SAchiad Shochat 		err = mlx5_query_nic_vport_system_image_guid(mdev, &tmp);
3503f89a643SAchiad Shochat 		break;
3511b5daf11SMajd Dibbiny 
3521b5daf11SMajd Dibbiny 	default:
3531b5daf11SMajd Dibbiny 		return -EINVAL;
3541b5daf11SMajd Dibbiny 	}
3553f89a643SAchiad Shochat 
3563f89a643SAchiad Shochat 	if (!err)
3573f89a643SAchiad Shochat 		*sys_image_guid = cpu_to_be64(tmp);
3583f89a643SAchiad Shochat 
3593f89a643SAchiad Shochat 	return err;
3603f89a643SAchiad Shochat 
3611b5daf11SMajd Dibbiny }
3621b5daf11SMajd Dibbiny 
3631b5daf11SMajd Dibbiny static int mlx5_query_max_pkeys(struct ib_device *ibdev,
3641b5daf11SMajd Dibbiny 				u16 *max_pkeys)
3651b5daf11SMajd Dibbiny {
3661b5daf11SMajd Dibbiny 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
3671b5daf11SMajd Dibbiny 	struct mlx5_core_dev *mdev = dev->mdev;
3681b5daf11SMajd Dibbiny 
3691b5daf11SMajd Dibbiny 	switch (mlx5_get_vport_access_method(ibdev)) {
3701b5daf11SMajd Dibbiny 	case MLX5_VPORT_ACCESS_METHOD_MAD:
3711b5daf11SMajd Dibbiny 		return mlx5_query_mad_ifc_max_pkeys(ibdev, max_pkeys);
3721b5daf11SMajd Dibbiny 
3731b5daf11SMajd Dibbiny 	case MLX5_VPORT_ACCESS_METHOD_HCA:
3741b5daf11SMajd Dibbiny 	case MLX5_VPORT_ACCESS_METHOD_NIC:
3751b5daf11SMajd Dibbiny 		*max_pkeys = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev,
3761b5daf11SMajd Dibbiny 						pkey_table_size));
3771b5daf11SMajd Dibbiny 		return 0;
3781b5daf11SMajd Dibbiny 
3791b5daf11SMajd Dibbiny 	default:
3801b5daf11SMajd Dibbiny 		return -EINVAL;
3811b5daf11SMajd Dibbiny 	}
3821b5daf11SMajd Dibbiny }
3831b5daf11SMajd Dibbiny 
3841b5daf11SMajd Dibbiny static int mlx5_query_vendor_id(struct ib_device *ibdev,
3851b5daf11SMajd Dibbiny 				u32 *vendor_id)
3861b5daf11SMajd Dibbiny {
3871b5daf11SMajd Dibbiny 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
3881b5daf11SMajd Dibbiny 
3891b5daf11SMajd Dibbiny 	switch (mlx5_get_vport_access_method(ibdev)) {
3901b5daf11SMajd Dibbiny 	case MLX5_VPORT_ACCESS_METHOD_MAD:
3911b5daf11SMajd Dibbiny 		return mlx5_query_mad_ifc_vendor_id(ibdev, vendor_id);
3921b5daf11SMajd Dibbiny 
3931b5daf11SMajd Dibbiny 	case MLX5_VPORT_ACCESS_METHOD_HCA:
3941b5daf11SMajd Dibbiny 	case MLX5_VPORT_ACCESS_METHOD_NIC:
3951b5daf11SMajd Dibbiny 		return mlx5_core_query_vendor_id(dev->mdev, vendor_id);
3961b5daf11SMajd Dibbiny 
3971b5daf11SMajd Dibbiny 	default:
3981b5daf11SMajd Dibbiny 		return -EINVAL;
3991b5daf11SMajd Dibbiny 	}
4001b5daf11SMajd Dibbiny }
4011b5daf11SMajd Dibbiny 
4021b5daf11SMajd Dibbiny static int mlx5_query_node_guid(struct mlx5_ib_dev *dev,
4031b5daf11SMajd Dibbiny 				__be64 *node_guid)
4041b5daf11SMajd Dibbiny {
4051b5daf11SMajd Dibbiny 	u64 tmp;
4061b5daf11SMajd Dibbiny 	int err;
4071b5daf11SMajd Dibbiny 
4081b5daf11SMajd Dibbiny 	switch (mlx5_get_vport_access_method(&dev->ib_dev)) {
4091b5daf11SMajd Dibbiny 	case MLX5_VPORT_ACCESS_METHOD_MAD:
4101b5daf11SMajd Dibbiny 		return mlx5_query_mad_ifc_node_guid(dev, node_guid);
4111b5daf11SMajd Dibbiny 
4121b5daf11SMajd Dibbiny 	case MLX5_VPORT_ACCESS_METHOD_HCA:
4131b5daf11SMajd Dibbiny 		err = mlx5_query_hca_vport_node_guid(dev->mdev, &tmp);
4143f89a643SAchiad Shochat 		break;
4153f89a643SAchiad Shochat 
4163f89a643SAchiad Shochat 	case MLX5_VPORT_ACCESS_METHOD_NIC:
4173f89a643SAchiad Shochat 		err = mlx5_query_nic_vport_node_guid(dev->mdev, &tmp);
4183f89a643SAchiad Shochat 		break;
4191b5daf11SMajd Dibbiny 
4201b5daf11SMajd Dibbiny 	default:
4211b5daf11SMajd Dibbiny 		return -EINVAL;
4221b5daf11SMajd Dibbiny 	}
4233f89a643SAchiad Shochat 
4243f89a643SAchiad Shochat 	if (!err)
4253f89a643SAchiad Shochat 		*node_guid = cpu_to_be64(tmp);
4263f89a643SAchiad Shochat 
4273f89a643SAchiad Shochat 	return err;
4281b5daf11SMajd Dibbiny }
4291b5daf11SMajd Dibbiny 
4301b5daf11SMajd Dibbiny struct mlx5_reg_node_desc {
4311b5daf11SMajd Dibbiny 	u8	desc[64];
4321b5daf11SMajd Dibbiny };
4331b5daf11SMajd Dibbiny 
4341b5daf11SMajd Dibbiny static int mlx5_query_node_desc(struct mlx5_ib_dev *dev, char *node_desc)
4351b5daf11SMajd Dibbiny {
4361b5daf11SMajd Dibbiny 	struct mlx5_reg_node_desc in;
4371b5daf11SMajd Dibbiny 
4381b5daf11SMajd Dibbiny 	if (mlx5_use_mad_ifc(dev))
4391b5daf11SMajd Dibbiny 		return mlx5_query_mad_ifc_node_desc(dev, node_desc);
4401b5daf11SMajd Dibbiny 
4411b5daf11SMajd Dibbiny 	memset(&in, 0, sizeof(in));
4421b5daf11SMajd Dibbiny 
4431b5daf11SMajd Dibbiny 	return mlx5_core_access_reg(dev->mdev, &in, sizeof(in), node_desc,
4441b5daf11SMajd Dibbiny 				    sizeof(struct mlx5_reg_node_desc),
4451b5daf11SMajd Dibbiny 				    MLX5_REG_NODE_DESC, 0, 0);
4461b5daf11SMajd Dibbiny }
4471b5daf11SMajd Dibbiny 
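/*
 * Device attributes are returned both through the mandatory
 * ib_device_attr and, when the caller passed user data (uhw), through
 * an extended mlx5_ib_query_device_resp.  Each optional field is only
 * filled in when the user buffer is large enough for it (checked with
 * field_avail()), and resp.response_length tracks how many bytes are
 * actually copied back at the end.
 */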
448e126ba97SEli Cohen static int mlx5_ib_query_device(struct ib_device *ibdev,
4492528e33eSMatan Barak 				struct ib_device_attr *props,
4502528e33eSMatan Barak 				struct ib_udata *uhw)
451e126ba97SEli Cohen {
452e126ba97SEli Cohen 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
453938fe83cSSaeed Mahameed 	struct mlx5_core_dev *mdev = dev->mdev;
454e126ba97SEli Cohen 	int err = -ENOMEM;
455e126ba97SEli Cohen 	int max_rq_sg;
456e126ba97SEli Cohen 	int max_sq_sg;
457e0238a6aSSagi Grimberg 	u64 min_page_size = 1ull << MLX5_CAP_GEN(mdev, log_pg_sz);
458402ca536SBodong Wang 	struct mlx5_ib_query_device_resp resp = {};
459402ca536SBodong Wang 	size_t resp_len;
460402ca536SBodong Wang 	u64 max_tso;
461e126ba97SEli Cohen 
462402ca536SBodong Wang 	resp_len = sizeof(resp.comp_mask) + sizeof(resp.response_length);
463402ca536SBodong Wang 	if (uhw->outlen && uhw->outlen < resp_len)
464402ca536SBodong Wang 		return -EINVAL;
465402ca536SBodong Wang 	else
466402ca536SBodong Wang 		resp.response_length = resp_len;
467402ca536SBodong Wang 
468402ca536SBodong Wang 	if (uhw->inlen && !ib_is_udata_cleared(uhw, 0, uhw->inlen))
4692528e33eSMatan Barak 		return -EINVAL;
4702528e33eSMatan Barak 
471e126ba97SEli Cohen 	memset(props, 0, sizeof(*props));
4721b5daf11SMajd Dibbiny 	err = mlx5_query_system_image_guid(ibdev,
4731b5daf11SMajd Dibbiny 					   &props->sys_image_guid);
4741b5daf11SMajd Dibbiny 	if (err)
4751b5daf11SMajd Dibbiny 		return err;
4761b5daf11SMajd Dibbiny 
4771b5daf11SMajd Dibbiny 	err = mlx5_query_max_pkeys(ibdev, &props->max_pkeys);
4781b5daf11SMajd Dibbiny 	if (err)
4791b5daf11SMajd Dibbiny 		return err;
4801b5daf11SMajd Dibbiny 
4811b5daf11SMajd Dibbiny 	err = mlx5_query_vendor_id(ibdev, &props->vendor_id);
4821b5daf11SMajd Dibbiny 	if (err)
4831b5daf11SMajd Dibbiny 		return err;
484e126ba97SEli Cohen 
4859603b61dSJack Morgenstein 	props->fw_ver = ((u64)fw_rev_maj(dev->mdev) << 32) |
4869603b61dSJack Morgenstein 		(fw_rev_min(dev->mdev) << 16) |
4879603b61dSJack Morgenstein 		fw_rev_sub(dev->mdev);
488e126ba97SEli Cohen 	props->device_cap_flags    = IB_DEVICE_CHANGE_PHY_PORT |
489e126ba97SEli Cohen 		IB_DEVICE_PORT_ACTIVE_EVENT		|
490e126ba97SEli Cohen 		IB_DEVICE_SYS_IMAGE_GUID		|
4911a4c3a3dSEli Cohen 		IB_DEVICE_RC_RNR_NAK_GEN;
492938fe83cSSaeed Mahameed 
493938fe83cSSaeed Mahameed 	if (MLX5_CAP_GEN(mdev, pkv))
494e126ba97SEli Cohen 		props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
495938fe83cSSaeed Mahameed 	if (MLX5_CAP_GEN(mdev, qkv))
496e126ba97SEli Cohen 		props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
497938fe83cSSaeed Mahameed 	if (MLX5_CAP_GEN(mdev, apm))
498e126ba97SEli Cohen 		props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
499938fe83cSSaeed Mahameed 	if (MLX5_CAP_GEN(mdev, xrc))
500e126ba97SEli Cohen 		props->device_cap_flags |= IB_DEVICE_XRC;
501d2370e0aSMatan Barak 	if (MLX5_CAP_GEN(mdev, imaicl)) {
502d2370e0aSMatan Barak 		props->device_cap_flags |= IB_DEVICE_MEM_WINDOW |
503d2370e0aSMatan Barak 					   IB_DEVICE_MEM_WINDOW_TYPE_2B;
504d2370e0aSMatan Barak 		props->max_mw = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
505b005d316SSagi Grimberg 		/* We support 'Gappy' memory registration too */
506b005d316SSagi Grimberg 		props->device_cap_flags |= IB_DEVICE_SG_GAPS_REG;
507d2370e0aSMatan Barak 	}
508e126ba97SEli Cohen 	props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
509938fe83cSSaeed Mahameed 	if (MLX5_CAP_GEN(mdev, sho)) {
5102dea9094SSagi Grimberg 		props->device_cap_flags |= IB_DEVICE_SIGNATURE_HANDOVER;
5112dea9094SSagi Grimberg 		/* At this stage no support for signature handover */
5122dea9094SSagi Grimberg 		props->sig_prot_cap = IB_PROT_T10DIF_TYPE_1 |
5132dea9094SSagi Grimberg 				      IB_PROT_T10DIF_TYPE_2 |
5142dea9094SSagi Grimberg 				      IB_PROT_T10DIF_TYPE_3;
5152dea9094SSagi Grimberg 		props->sig_guard_cap = IB_GUARD_T10DIF_CRC |
5162dea9094SSagi Grimberg 				       IB_GUARD_T10DIF_CSUM;
5172dea9094SSagi Grimberg 	}
518938fe83cSSaeed Mahameed 	if (MLX5_CAP_GEN(mdev, block_lb_mc))
519f360d88aSEli Cohen 		props->device_cap_flags |= IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
520e126ba97SEli Cohen 
521402ca536SBodong Wang 	if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads)) {
522402ca536SBodong Wang 		if (MLX5_CAP_ETH(mdev, csum_cap))
52388115fe7SBodong Wang 			props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM;
52488115fe7SBodong Wang 
525402ca536SBodong Wang 		if (field_avail(typeof(resp), tso_caps, uhw->outlen)) {
526402ca536SBodong Wang 			max_tso = MLX5_CAP_ETH(mdev, max_lso_cap);
527402ca536SBodong Wang 			if (max_tso) {
528402ca536SBodong Wang 				resp.tso_caps.max_tso = 1 << max_tso;
529402ca536SBodong Wang 				resp.tso_caps.supported_qpts |=
530402ca536SBodong Wang 					1 << IB_QPT_RAW_PACKET;
531402ca536SBodong Wang 				resp.response_length += sizeof(resp.tso_caps);
532402ca536SBodong Wang 			}
533402ca536SBodong Wang 		}
53431f69a82SYishai Hadas 
53531f69a82SYishai Hadas 		if (field_avail(typeof(resp), rss_caps, uhw->outlen)) {
53631f69a82SYishai Hadas 			resp.rss_caps.rx_hash_function =
53731f69a82SYishai Hadas 						MLX5_RX_HASH_FUNC_TOEPLITZ;
53831f69a82SYishai Hadas 			resp.rss_caps.rx_hash_fields_mask =
53931f69a82SYishai Hadas 						MLX5_RX_HASH_SRC_IPV4 |
54031f69a82SYishai Hadas 						MLX5_RX_HASH_DST_IPV4 |
54131f69a82SYishai Hadas 						MLX5_RX_HASH_SRC_IPV6 |
54231f69a82SYishai Hadas 						MLX5_RX_HASH_DST_IPV6 |
54331f69a82SYishai Hadas 						MLX5_RX_HASH_SRC_PORT_TCP |
54431f69a82SYishai Hadas 						MLX5_RX_HASH_DST_PORT_TCP |
54531f69a82SYishai Hadas 						MLX5_RX_HASH_SRC_PORT_UDP |
54631f69a82SYishai Hadas 						MLX5_RX_HASH_DST_PORT_UDP;
54731f69a82SYishai Hadas 			resp.response_length += sizeof(resp.rss_caps);
54831f69a82SYishai Hadas 		}
54931f69a82SYishai Hadas 	} else {
55031f69a82SYishai Hadas 		if (field_avail(typeof(resp), tso_caps, uhw->outlen))
55131f69a82SYishai Hadas 			resp.response_length += sizeof(resp.tso_caps);
55231f69a82SYishai Hadas 		if (field_avail(typeof(resp), rss_caps, uhw->outlen))
55331f69a82SYishai Hadas 			resp.response_length += sizeof(resp.rss_caps);
554402ca536SBodong Wang 	}
555402ca536SBodong Wang 
556f0313965SErez Shitrit 	if (MLX5_CAP_GEN(mdev, ipoib_basic_offloads)) {
557f0313965SErez Shitrit 		props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
558f0313965SErez Shitrit 		props->device_cap_flags |= IB_DEVICE_UD_TSO;
559f0313965SErez Shitrit 	}
560f0313965SErez Shitrit 
561cff5a0f3SMajd Dibbiny 	if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) &&
562cff5a0f3SMajd Dibbiny 	    MLX5_CAP_ETH(dev->mdev, scatter_fcs))
563cff5a0f3SMajd Dibbiny 		props->device_cap_flags |= IB_DEVICE_RAW_SCATTER_FCS;
564cff5a0f3SMajd Dibbiny 
565da6d6ba3SMaor Gottlieb 	if (mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_BYPASS))
566da6d6ba3SMaor Gottlieb 		props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;
567da6d6ba3SMaor Gottlieb 
5681b5daf11SMajd Dibbiny 	props->vendor_part_id	   = mdev->pdev->device;
5691b5daf11SMajd Dibbiny 	props->hw_ver		   = mdev->pdev->revision;
570e126ba97SEli Cohen 
571e126ba97SEli Cohen 	props->max_mr_size	   = ~0ull;
572e0238a6aSSagi Grimberg 	props->page_size_cap	   = ~(min_page_size - 1);
573938fe83cSSaeed Mahameed 	props->max_qp		   = 1 << MLX5_CAP_GEN(mdev, log_max_qp);
574938fe83cSSaeed Mahameed 	props->max_qp_wr	   = 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
575938fe83cSSaeed Mahameed 	max_rq_sg =  MLX5_CAP_GEN(mdev, max_wqe_sz_rq) /
576938fe83cSSaeed Mahameed 		     sizeof(struct mlx5_wqe_data_seg);
577938fe83cSSaeed Mahameed 	max_sq_sg = (MLX5_CAP_GEN(mdev, max_wqe_sz_sq) -
578938fe83cSSaeed Mahameed 		     sizeof(struct mlx5_wqe_ctrl_seg)) /
579e126ba97SEli Cohen 		     sizeof(struct mlx5_wqe_data_seg);
580e126ba97SEli Cohen 	props->max_sge = min(max_rq_sg, max_sq_sg);
581986ef95eSSagi Grimberg 	props->max_sge_rd	   = MLX5_MAX_SGE_RD;
582938fe83cSSaeed Mahameed 	props->max_cq		   = 1 << MLX5_CAP_GEN(mdev, log_max_cq);
5839f177686SLeon Romanovsky 	props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_cq_sz)) - 1;
584938fe83cSSaeed Mahameed 	props->max_mr		   = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
585938fe83cSSaeed Mahameed 	props->max_pd		   = 1 << MLX5_CAP_GEN(mdev, log_max_pd);
586938fe83cSSaeed Mahameed 	props->max_qp_rd_atom	   = 1 << MLX5_CAP_GEN(mdev, log_max_ra_req_qp);
587938fe83cSSaeed Mahameed 	props->max_qp_init_rd_atom = 1 << MLX5_CAP_GEN(mdev, log_max_ra_res_qp);
588938fe83cSSaeed Mahameed 	props->max_srq		   = 1 << MLX5_CAP_GEN(mdev, log_max_srq);
589938fe83cSSaeed Mahameed 	props->max_srq_wr = (1 << MLX5_CAP_GEN(mdev, log_max_srq_sz)) - 1;
590938fe83cSSaeed Mahameed 	props->local_ca_ack_delay  = MLX5_CAP_GEN(mdev, local_ca_ack_delay);
591e126ba97SEli Cohen 	props->max_res_rd_atom	   = props->max_qp_rd_atom * props->max_qp;
592e126ba97SEli Cohen 	props->max_srq_sge	   = max_rq_sg - 1;
593911f4331SSagi Grimberg 	props->max_fast_reg_page_list_len =
594911f4331SSagi Grimberg 		1 << MLX5_CAP_GEN(mdev, log_max_klm_list_size);
595da7525d2SEran Ben Elisha 	get_atomic_caps(dev, props);
59681bea28fSEli Cohen 	props->masked_atomic_cap   = IB_ATOMIC_NONE;
597938fe83cSSaeed Mahameed 	props->max_mcast_grp	   = 1 << MLX5_CAP_GEN(mdev, log_max_mcg);
598938fe83cSSaeed Mahameed 	props->max_mcast_qp_attach = MLX5_CAP_GEN(mdev, max_qp_mcg);
599e126ba97SEli Cohen 	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
600e126ba97SEli Cohen 					   props->max_mcast_grp;
601e126ba97SEli Cohen 	props->max_map_per_fmr = INT_MAX; /* no limit in ConnectIB */
6027c60bcbbSMatan Barak 	props->hca_core_clock = MLX5_CAP_GEN(mdev, device_frequency_khz);
6037c60bcbbSMatan Barak 	props->timestamp_mask = 0x7FFFFFFFFFFFFFFFULL;
604e126ba97SEli Cohen 
6058cdd312cSHaggai Eran #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
606938fe83cSSaeed Mahameed 	if (MLX5_CAP_GEN(mdev, pg))
6078cdd312cSHaggai Eran 		props->device_cap_flags |= IB_DEVICE_ON_DEMAND_PAGING;
6088cdd312cSHaggai Eran 	props->odp_caps = dev->odp_caps;
6098cdd312cSHaggai Eran #endif
6108cdd312cSHaggai Eran 
611051f2630SLeon Romanovsky 	if (MLX5_CAP_GEN(mdev, cd))
612051f2630SLeon Romanovsky 		props->device_cap_flags |= IB_DEVICE_CROSS_CHANNEL;
613051f2630SLeon Romanovsky 
614eff901d3SEli Cohen 	if (!mlx5_core_is_pf(mdev))
615eff901d3SEli Cohen 		props->device_cap_flags |= IB_DEVICE_VIRTUAL_FUNCTION;
616eff901d3SEli Cohen 
61731f69a82SYishai Hadas 	if (mlx5_ib_port_link_layer(ibdev, 1) ==
61831f69a82SYishai Hadas 	    IB_LINK_LAYER_ETHERNET) {
61931f69a82SYishai Hadas 		props->rss_caps.max_rwq_indirection_tables =
62031f69a82SYishai Hadas 			1 << MLX5_CAP_GEN(dev->mdev, log_max_rqt);
62131f69a82SYishai Hadas 		props->rss_caps.max_rwq_indirection_table_size =
62231f69a82SYishai Hadas 			1 << MLX5_CAP_GEN(dev->mdev, log_max_rqt_size);
62331f69a82SYishai Hadas 		props->rss_caps.supported_qpts = 1 << IB_QPT_RAW_PACKET;
62431f69a82SYishai Hadas 		props->max_wq_type_rq =
62531f69a82SYishai Hadas 			1 << MLX5_CAP_GEN(dev->mdev, log_max_rq);
62631f69a82SYishai Hadas 	}
62731f69a82SYishai Hadas 
628402ca536SBodong Wang 	if (uhw->outlen) {
629402ca536SBodong Wang 		err = ib_copy_to_udata(uhw, &resp, resp.response_length);
630402ca536SBodong Wang 
631402ca536SBodong Wang 		if (err)
632402ca536SBodong Wang 			return err;
633402ca536SBodong Wang 	}
634402ca536SBodong Wang 
6351b5daf11SMajd Dibbiny 	return 0;
6361b5daf11SMajd Dibbiny }
637e126ba97SEli Cohen 
6381b5daf11SMajd Dibbiny enum mlx5_ib_width {
6391b5daf11SMajd Dibbiny 	MLX5_IB_WIDTH_1X	= 1 << 0,
6401b5daf11SMajd Dibbiny 	MLX5_IB_WIDTH_2X	= 1 << 1,
6411b5daf11SMajd Dibbiny 	MLX5_IB_WIDTH_4X	= 1 << 2,
6421b5daf11SMajd Dibbiny 	MLX5_IB_WIDTH_8X	= 1 << 3,
6431b5daf11SMajd Dibbiny 	MLX5_IB_WIDTH_12X	= 1 << 4
6441b5daf11SMajd Dibbiny };
6451b5daf11SMajd Dibbiny 
6461b5daf11SMajd Dibbiny static int translate_active_width(struct ib_device *ibdev, u8 active_width,
6471b5daf11SMajd Dibbiny 				  u8 *ib_width)
6481b5daf11SMajd Dibbiny {
6491b5daf11SMajd Dibbiny 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
6501b5daf11SMajd Dibbiny 	int err = 0;
6511b5daf11SMajd Dibbiny 
6521b5daf11SMajd Dibbiny 	if (active_width & MLX5_IB_WIDTH_1X) {
6531b5daf11SMajd Dibbiny 		*ib_width = IB_WIDTH_1X;
6541b5daf11SMajd Dibbiny 	} else if (active_width & MLX5_IB_WIDTH_2X) {
6551b5daf11SMajd Dibbiny 		mlx5_ib_dbg(dev, "active_width %d is not supported by IB spec\n",
6561b5daf11SMajd Dibbiny 			    (int)active_width);
6571b5daf11SMajd Dibbiny 		err = -EINVAL;
6581b5daf11SMajd Dibbiny 	} else if (active_width & MLX5_IB_WIDTH_4X) {
6591b5daf11SMajd Dibbiny 		*ib_width = IB_WIDTH_4X;
6601b5daf11SMajd Dibbiny 	} else if (active_width & MLX5_IB_WIDTH_8X) {
6611b5daf11SMajd Dibbiny 		*ib_width = IB_WIDTH_8X;
6621b5daf11SMajd Dibbiny 	} else if (active_width & MLX5_IB_WIDTH_12X) {
6631b5daf11SMajd Dibbiny 		*ib_width = IB_WIDTH_12X;
6641b5daf11SMajd Dibbiny 	} else {
6651b5daf11SMajd Dibbiny 		mlx5_ib_dbg(dev, "Invalid active_width %d\n",
6661b5daf11SMajd Dibbiny 			    (int)active_width);
6671b5daf11SMajd Dibbiny 		err = -EINVAL;
6681b5daf11SMajd Dibbiny 	}
6691b5daf11SMajd Dibbiny 
6701b5daf11SMajd Dibbiny 	return err;
6711b5daf11SMajd Dibbiny }
6721b5daf11SMajd Dibbiny 
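/*
 * The device reports the MTU in bytes; translate it to the enum ib_mtu
 * encoding from <rdma/ib_verbs.h>, where IB_MTU_256 == 1 up through
 * IB_MTU_4096 == 5.
 */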
6731b5daf11SMajd Dibbiny static int mlx5_mtu_to_ib_mtu(int mtu)
6741b5daf11SMajd Dibbiny {
6751b5daf11SMajd Dibbiny 	switch (mtu) {
6761b5daf11SMajd Dibbiny 	case 256: return 1;
6771b5daf11SMajd Dibbiny 	case 512: return 2;
6781b5daf11SMajd Dibbiny 	case 1024: return 3;
6791b5daf11SMajd Dibbiny 	case 2048: return 4;
6801b5daf11SMajd Dibbiny 	case 4096: return 5;
6811b5daf11SMajd Dibbiny 	default:
6821b5daf11SMajd Dibbiny 		pr_warn("invalid mtu\n");
6831b5daf11SMajd Dibbiny 		return -1;
6841b5daf11SMajd Dibbiny 	}
6851b5daf11SMajd Dibbiny }
6861b5daf11SMajd Dibbiny 
6871b5daf11SMajd Dibbiny enum ib_max_vl_num {
6881b5daf11SMajd Dibbiny 	__IB_MAX_VL_0		= 1,
6891b5daf11SMajd Dibbiny 	__IB_MAX_VL_0_1		= 2,
6901b5daf11SMajd Dibbiny 	__IB_MAX_VL_0_3		= 3,
6911b5daf11SMajd Dibbiny 	__IB_MAX_VL_0_7		= 4,
6921b5daf11SMajd Dibbiny 	__IB_MAX_VL_0_14	= 5,
6931b5daf11SMajd Dibbiny };
6941b5daf11SMajd Dibbiny 
6951b5daf11SMajd Dibbiny enum mlx5_vl_hw_cap {
6961b5daf11SMajd Dibbiny 	MLX5_VL_HW_0	= 1,
6971b5daf11SMajd Dibbiny 	MLX5_VL_HW_0_1	= 2,
6981b5daf11SMajd Dibbiny 	MLX5_VL_HW_0_2	= 3,
6991b5daf11SMajd Dibbiny 	MLX5_VL_HW_0_3	= 4,
7001b5daf11SMajd Dibbiny 	MLX5_VL_HW_0_4	= 5,
7011b5daf11SMajd Dibbiny 	MLX5_VL_HW_0_5	= 6,
7021b5daf11SMajd Dibbiny 	MLX5_VL_HW_0_6	= 7,
7031b5daf11SMajd Dibbiny 	MLX5_VL_HW_0_7	= 8,
7041b5daf11SMajd Dibbiny 	MLX5_VL_HW_0_14	= 15
7051b5daf11SMajd Dibbiny };
7061b5daf11SMajd Dibbiny 
7071b5daf11SMajd Dibbiny static int translate_max_vl_num(struct ib_device *ibdev, u8 vl_hw_cap,
7081b5daf11SMajd Dibbiny 				u8 *max_vl_num)
7091b5daf11SMajd Dibbiny {
7101b5daf11SMajd Dibbiny 	switch (vl_hw_cap) {
7111b5daf11SMajd Dibbiny 	case MLX5_VL_HW_0:
7121b5daf11SMajd Dibbiny 		*max_vl_num = __IB_MAX_VL_0;
7131b5daf11SMajd Dibbiny 		break;
7141b5daf11SMajd Dibbiny 	case MLX5_VL_HW_0_1:
7151b5daf11SMajd Dibbiny 		*max_vl_num = __IB_MAX_VL_0_1;
7161b5daf11SMajd Dibbiny 		break;
7171b5daf11SMajd Dibbiny 	case MLX5_VL_HW_0_3:
7181b5daf11SMajd Dibbiny 		*max_vl_num = __IB_MAX_VL_0_3;
7191b5daf11SMajd Dibbiny 		break;
7201b5daf11SMajd Dibbiny 	case MLX5_VL_HW_0_7:
7211b5daf11SMajd Dibbiny 		*max_vl_num = __IB_MAX_VL_0_7;
7221b5daf11SMajd Dibbiny 		break;
7231b5daf11SMajd Dibbiny 	case MLX5_VL_HW_0_14:
7241b5daf11SMajd Dibbiny 		*max_vl_num = __IB_MAX_VL_0_14;
7251b5daf11SMajd Dibbiny 		break;
7261b5daf11SMajd Dibbiny 
7271b5daf11SMajd Dibbiny 	default:
7281b5daf11SMajd Dibbiny 		return -EINVAL;
7291b5daf11SMajd Dibbiny 	}
7301b5daf11SMajd Dibbiny 
7311b5daf11SMajd Dibbiny 	return 0;
7321b5daf11SMajd Dibbiny }
7331b5daf11SMajd Dibbiny 
7341b5daf11SMajd Dibbiny static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port,
7351b5daf11SMajd Dibbiny 			       struct ib_port_attr *props)
7361b5daf11SMajd Dibbiny {
7371b5daf11SMajd Dibbiny 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
7381b5daf11SMajd Dibbiny 	struct mlx5_core_dev *mdev = dev->mdev;
7391b5daf11SMajd Dibbiny 	struct mlx5_hca_vport_context *rep;
740046339eaSSaeed Mahameed 	u16 max_mtu;
741046339eaSSaeed Mahameed 	u16 oper_mtu;
7421b5daf11SMajd Dibbiny 	int err;
7431b5daf11SMajd Dibbiny 	u8 ib_link_width_oper;
7441b5daf11SMajd Dibbiny 	u8 vl_hw_cap;
7451b5daf11SMajd Dibbiny 
7461b5daf11SMajd Dibbiny 	rep = kzalloc(sizeof(*rep), GFP_KERNEL);
7471b5daf11SMajd Dibbiny 	if (!rep) {
7481b5daf11SMajd Dibbiny 		err = -ENOMEM;
7491b5daf11SMajd Dibbiny 		goto out;
7501b5daf11SMajd Dibbiny 	}
7511b5daf11SMajd Dibbiny 
7521b5daf11SMajd Dibbiny 	memset(props, 0, sizeof(*props));
7531b5daf11SMajd Dibbiny 
7541b5daf11SMajd Dibbiny 	err = mlx5_query_hca_vport_context(mdev, 0, port, 0, rep);
7551b5daf11SMajd Dibbiny 	if (err)
7561b5daf11SMajd Dibbiny 		goto out;
7571b5daf11SMajd Dibbiny 
7581b5daf11SMajd Dibbiny 	props->lid		= rep->lid;
7591b5daf11SMajd Dibbiny 	props->lmc		= rep->lmc;
7601b5daf11SMajd Dibbiny 	props->sm_lid		= rep->sm_lid;
7611b5daf11SMajd Dibbiny 	props->sm_sl		= rep->sm_sl;
7621b5daf11SMajd Dibbiny 	props->state		= rep->vport_state;
7631b5daf11SMajd Dibbiny 	props->phys_state	= rep->port_physical_state;
7641b5daf11SMajd Dibbiny 	props->port_cap_flags	= rep->cap_mask1;
7651b5daf11SMajd Dibbiny 	props->gid_tbl_len	= mlx5_get_gid_table_len(MLX5_CAP_GEN(mdev, gid_table_size));
7661b5daf11SMajd Dibbiny 	props->max_msg_sz	= 1 << MLX5_CAP_GEN(mdev, log_max_msg);
7671b5daf11SMajd Dibbiny 	props->pkey_tbl_len	= mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev, pkey_table_size));
7681b5daf11SMajd Dibbiny 	props->bad_pkey_cntr	= rep->pkey_violation_counter;
7691b5daf11SMajd Dibbiny 	props->qkey_viol_cntr	= rep->qkey_violation_counter;
7701b5daf11SMajd Dibbiny 	props->subnet_timeout	= rep->subnet_timeout;
7711b5daf11SMajd Dibbiny 	props->init_type_reply	= rep->init_type_reply;
772eff901d3SEli Cohen 	props->grh_required	= rep->grh_required;
7731b5daf11SMajd Dibbiny 
7741b5daf11SMajd Dibbiny 	err = mlx5_query_port_link_width_oper(mdev, &ib_link_width_oper, port);
7751b5daf11SMajd Dibbiny 	if (err)
7761b5daf11SMajd Dibbiny 		goto out;
7771b5daf11SMajd Dibbiny 
7781b5daf11SMajd Dibbiny 	err = translate_active_width(ibdev, ib_link_width_oper,
7791b5daf11SMajd Dibbiny 				     &props->active_width);
7801b5daf11SMajd Dibbiny 	if (err)
7811b5daf11SMajd Dibbiny 		goto out;
782d5beb7f2SNoa Osherovich 	err = mlx5_query_port_ib_proto_oper(mdev, &props->active_speed, port);
7831b5daf11SMajd Dibbiny 	if (err)
7841b5daf11SMajd Dibbiny 		goto out;
7851b5daf11SMajd Dibbiny 
786facc9699SSaeed Mahameed 	mlx5_query_port_max_mtu(mdev, &max_mtu, port);
7871b5daf11SMajd Dibbiny 
7881b5daf11SMajd Dibbiny 	props->max_mtu = mlx5_mtu_to_ib_mtu(max_mtu);
7891b5daf11SMajd Dibbiny 
790facc9699SSaeed Mahameed 	mlx5_query_port_oper_mtu(mdev, &oper_mtu, port);
7911b5daf11SMajd Dibbiny 
7921b5daf11SMajd Dibbiny 	props->active_mtu = mlx5_mtu_to_ib_mtu(oper_mtu);
7931b5daf11SMajd Dibbiny 
7941b5daf11SMajd Dibbiny 	err = mlx5_query_port_vl_hw_cap(mdev, &vl_hw_cap, port);
7951b5daf11SMajd Dibbiny 	if (err)
7961b5daf11SMajd Dibbiny 		goto out;
7971b5daf11SMajd Dibbiny 
7981b5daf11SMajd Dibbiny 	err = translate_max_vl_num(ibdev, vl_hw_cap,
7991b5daf11SMajd Dibbiny 				   &props->max_vl_num);
8001b5daf11SMajd Dibbiny out:
8011b5daf11SMajd Dibbiny 	kfree(rep);
802e126ba97SEli Cohen 	return err;
803e126ba97SEli Cohen }
804e126ba97SEli Cohen 
805e126ba97SEli Cohen int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
806e126ba97SEli Cohen 		       struct ib_port_attr *props)
807e126ba97SEli Cohen {
8081b5daf11SMajd Dibbiny 	switch (mlx5_get_vport_access_method(ibdev)) {
8091b5daf11SMajd Dibbiny 	case MLX5_VPORT_ACCESS_METHOD_MAD:
8101b5daf11SMajd Dibbiny 		return mlx5_query_mad_ifc_port(ibdev, port, props);
811e126ba97SEli Cohen 
8121b5daf11SMajd Dibbiny 	case MLX5_VPORT_ACCESS_METHOD_HCA:
8131b5daf11SMajd Dibbiny 		return mlx5_query_hca_port(ibdev, port, props);
8141b5daf11SMajd Dibbiny 
8153f89a643SAchiad Shochat 	case MLX5_VPORT_ACCESS_METHOD_NIC:
8163f89a643SAchiad Shochat 		return mlx5_query_port_roce(ibdev, port, props);
8173f89a643SAchiad Shochat 
8181b5daf11SMajd Dibbiny 	default:
819e126ba97SEli Cohen 		return -EINVAL;
820e126ba97SEli Cohen 	}
821e126ba97SEli Cohen }
822e126ba97SEli Cohen 
823e126ba97SEli Cohen static int mlx5_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
824e126ba97SEli Cohen 			     union ib_gid *gid)
825e126ba97SEli Cohen {
8261b5daf11SMajd Dibbiny 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
8271b5daf11SMajd Dibbiny 	struct mlx5_core_dev *mdev = dev->mdev;
828e126ba97SEli Cohen 
8291b5daf11SMajd Dibbiny 	switch (mlx5_get_vport_access_method(ibdev)) {
8301b5daf11SMajd Dibbiny 	case MLX5_VPORT_ACCESS_METHOD_MAD:
8311b5daf11SMajd Dibbiny 		return mlx5_query_mad_ifc_gids(ibdev, port, index, gid);
832e126ba97SEli Cohen 
8331b5daf11SMajd Dibbiny 	case MLX5_VPORT_ACCESS_METHOD_HCA:
8341b5daf11SMajd Dibbiny 		return mlx5_query_hca_vport_gid(mdev, 0, port, 0, index, gid);
835e126ba97SEli Cohen 
8361b5daf11SMajd Dibbiny 	default:
8371b5daf11SMajd Dibbiny 		return -EINVAL;
8381b5daf11SMajd Dibbiny 	}
839e126ba97SEli Cohen 
840e126ba97SEli Cohen }
841e126ba97SEli Cohen 
842e126ba97SEli Cohen static int mlx5_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
843e126ba97SEli Cohen 			      u16 *pkey)
844e126ba97SEli Cohen {
8451b5daf11SMajd Dibbiny 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
8461b5daf11SMajd Dibbiny 	struct mlx5_core_dev *mdev = dev->mdev;
847e126ba97SEli Cohen 
8481b5daf11SMajd Dibbiny 	switch (mlx5_get_vport_access_method(ibdev)) {
8491b5daf11SMajd Dibbiny 	case MLX5_VPORT_ACCESS_METHOD_MAD:
8501b5daf11SMajd Dibbiny 		return mlx5_query_mad_ifc_pkey(ibdev, port, index, pkey);
851e126ba97SEli Cohen 
8521b5daf11SMajd Dibbiny 	case MLX5_VPORT_ACCESS_METHOD_HCA:
8531b5daf11SMajd Dibbiny 	case MLX5_VPORT_ACCESS_METHOD_NIC:
8541b5daf11SMajd Dibbiny 		return mlx5_query_hca_vport_pkey(mdev, 0, port,  0, index,
8551b5daf11SMajd Dibbiny 						 pkey);
8561b5daf11SMajd Dibbiny 	default:
8571b5daf11SMajd Dibbiny 		return -EINVAL;
858e126ba97SEli Cohen 	}
8591b5daf11SMajd Dibbiny }
860e126ba97SEli Cohen 
861e126ba97SEli Cohen static int mlx5_ib_modify_device(struct ib_device *ibdev, int mask,
862e126ba97SEli Cohen 				 struct ib_device_modify *props)
863e126ba97SEli Cohen {
864e126ba97SEli Cohen 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
865e126ba97SEli Cohen 	struct mlx5_reg_node_desc in;
866e126ba97SEli Cohen 	struct mlx5_reg_node_desc out;
867e126ba97SEli Cohen 	int err;
868e126ba97SEli Cohen 
869e126ba97SEli Cohen 	if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
870e126ba97SEli Cohen 		return -EOPNOTSUPP;
871e126ba97SEli Cohen 
872e126ba97SEli Cohen 	if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
873e126ba97SEli Cohen 		return 0;
874e126ba97SEli Cohen 
875e126ba97SEli Cohen 	/*
876e126ba97SEli Cohen 	 * If possible, pass node desc to FW, so it can generate
877e126ba97SEli Cohen 	 * a 144 trap.  If cmd fails, just ignore.
878e126ba97SEli Cohen 	 */
879e126ba97SEli Cohen 	memcpy(&in, props->node_desc, 64);
8809603b61dSJack Morgenstein 	err = mlx5_core_access_reg(dev->mdev, &in, sizeof(in), &out,
881e126ba97SEli Cohen 				   sizeof(out), MLX5_REG_NODE_DESC, 0, 1);
882e126ba97SEli Cohen 	if (err)
883e126ba97SEli Cohen 		return err;
884e126ba97SEli Cohen 
885e126ba97SEli Cohen 	memcpy(ibdev->node_desc, props->node_desc, 64);
886e126ba97SEli Cohen 
887e126ba97SEli Cohen 	return err;
888e126ba97SEli Cohen }
889e126ba97SEli Cohen 
890e126ba97SEli Cohen static int mlx5_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
891e126ba97SEli Cohen 			       struct ib_port_modify *props)
892e126ba97SEli Cohen {
893e126ba97SEli Cohen 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
894e126ba97SEli Cohen 	struct ib_port_attr attr;
895e126ba97SEli Cohen 	u32 tmp;
896e126ba97SEli Cohen 	int err;
897e126ba97SEli Cohen 
898e126ba97SEli Cohen 	mutex_lock(&dev->cap_mask_mutex);
899e126ba97SEli Cohen 
900e126ba97SEli Cohen 	err = mlx5_ib_query_port(ibdev, port, &attr);
901e126ba97SEli Cohen 	if (err)
902e126ba97SEli Cohen 		goto out;
903e126ba97SEli Cohen 
904e126ba97SEli Cohen 	tmp = (attr.port_cap_flags | props->set_port_cap_mask) &
905e126ba97SEli Cohen 		~props->clr_port_cap_mask;
906e126ba97SEli Cohen 
9079603b61dSJack Morgenstein 	err = mlx5_set_port_caps(dev->mdev, port, tmp);
908e126ba97SEli Cohen 
909e126ba97SEli Cohen out:
910e126ba97SEli Cohen 	mutex_unlock(&dev->cap_mask_mutex);
911e126ba97SEli Cohen 	return err;
912e126ba97SEli Cohen }
913e126ba97SEli Cohen 
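/*
 * The alloc_ucontext request exists in two layouts; the version is
 * deduced from the request length: a request exactly the size of the
 * original struct is handled as ver 0, a request large enough for the
 * v2 layout is handled as ver 2, and any trailing bytes the kernel does
 * not recognize must be zeroed (checked with ib_is_udata_cleared()).
 */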
914e126ba97SEli Cohen static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
915e126ba97SEli Cohen 						  struct ib_udata *udata)
916e126ba97SEli Cohen {
917e126ba97SEli Cohen 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
918b368d7cbSMatan Barak 	struct mlx5_ib_alloc_ucontext_req_v2 req = {};
919b368d7cbSMatan Barak 	struct mlx5_ib_alloc_ucontext_resp resp = {};
920e126ba97SEli Cohen 	struct mlx5_ib_ucontext *context;
921e126ba97SEli Cohen 	struct mlx5_uuar_info *uuari;
922e126ba97SEli Cohen 	struct mlx5_uar *uars;
923c1be5232SEli Cohen 	int gross_uuars;
924e126ba97SEli Cohen 	int num_uars;
92578c0f98cSEli Cohen 	int ver;
926e126ba97SEli Cohen 	int uuarn;
927e126ba97SEli Cohen 	int err;
928e126ba97SEli Cohen 	int i;
929f241e749SJack Morgenstein 	size_t reqlen;
930a168a41cSMajd Dibbiny 	size_t min_req_v2 = offsetof(struct mlx5_ib_alloc_ucontext_req_v2,
931a168a41cSMajd Dibbiny 				     max_cqe_version);
932e126ba97SEli Cohen 
933e126ba97SEli Cohen 	if (!dev->ib_active)
934e126ba97SEli Cohen 		return ERR_PTR(-EAGAIN);
935e126ba97SEli Cohen 
936dfbee859SHaggai Abramovsky 	if (udata->inlen < sizeof(struct ib_uverbs_cmd_hdr))
937dfbee859SHaggai Abramovsky 		return ERR_PTR(-EINVAL);
938dfbee859SHaggai Abramovsky 
93978c0f98cSEli Cohen 	reqlen = udata->inlen - sizeof(struct ib_uverbs_cmd_hdr);
94078c0f98cSEli Cohen 	if (reqlen == sizeof(struct mlx5_ib_alloc_ucontext_req))
94178c0f98cSEli Cohen 		ver = 0;
942a168a41cSMajd Dibbiny 	else if (reqlen >= min_req_v2)
94378c0f98cSEli Cohen 		ver = 2;
94478c0f98cSEli Cohen 	else
94578c0f98cSEli Cohen 		return ERR_PTR(-EINVAL);
94678c0f98cSEli Cohen 
947b368d7cbSMatan Barak 	err = ib_copy_from_udata(&req, udata, min(reqlen, sizeof(req)));
948e126ba97SEli Cohen 	if (err)
949e126ba97SEli Cohen 		return ERR_PTR(err);
950e126ba97SEli Cohen 
951b368d7cbSMatan Barak 	if (req.flags)
95278c0f98cSEli Cohen 		return ERR_PTR(-EINVAL);
95378c0f98cSEli Cohen 
954e126ba97SEli Cohen 	if (req.total_num_uuars > MLX5_MAX_UUARS)
955e126ba97SEli Cohen 		return ERR_PTR(-ENOMEM);
956e126ba97SEli Cohen 
957e126ba97SEli Cohen 	if (req.total_num_uuars == 0)
958e126ba97SEli Cohen 		return ERR_PTR(-EINVAL);
959e126ba97SEli Cohen 
960f72300c5SHaggai Abramovsky 	if (req.comp_mask || req.reserved0 || req.reserved1 || req.reserved2)
961b368d7cbSMatan Barak 		return ERR_PTR(-EOPNOTSUPP);
962b368d7cbSMatan Barak 
963b368d7cbSMatan Barak 	if (reqlen > sizeof(req) &&
964b368d7cbSMatan Barak 	    !ib_is_udata_cleared(udata, sizeof(req),
965dfbee859SHaggai Abramovsky 				 reqlen - sizeof(req)))
966b368d7cbSMatan Barak 		return ERR_PTR(-EOPNOTSUPP);
967b368d7cbSMatan Barak 
968c1be5232SEli Cohen 	req.total_num_uuars = ALIGN(req.total_num_uuars,
969c1be5232SEli Cohen 				    MLX5_NON_FP_BF_REGS_PER_PAGE);
970e126ba97SEli Cohen 	if (req.num_low_latency_uuars > req.total_num_uuars - 1)
971e126ba97SEli Cohen 		return ERR_PTR(-EINVAL);
972e126ba97SEli Cohen 
973c1be5232SEli Cohen 	num_uars = req.total_num_uuars / MLX5_NON_FP_BF_REGS_PER_PAGE;
974c1be5232SEli Cohen 	gross_uuars = num_uars * MLX5_BF_REGS_PER_PAGE;
975938fe83cSSaeed Mahameed 	resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp);
9762cc6ad5fSNoa Osherovich 	if (mlx5_core_is_pf(dev->mdev) && MLX5_CAP_GEN(dev->mdev, bf))
977938fe83cSSaeed Mahameed 		resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size);
978e126ba97SEli Cohen 	resp.cache_line_size = L1_CACHE_BYTES;
979938fe83cSSaeed Mahameed 	resp.max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq);
980938fe83cSSaeed Mahameed 	resp.max_rq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq);
981938fe83cSSaeed Mahameed 	resp.max_send_wqebb = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
982938fe83cSSaeed Mahameed 	resp.max_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
983938fe83cSSaeed Mahameed 	resp.max_srq_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);
984f72300c5SHaggai Abramovsky 	resp.cqe_version = min_t(__u8,
985f72300c5SHaggai Abramovsky 				 (__u8)MLX5_CAP_GEN(dev->mdev, cqe_version),
986f72300c5SHaggai Abramovsky 				 req.max_cqe_version);
987b368d7cbSMatan Barak 	resp.response_length = min(offsetof(typeof(resp), response_length) +
988b368d7cbSMatan Barak 				   sizeof(resp.response_length), udata->outlen);
989e126ba97SEli Cohen 
990e126ba97SEli Cohen 	context = kzalloc(sizeof(*context), GFP_KERNEL);
991e126ba97SEli Cohen 	if (!context)
992e126ba97SEli Cohen 		return ERR_PTR(-ENOMEM);
993e126ba97SEli Cohen 
994e126ba97SEli Cohen 	uuari = &context->uuari;
995e126ba97SEli Cohen 	mutex_init(&uuari->lock);
996e126ba97SEli Cohen 	uars = kcalloc(num_uars, sizeof(*uars), GFP_KERNEL);
997e126ba97SEli Cohen 	if (!uars) {
998e126ba97SEli Cohen 		err = -ENOMEM;
999e126ba97SEli Cohen 		goto out_ctx;
1000e126ba97SEli Cohen 	}
1001e126ba97SEli Cohen 
1002c1be5232SEli Cohen 	uuari->bitmap = kcalloc(BITS_TO_LONGS(gross_uuars),
1003e126ba97SEli Cohen 				sizeof(*uuari->bitmap),
1004e126ba97SEli Cohen 				GFP_KERNEL);
1005e126ba97SEli Cohen 	if (!uuari->bitmap) {
1006e126ba97SEli Cohen 		err = -ENOMEM;
1007e126ba97SEli Cohen 		goto out_uar_ctx;
1008e126ba97SEli Cohen 	}
1009e126ba97SEli Cohen 	/*
1010e126ba97SEli Cohen 	 * mark all fast path uuars (two in every group of four) as taken
1011e126ba97SEli Cohen 	 */
1012c1be5232SEli Cohen 	for (i = 0; i < gross_uuars; i++) {
1013e126ba97SEli Cohen 		uuarn = i & 3;
1014e126ba97SEli Cohen 		if (uuarn == 2 || uuarn == 3)
1015e126ba97SEli Cohen 			set_bit(i, uuari->bitmap);
1016e126ba97SEli Cohen 	}
1017e126ba97SEli Cohen 
1018c1be5232SEli Cohen 	uuari->count = kcalloc(gross_uuars, sizeof(*uuari->count), GFP_KERNEL);
1019e126ba97SEli Cohen 	if (!uuari->count) {
1020e126ba97SEli Cohen 		err = -ENOMEM;
1021e126ba97SEli Cohen 		goto out_bitmap;
1022e126ba97SEli Cohen 	}
1023e126ba97SEli Cohen 
1024e126ba97SEli Cohen 	for (i = 0; i < num_uars; i++) {
10259603b61dSJack Morgenstein 		err = mlx5_cmd_alloc_uar(dev->mdev, &uars[i].index);
1026e126ba97SEli Cohen 		if (err)
1027e126ba97SEli Cohen 			goto out_count;
1028e126ba97SEli Cohen 	}
1029e126ba97SEli Cohen 
1030b4cfe447SHaggai Eran #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
1031b4cfe447SHaggai Eran 	context->ibucontext.invalidate_range = &mlx5_ib_invalidate_range;
1032b4cfe447SHaggai Eran #endif
1033b4cfe447SHaggai Eran 
1034146d2f1aSmajd@mellanox.com 	if (MLX5_CAP_GEN(dev->mdev, log_max_transport_domain)) {
1035146d2f1aSmajd@mellanox.com 		err = mlx5_core_alloc_transport_domain(dev->mdev,
1036146d2f1aSmajd@mellanox.com 						       &context->tdn);
1037146d2f1aSmajd@mellanox.com 		if (err)
1038146d2f1aSmajd@mellanox.com 			goto out_uars;
1039146d2f1aSmajd@mellanox.com 	}
1040146d2f1aSmajd@mellanox.com 
10417c2344c3SMaor Gottlieb 	INIT_LIST_HEAD(&context->vma_private_list);
1042e126ba97SEli Cohen 	INIT_LIST_HEAD(&context->db_page_list);
1043e126ba97SEli Cohen 	mutex_init(&context->db_page_mutex);
1044e126ba97SEli Cohen 
1045e126ba97SEli Cohen 	resp.tot_uuars = req.total_num_uuars;
1046938fe83cSSaeed Mahameed 	resp.num_ports = MLX5_CAP_GEN(dev->mdev, num_ports);
1047b368d7cbSMatan Barak 
1048f72300c5SHaggai Abramovsky 	if (field_avail(typeof(resp), cqe_version, udata->outlen))
1049f72300c5SHaggai Abramovsky 		resp.response_length += sizeof(resp.cqe_version);
1050b368d7cbSMatan Barak 
1051402ca536SBodong Wang 	if (field_avail(typeof(resp), cmds_supp_uhw, udata->outlen)) {
1052402ca536SBodong Wang 		resp.cmds_supp_uhw |= MLX5_USER_CMDS_SUPP_UHW_QUERY_DEVICE;
1053402ca536SBodong Wang 		resp.response_length += sizeof(resp.cmds_supp_uhw);
1054402ca536SBodong Wang 	}
1055402ca536SBodong Wang 
1056bc5c6eedSNoa Osherovich 	/*
1057bc5c6eedSNoa Osherovich 	 * We don't want to expose information from the PCI bar that is located
1058bc5c6eedSNoa Osherovich 	 * after 4096 bytes, so if the arch only supports larger pages, let's
1059bc5c6eedSNoa Osherovich 	 * pretend we don't support reading the HCA's core clock. This is also
1060bc5c6eedSNoa Osherovich 	 * forced by mmap function.
1061bc5c6eedSNoa Osherovich 	 */
1062bc5c6eedSNoa Osherovich 	if (PAGE_SIZE <= 4096 &&
1063bc5c6eedSNoa Osherovich 	    field_avail(typeof(resp), hca_core_clock_offset, udata->outlen)) {
1064b368d7cbSMatan Barak 		resp.comp_mask |=
1065b368d7cbSMatan Barak 			MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET;
1066b368d7cbSMatan Barak 		resp.hca_core_clock_offset =
1067b368d7cbSMatan Barak 			offsetof(struct mlx5_init_seg, internal_timer_h) %
1068b368d7cbSMatan Barak 			PAGE_SIZE;
1069f72300c5SHaggai Abramovsky 		resp.response_length += sizeof(resp.hca_core_clock_offset) +
1070402ca536SBodong Wang 					sizeof(resp.reserved2);
1071b368d7cbSMatan Barak 	}
1072b368d7cbSMatan Barak 
1073b368d7cbSMatan Barak 	err = ib_copy_to_udata(udata, &resp, resp.response_length);
1074e126ba97SEli Cohen 	if (err)
1075146d2f1aSmajd@mellanox.com 		goto out_td;
1076e126ba97SEli Cohen 
107778c0f98cSEli Cohen 	uuari->ver = ver;
1078e126ba97SEli Cohen 	uuari->num_low_latency_uuars = req.num_low_latency_uuars;
1079e126ba97SEli Cohen 	uuari->uars = uars;
1080e126ba97SEli Cohen 	uuari->num_uars = num_uars;
1081f72300c5SHaggai Abramovsky 	context->cqe_version = resp.cqe_version;
1082f72300c5SHaggai Abramovsky 
1083e126ba97SEli Cohen 	return &context->ibucontext;
1084e126ba97SEli Cohen 
1085146d2f1aSmajd@mellanox.com out_td:
1086146d2f1aSmajd@mellanox.com 	if (MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
1087146d2f1aSmajd@mellanox.com 		mlx5_core_dealloc_transport_domain(dev->mdev, context->tdn);
1088146d2f1aSmajd@mellanox.com 
1089e126ba97SEli Cohen out_uars:
1090e126ba97SEli Cohen 	for (i--; i >= 0; i--)
10919603b61dSJack Morgenstein 		mlx5_cmd_free_uar(dev->mdev, uars[i].index);
1092e126ba97SEli Cohen out_count:
1093e126ba97SEli Cohen 	kfree(uuari->count);
1094e126ba97SEli Cohen 
1095e126ba97SEli Cohen out_bitmap:
1096e126ba97SEli Cohen 	kfree(uuari->bitmap);
1097e126ba97SEli Cohen 
1098e126ba97SEli Cohen out_uar_ctx:
1099e126ba97SEli Cohen 	kfree(uars);
1100e126ba97SEli Cohen 
1101e126ba97SEli Cohen out_ctx:
1102e126ba97SEli Cohen 	kfree(context);
1103e126ba97SEli Cohen 	return ERR_PTR(err);
1104e126ba97SEli Cohen }
1105e126ba97SEli Cohen 
1106e126ba97SEli Cohen static int mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
1107e126ba97SEli Cohen {
1108e126ba97SEli Cohen 	struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
1109e126ba97SEli Cohen 	struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
1110e126ba97SEli Cohen 	struct mlx5_uuar_info *uuari = &context->uuari;
1111e126ba97SEli Cohen 	int i;
1112e126ba97SEli Cohen 
1113146d2f1aSmajd@mellanox.com 	if (MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
1114146d2f1aSmajd@mellanox.com 		mlx5_core_dealloc_transport_domain(dev->mdev, context->tdn);
1115146d2f1aSmajd@mellanox.com 
1116e126ba97SEli Cohen 	for (i = 0; i < uuari->num_uars; i++) {
11179603b61dSJack Morgenstein 		if (mlx5_cmd_free_uar(dev->mdev, uuari->uars[i].index))
1118e126ba97SEli Cohen 			mlx5_ib_warn(dev, "failed to free UAR 0x%x\n", uuari->uars[i].index);
1119e126ba97SEli Cohen 	}
1120e126ba97SEli Cohen 
1121e126ba97SEli Cohen 	kfree(uuari->count);
1122e126ba97SEli Cohen 	kfree(uuari->bitmap);
1123e126ba97SEli Cohen 	kfree(uuari->uars);
1124e126ba97SEli Cohen 	kfree(context);
1125e126ba97SEli Cohen 
1126e126ba97SEli Cohen 	return 0;
1127e126ba97SEli Cohen }
1128e126ba97SEli Cohen 
1129e126ba97SEli Cohen static phys_addr_t uar_index2pfn(struct mlx5_ib_dev *dev, int index)
1130e126ba97SEli Cohen {
11319603b61dSJack Morgenstein 	return (pci_resource_start(dev->mdev->pdev, 0) >> PAGE_SHIFT) + index;
1132e126ba97SEli Cohen }
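
/*
 * For illustration: each UAR is a page carved out of BAR 0, so the PFN of
 * UAR "index" is simply the BAR 0 base PFN plus the index.  Roughly, assuming
 * a 4KB PAGE_SIZE:
 *
 *	phys_addr_t base = pci_resource_start(dev->mdev->pdev, 0);
 *	phys_addr_t pfn  = (base >> PAGE_SHIFT) + index;
 *	phys_addr_t pa   = pfn << PAGE_SHIFT;	-- printed later by uar_mmap()
 */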
1133e126ba97SEli Cohen 
1134e126ba97SEli Cohen static int get_command(unsigned long offset)
1135e126ba97SEli Cohen {
1136e126ba97SEli Cohen 	return (offset >> MLX5_IB_MMAP_CMD_SHIFT) & MLX5_IB_MMAP_CMD_MASK;
1137e126ba97SEli Cohen }
1138e126ba97SEli Cohen 
1139e126ba97SEli Cohen static int get_arg(unsigned long offset)
1140e126ba97SEli Cohen {
1141e126ba97SEli Cohen 	return offset & ((1 << MLX5_IB_MMAP_CMD_SHIFT) - 1);
1142e126ba97SEli Cohen }
1143e126ba97SEli Cohen 
1144e126ba97SEli Cohen static int get_index(unsigned long offset)
1145e126ba97SEli Cohen {
1146e126ba97SEli Cohen 	return get_arg(offset);
1147e126ba97SEli Cohen }
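
/*
 * For illustration: the offset decoded above is vma->vm_pgoff, i.e. the byte
 * offset passed to mmap() divided by the page size.  A user-space library
 * would therefore build the offset roughly as follows (the names here are
 * hypothetical):
 *
 *	off_t off = ((off_t)cmd << MLX5_IB_MMAP_CMD_SHIFT | index) * page_size;
 *	void *p = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		       cmd_fd, off);
 *
 * so that get_command() recovers "cmd" and get_index() recovers "index".
 */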
1148e126ba97SEli Cohen 
11497c2344c3SMaor Gottlieb static void  mlx5_ib_vma_open(struct vm_area_struct *area)
11507c2344c3SMaor Gottlieb {
11517c2344c3SMaor Gottlieb 	/* vma_open is called when a new VMA is created on top of our VMA.  This
11527c2344c3SMaor Gottlieb 	 * is done through either the mremap flow or split_vma (usually due to
11537c2344c3SMaor Gottlieb 	 * mlock, madvise, munmap, etc.).  We do not support cloning the VMA,
11547c2344c3SMaor Gottlieb 	 * as this VMA is strongly hardware related.  Therefore we set the
11557c2344c3SMaor Gottlieb 	 * vm_ops of the newly created/cloned VMA to NULL, to prevent it from
11567c2344c3SMaor Gottlieb 	 * calling us again and trying to perform incorrect actions.  We assume
11577c2344c3SMaor Gottlieb 	 * that the original VMA is exactly one page in size, and therefore no
11587c2344c3SMaor Gottlieb 	 * "splitting" operation will happen to it.
11597c2344c3SMaor Gottlieb 	 */
11607c2344c3SMaor Gottlieb 	area->vm_ops = NULL;
11617c2344c3SMaor Gottlieb }
11627c2344c3SMaor Gottlieb 
11637c2344c3SMaor Gottlieb static void  mlx5_ib_vma_close(struct vm_area_struct *area)
11647c2344c3SMaor Gottlieb {
11657c2344c3SMaor Gottlieb 	struct mlx5_ib_vma_private_data *mlx5_ib_vma_priv_data;
11667c2344c3SMaor Gottlieb 
11677c2344c3SMaor Gottlieb 	/* It's guaranteed that all VMAs opened on a FD are closed before the
11687c2344c3SMaor Gottlieb 	 * file itself is closed, so no synchronization is needed with the
11697c2344c3SMaor Gottlieb 	 * regular closing flow (e.g. mlx5_ib_dealloc_ucontext).
11707c2344c3SMaor Gottlieb 	 * However, we do need to synchronize with accesses to the vma made as
11717c2344c3SMaor Gottlieb 	 * part of mlx5_ib_disassociate_ucontext.
11727c2344c3SMaor Gottlieb 	 * The close operation is usually called under mm->mmap_sem, except
11737c2344c3SMaor Gottlieb 	 * when the process is exiting.
11747c2344c3SMaor Gottlieb 	 * The exiting case is handled explicitly as part of
11757c2344c3SMaor Gottlieb 	 * mlx5_ib_disassociate_ucontext.
11767c2344c3SMaor Gottlieb 	 */
11777c2344c3SMaor Gottlieb 	mlx5_ib_vma_priv_data = (struct mlx5_ib_vma_private_data *)area->vm_private_data;
11787c2344c3SMaor Gottlieb 
11797c2344c3SMaor Gottlieb 	/* Set the vma context pointer to NULL in the mlx5_ib driver's
11807c2344c3SMaor Gottlieb 	 * private data to protect against a race with
11817c2344c3SMaor Gottlieb 	 * mlx5_ib_disassociate_ucontext().
11827c2344c3SMaor Gottlieb 	 */
11837c2344c3SMaor Gottlieb 	mlx5_ib_vma_priv_data->vma = NULL;
11847c2344c3SMaor Gottlieb 	list_del(&mlx5_ib_vma_priv_data->list);
11857c2344c3SMaor Gottlieb 	kfree(mlx5_ib_vma_priv_data);
11867c2344c3SMaor Gottlieb }
11877c2344c3SMaor Gottlieb 
11887c2344c3SMaor Gottlieb static const struct vm_operations_struct mlx5_ib_vm_ops = {
11897c2344c3SMaor Gottlieb 	.open = mlx5_ib_vma_open,
11907c2344c3SMaor Gottlieb 	.close = mlx5_ib_vma_close
11917c2344c3SMaor Gottlieb };
11927c2344c3SMaor Gottlieb 
11937c2344c3SMaor Gottlieb static int mlx5_ib_set_vma_data(struct vm_area_struct *vma,
11947c2344c3SMaor Gottlieb 				struct mlx5_ib_ucontext *ctx)
11957c2344c3SMaor Gottlieb {
11967c2344c3SMaor Gottlieb 	struct mlx5_ib_vma_private_data *vma_prv;
11977c2344c3SMaor Gottlieb 	struct list_head *vma_head = &ctx->vma_private_list;
11987c2344c3SMaor Gottlieb 
11997c2344c3SMaor Gottlieb 	vma_prv = kzalloc(sizeof(*vma_prv), GFP_KERNEL);
12007c2344c3SMaor Gottlieb 	if (!vma_prv)
12017c2344c3SMaor Gottlieb 		return -ENOMEM;
12027c2344c3SMaor Gottlieb 
12037c2344c3SMaor Gottlieb 	vma_prv->vma = vma;
12047c2344c3SMaor Gottlieb 	vma->vm_private_data = vma_prv;
12057c2344c3SMaor Gottlieb 	vma->vm_ops =  &mlx5_ib_vm_ops;
12067c2344c3SMaor Gottlieb 
12077c2344c3SMaor Gottlieb 	list_add(&vma_prv->list, vma_head);
12087c2344c3SMaor Gottlieb 
12097c2344c3SMaor Gottlieb 	return 0;
12107c2344c3SMaor Gottlieb }
12117c2344c3SMaor Gottlieb 
12127c2344c3SMaor Gottlieb static void mlx5_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
12137c2344c3SMaor Gottlieb {
12147c2344c3SMaor Gottlieb 	int ret;
12157c2344c3SMaor Gottlieb 	struct vm_area_struct *vma;
12167c2344c3SMaor Gottlieb 	struct mlx5_ib_vma_private_data *vma_private, *n;
12177c2344c3SMaor Gottlieb 	struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
12187c2344c3SMaor Gottlieb 	struct task_struct *owning_process  = NULL;
12197c2344c3SMaor Gottlieb 	struct mm_struct   *owning_mm       = NULL;
12207c2344c3SMaor Gottlieb 
12217c2344c3SMaor Gottlieb 	owning_process = get_pid_task(ibcontext->tgid, PIDTYPE_PID);
12227c2344c3SMaor Gottlieb 	if (!owning_process)
12237c2344c3SMaor Gottlieb 		return;
12247c2344c3SMaor Gottlieb 
12257c2344c3SMaor Gottlieb 	owning_mm = get_task_mm(owning_process);
12267c2344c3SMaor Gottlieb 	if (!owning_mm) {
12277c2344c3SMaor Gottlieb 		pr_info("no mm, disassociate ucontext is pending task termination\n");
12287c2344c3SMaor Gottlieb 		while (1) {
12297c2344c3SMaor Gottlieb 			put_task_struct(owning_process);
12307c2344c3SMaor Gottlieb 			usleep_range(1000, 2000);
12317c2344c3SMaor Gottlieb 			owning_process = get_pid_task(ibcontext->tgid,
12327c2344c3SMaor Gottlieb 						      PIDTYPE_PID);
12337c2344c3SMaor Gottlieb 			if (!owning_process ||
12347c2344c3SMaor Gottlieb 			    owning_process->state == TASK_DEAD) {
12357c2344c3SMaor Gottlieb 				pr_info("disassociate ucontext done, task was terminated\n");
12367c2344c3SMaor Gottlieb 				/* If the task is dead, its task struct
12377c2344c3SMaor Gottlieb 				 * still needs to be released.
12387c2344c3SMaor Gottlieb 				 */
12397c2344c3SMaor Gottlieb 				if (owning_process)
12407c2344c3SMaor Gottlieb 					put_task_struct(owning_process);
12417c2344c3SMaor Gottlieb 				return;
12427c2344c3SMaor Gottlieb 			}
12437c2344c3SMaor Gottlieb 		}
12447c2344c3SMaor Gottlieb 	}
12457c2344c3SMaor Gottlieb 
12467c2344c3SMaor Gottlieb 	/* Protect against a race with the vma being closed as part of
12477c2344c3SMaor Gottlieb 	 * mlx5_ib_vma_close().
12487c2344c3SMaor Gottlieb 	 */
12497c2344c3SMaor Gottlieb 	down_read(&owning_mm->mmap_sem);
12507c2344c3SMaor Gottlieb 	list_for_each_entry_safe(vma_private, n, &context->vma_private_list,
12517c2344c3SMaor Gottlieb 				 list) {
12527c2344c3SMaor Gottlieb 		vma = vma_private->vma;
12537c2344c3SMaor Gottlieb 		ret = zap_vma_ptes(vma, vma->vm_start,
12547c2344c3SMaor Gottlieb 				   PAGE_SIZE);
12557c2344c3SMaor Gottlieb 		WARN_ONCE(ret, "%s: zap_vma_ptes failed", __func__);
12567c2344c3SMaor Gottlieb 		/* The context is about to be destroyed; the
12577c2344c3SMaor Gottlieb 		 * vm_ops must not be accessed any more.
12587c2344c3SMaor Gottlieb 		 */
12597c2344c3SMaor Gottlieb 		vma->vm_ops = NULL;
12607c2344c3SMaor Gottlieb 		list_del(&vma_private->list);
12617c2344c3SMaor Gottlieb 		kfree(vma_private);
12627c2344c3SMaor Gottlieb 	}
12637c2344c3SMaor Gottlieb 	up_read(&owning_mm->mmap_sem);
12647c2344c3SMaor Gottlieb 	mmput(owning_mm);
12657c2344c3SMaor Gottlieb 	put_task_struct(owning_process);
12667c2344c3SMaor Gottlieb }
12677c2344c3SMaor Gottlieb 
126837aa5c36SGuy Levi static inline char *mmap_cmd2str(enum mlx5_ib_mmap_cmd cmd)
1269e126ba97SEli Cohen {
127037aa5c36SGuy Levi 	switch (cmd) {
127137aa5c36SGuy Levi 	case MLX5_IB_MMAP_WC_PAGE:
127237aa5c36SGuy Levi 		return "WC";
1273e126ba97SEli Cohen 	case MLX5_IB_MMAP_REGULAR_PAGE:
127437aa5c36SGuy Levi 		return "best effort WC";
127537aa5c36SGuy Levi 	case MLX5_IB_MMAP_NC_PAGE:
127637aa5c36SGuy Levi 		return "NC";
127737aa5c36SGuy Levi 	default:
127837aa5c36SGuy Levi 		return NULL;
127937aa5c36SGuy Levi 	}
128037aa5c36SGuy Levi }
128137aa5c36SGuy Levi 
128237aa5c36SGuy Levi static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
12837c2344c3SMaor Gottlieb 		    struct vm_area_struct *vma,
12847c2344c3SMaor Gottlieb 		    struct mlx5_ib_ucontext *context)
128537aa5c36SGuy Levi {
12867c2344c3SMaor Gottlieb 	struct mlx5_uuar_info *uuari = &context->uuari;
128737aa5c36SGuy Levi 	int err;
128837aa5c36SGuy Levi 	unsigned long idx;
128937aa5c36SGuy Levi 	phys_addr_t pfn, pa;
129037aa5c36SGuy Levi 	pgprot_t prot;
129137aa5c36SGuy Levi 
129237aa5c36SGuy Levi 	switch (cmd) {
129337aa5c36SGuy Levi 	case MLX5_IB_MMAP_WC_PAGE:
129437aa5c36SGuy Levi /* Some architectures don't support WC memory */
129537aa5c36SGuy Levi #if defined(CONFIG_X86)
129637aa5c36SGuy Levi 		if (!pat_enabled())
129737aa5c36SGuy Levi 			return -EPERM;
129837aa5c36SGuy Levi #elif !(defined(CONFIG_PPC) || (defined(CONFIG_ARM) && defined(CONFIG_MMU)))
129937aa5c36SGuy Levi 			return -EPERM;
130037aa5c36SGuy Levi #endif
130137aa5c36SGuy Levi 	/* fall through */
130237aa5c36SGuy Levi 	case MLX5_IB_MMAP_REGULAR_PAGE:
130337aa5c36SGuy Levi 		/* For MLX5_IB_MMAP_REGULAR_PAGE, make a best effort to get WC */
130437aa5c36SGuy Levi 		prot = pgprot_writecombine(vma->vm_page_prot);
130537aa5c36SGuy Levi 		break;
130637aa5c36SGuy Levi 	case MLX5_IB_MMAP_NC_PAGE:
130737aa5c36SGuy Levi 		prot = pgprot_noncached(vma->vm_page_prot);
130837aa5c36SGuy Levi 		break;
130937aa5c36SGuy Levi 	default:
131037aa5c36SGuy Levi 		return -EINVAL;
131137aa5c36SGuy Levi 	}
131237aa5c36SGuy Levi 
1313e126ba97SEli Cohen 	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
1314e126ba97SEli Cohen 		return -EINVAL;
1315e126ba97SEli Cohen 
1316e126ba97SEli Cohen 	idx = get_index(vma->vm_pgoff);
13171c3ce90dSEli Cohen 	if (idx >= uuari->num_uars)
13181c3ce90dSEli Cohen 		return -EINVAL;
13191c3ce90dSEli Cohen 
1320e126ba97SEli Cohen 	pfn = uar_index2pfn(dev, uuari->uars[idx].index);
132137aa5c36SGuy Levi 	mlx5_ib_dbg(dev, "uar idx 0x%lx, pfn %pa\n", idx, &pfn);
1322e126ba97SEli Cohen 
132337aa5c36SGuy Levi 	vma->vm_page_prot = prot;
132437aa5c36SGuy Levi 	err = io_remap_pfn_range(vma, vma->vm_start, pfn,
132537aa5c36SGuy Levi 				 PAGE_SIZE, vma->vm_page_prot);
132637aa5c36SGuy Levi 	if (err) {
132737aa5c36SGuy Levi 		mlx5_ib_err(dev, "io_remap_pfn_range failed with error=%d, vm_start=0x%lx, pfn=%pa, mmap_cmd=%s\n",
132837aa5c36SGuy Levi 			    err, vma->vm_start, &pfn, mmap_cmd2str(cmd));
1329e126ba97SEli Cohen 		return -EAGAIN;
133037aa5c36SGuy Levi 	}
1331e126ba97SEli Cohen 
133237aa5c36SGuy Levi 	pa = pfn << PAGE_SHIFT;
133337aa5c36SGuy Levi 	mlx5_ib_dbg(dev, "mapped %s at 0x%lx, PA %pa\n", mmap_cmd2str(cmd),
133437aa5c36SGuy Levi 		    vma->vm_start, &pa);
133537aa5c36SGuy Levi 
13367c2344c3SMaor Gottlieb 	return mlx5_ib_set_vma_data(vma, context);
133737aa5c36SGuy Levi }
133837aa5c36SGuy Levi 
133937aa5c36SGuy Levi static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
134037aa5c36SGuy Levi {
134137aa5c36SGuy Levi 	struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
134237aa5c36SGuy Levi 	struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
134337aa5c36SGuy Levi 	unsigned long command;
134437aa5c36SGuy Levi 	phys_addr_t pfn;
134537aa5c36SGuy Levi 
134637aa5c36SGuy Levi 	command = get_command(vma->vm_pgoff);
134737aa5c36SGuy Levi 	switch (command) {
134837aa5c36SGuy Levi 	case MLX5_IB_MMAP_WC_PAGE:
134937aa5c36SGuy Levi 	case MLX5_IB_MMAP_NC_PAGE:
135037aa5c36SGuy Levi 	case MLX5_IB_MMAP_REGULAR_PAGE:
13517c2344c3SMaor Gottlieb 		return uar_mmap(dev, command, vma, context);
1352e126ba97SEli Cohen 
1353e126ba97SEli Cohen 	case MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES:
1354e126ba97SEli Cohen 		return -ENOSYS;
1355e126ba97SEli Cohen 
1356d69e3bcfSMatan Barak 	case MLX5_IB_MMAP_CORE_CLOCK:
1357d69e3bcfSMatan Barak 		if (vma->vm_end - vma->vm_start != PAGE_SIZE)
1358d69e3bcfSMatan Barak 			return -EINVAL;
1359d69e3bcfSMatan Barak 
13606cbac1e4SMatan Barak 		if (vma->vm_flags & VM_WRITE)
1361d69e3bcfSMatan Barak 			return -EPERM;
1362d69e3bcfSMatan Barak 
1363d69e3bcfSMatan Barak 		/* Don't expose information to user space that it shouldn't have */
1364d69e3bcfSMatan Barak 		if (PAGE_SIZE > 4096)
1365d69e3bcfSMatan Barak 			return -EOPNOTSUPP;
1366d69e3bcfSMatan Barak 
1367d69e3bcfSMatan Barak 		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
1368d69e3bcfSMatan Barak 		pfn = (dev->mdev->iseg_base +
1369d69e3bcfSMatan Barak 		       offsetof(struct mlx5_init_seg, internal_timer_h)) >>
1370d69e3bcfSMatan Barak 			PAGE_SHIFT;
1371d69e3bcfSMatan Barak 		if (io_remap_pfn_range(vma, vma->vm_start, pfn,
1372d69e3bcfSMatan Barak 				       PAGE_SIZE, vma->vm_page_prot))
1373d69e3bcfSMatan Barak 			return -EAGAIN;
1374d69e3bcfSMatan Barak 
1375d69e3bcfSMatan Barak 		mlx5_ib_dbg(dev, "mapped internal timer at 0x%lx, PA 0x%llx\n",
1376d69e3bcfSMatan Barak 			    vma->vm_start,
1377d69e3bcfSMatan Barak 			    (unsigned long long)pfn << PAGE_SHIFT);
1378d69e3bcfSMatan Barak 		break;
1379d69e3bcfSMatan Barak 
1380e126ba97SEli Cohen 	default:
1381e126ba97SEli Cohen 		return -EINVAL;
1382e126ba97SEli Cohen 	}
1383e126ba97SEli Cohen 
1384e126ba97SEli Cohen 	return 0;
1385e126ba97SEli Cohen }
1386e126ba97SEli Cohen 
1387e126ba97SEli Cohen static struct ib_pd *mlx5_ib_alloc_pd(struct ib_device *ibdev,
1388e126ba97SEli Cohen 				      struct ib_ucontext *context,
1389e126ba97SEli Cohen 				      struct ib_udata *udata)
1390e126ba97SEli Cohen {
1391e126ba97SEli Cohen 	struct mlx5_ib_alloc_pd_resp resp;
1392e126ba97SEli Cohen 	struct mlx5_ib_pd *pd;
1393e126ba97SEli Cohen 	int err;
1394e126ba97SEli Cohen 
1395e126ba97SEli Cohen 	pd = kmalloc(sizeof(*pd), GFP_KERNEL);
1396e126ba97SEli Cohen 	if (!pd)
1397e126ba97SEli Cohen 		return ERR_PTR(-ENOMEM);
1398e126ba97SEli Cohen 
13999603b61dSJack Morgenstein 	err = mlx5_core_alloc_pd(to_mdev(ibdev)->mdev, &pd->pdn);
1400e126ba97SEli Cohen 	if (err) {
1401e126ba97SEli Cohen 		kfree(pd);
1402e126ba97SEli Cohen 		return ERR_PTR(err);
1403e126ba97SEli Cohen 	}
1404e126ba97SEli Cohen 
1405e126ba97SEli Cohen 	if (context) {
1406e126ba97SEli Cohen 		resp.pdn = pd->pdn;
1407e126ba97SEli Cohen 		if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
14089603b61dSJack Morgenstein 			mlx5_core_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn);
1409e126ba97SEli Cohen 			kfree(pd);
1410e126ba97SEli Cohen 			return ERR_PTR(-EFAULT);
1411e126ba97SEli Cohen 		}
1412e126ba97SEli Cohen 	}
1413e126ba97SEli Cohen 
1414e126ba97SEli Cohen 	return &pd->ibpd;
1415e126ba97SEli Cohen }
1416e126ba97SEli Cohen 
1417e126ba97SEli Cohen static int mlx5_ib_dealloc_pd(struct ib_pd *pd)
1418e126ba97SEli Cohen {
1419e126ba97SEli Cohen 	struct mlx5_ib_dev *mdev = to_mdev(pd->device);
1420e126ba97SEli Cohen 	struct mlx5_ib_pd *mpd = to_mpd(pd);
1421e126ba97SEli Cohen 
14229603b61dSJack Morgenstein 	mlx5_core_dealloc_pd(mdev->mdev, mpd->pdn);
1423e126ba97SEli Cohen 	kfree(mpd);
1424e126ba97SEli Cohen 
1425e126ba97SEli Cohen 	return 0;
1426e126ba97SEli Cohen }
1427e126ba97SEli Cohen 
1428038d2ef8SMaor Gottlieb static bool outer_header_zero(u32 *match_criteria)
1429038d2ef8SMaor Gottlieb {
1430038d2ef8SMaor Gottlieb 	int size = MLX5_ST_SZ_BYTES(fte_match_param);
1431038d2ef8SMaor Gottlieb 	char *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_criteria,
1432038d2ef8SMaor Gottlieb 					     outer_headers);
1433038d2ef8SMaor Gottlieb 
1434038d2ef8SMaor Gottlieb 	return outer_headers_c[0] == 0 && !memcmp(outer_headers_c,
1435038d2ef8SMaor Gottlieb 						  outer_headers_c + 1,
1436038d2ef8SMaor Gottlieb 						  size - 1);
1437038d2ef8SMaor Gottlieb }
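
/*
 * The check above is the usual "all bytes are zero" idiom: if byte 0 is zero
 * and bytes [0, size-2] compare equal to bytes [1, size-1], then every byte
 * equals byte 0, i.e. the whole outer_headers mask is clear.  An equivalent,
 * purely illustrative, spelling would be:
 *
 *	return !memchr_inv(outer_headers_c, 0, size);
 */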
1438038d2ef8SMaor Gottlieb 
1439*c47ac6aeSMaor Gottlieb #define LAST_ETH_FIELD vlan_tag
1440*c47ac6aeSMaor Gottlieb #define LAST_IB_FIELD sl
1441*c47ac6aeSMaor Gottlieb #define LAST_IPV4_FIELD dst_ip
1442*c47ac6aeSMaor Gottlieb #define LAST_IPV6_FIELD dst_ip
1443*c47ac6aeSMaor Gottlieb #define LAST_TCP_UDP_FIELD src_port
1444*c47ac6aeSMaor Gottlieb 
1445*c47ac6aeSMaor Gottlieb /* 'field' must name the last field in the filter that the driver supports */
1446*c47ac6aeSMaor Gottlieb #define FIELDS_NOT_SUPPORTED(filter, field)\
1447*c47ac6aeSMaor Gottlieb 	memchr_inv((void *)&filter.field  +\
1448*c47ac6aeSMaor Gottlieb 		   sizeof(filter.field), 0,\
1449*c47ac6aeSMaor Gottlieb 		   sizeof(filter) -\
1450*c47ac6aeSMaor Gottlieb 		   offsetof(typeof(filter), field) -\
1451*c47ac6aeSMaor Gottlieb 		   sizeof(filter.field))
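
/*
 * FIELDS_NOT_SUPPORTED() evaluates to non-NULL ("true") when any byte of the
 * filter beyond the named field is non-zero, i.e. user space asked to match
 * on a field this driver does not parse.  Illustrative use, as seen below:
 *
 *	if (FIELDS_NOT_SUPPORTED(ib_spec->eth.mask, LAST_ETH_FIELD))
 *		return -ENOTSUPP;	-- any mask bit beyond vlan_tag is rejected
 */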
1452*c47ac6aeSMaor Gottlieb 
1453038d2ef8SMaor Gottlieb static int parse_flow_attr(u32 *match_c, u32 *match_v,
1454dd063d0eSMaor Gottlieb 			   const union ib_flow_spec *ib_spec)
1455038d2ef8SMaor Gottlieb {
1456038d2ef8SMaor Gottlieb 	void *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
1457038d2ef8SMaor Gottlieb 					     outer_headers);
1458038d2ef8SMaor Gottlieb 	void *outer_headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
1459038d2ef8SMaor Gottlieb 					     outer_headers);
1460038d2ef8SMaor Gottlieb 	switch (ib_spec->type) {
1461038d2ef8SMaor Gottlieb 	case IB_FLOW_SPEC_ETH:
1462*c47ac6aeSMaor Gottlieb 		if (FIELDS_NOT_SUPPORTED(ib_spec->eth.mask, LAST_ETH_FIELD))
1463*c47ac6aeSMaor Gottlieb 			return -ENOTSUPP;
1464038d2ef8SMaor Gottlieb 
1465038d2ef8SMaor Gottlieb 		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
1466038d2ef8SMaor Gottlieb 					     dmac_47_16),
1467038d2ef8SMaor Gottlieb 				ib_spec->eth.mask.dst_mac);
1468038d2ef8SMaor Gottlieb 		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
1469038d2ef8SMaor Gottlieb 					     dmac_47_16),
1470038d2ef8SMaor Gottlieb 				ib_spec->eth.val.dst_mac);
1471038d2ef8SMaor Gottlieb 
1472038d2ef8SMaor Gottlieb 		if (ib_spec->eth.mask.vlan_tag) {
1473038d2ef8SMaor Gottlieb 			MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
1474038d2ef8SMaor Gottlieb 				 vlan_tag, 1);
1475038d2ef8SMaor Gottlieb 			MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
1476038d2ef8SMaor Gottlieb 				 vlan_tag, 1);
1477038d2ef8SMaor Gottlieb 
1478038d2ef8SMaor Gottlieb 			MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
1479038d2ef8SMaor Gottlieb 				 first_vid, ntohs(ib_spec->eth.mask.vlan_tag));
1480038d2ef8SMaor Gottlieb 			MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
1481038d2ef8SMaor Gottlieb 				 first_vid, ntohs(ib_spec->eth.val.vlan_tag));
1482038d2ef8SMaor Gottlieb 
1483038d2ef8SMaor Gottlieb 			MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
1484038d2ef8SMaor Gottlieb 				 first_cfi,
1485038d2ef8SMaor Gottlieb 				 ntohs(ib_spec->eth.mask.vlan_tag) >> 12);
1486038d2ef8SMaor Gottlieb 			MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
1487038d2ef8SMaor Gottlieb 				 first_cfi,
1488038d2ef8SMaor Gottlieb 				 ntohs(ib_spec->eth.val.vlan_tag) >> 12);
1489038d2ef8SMaor Gottlieb 
1490038d2ef8SMaor Gottlieb 			MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
1491038d2ef8SMaor Gottlieb 				 first_prio,
1492038d2ef8SMaor Gottlieb 				 ntohs(ib_spec->eth.mask.vlan_tag) >> 13);
1493038d2ef8SMaor Gottlieb 			MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
1494038d2ef8SMaor Gottlieb 				 first_prio,
1495038d2ef8SMaor Gottlieb 				 ntohs(ib_spec->eth.val.vlan_tag) >> 13);
1496038d2ef8SMaor Gottlieb 		}
1497038d2ef8SMaor Gottlieb 		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
1498038d2ef8SMaor Gottlieb 			 ethertype, ntohs(ib_spec->eth.mask.ether_type));
1499038d2ef8SMaor Gottlieb 		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
1500038d2ef8SMaor Gottlieb 			 ethertype, ntohs(ib_spec->eth.val.ether_type));
1501038d2ef8SMaor Gottlieb 		break;
1502038d2ef8SMaor Gottlieb 	case IB_FLOW_SPEC_IPV4:
1503*c47ac6aeSMaor Gottlieb 		if (FIELDS_NOT_SUPPORTED(ib_spec->ipv4.mask, LAST_IPV4_FIELD))
1504*c47ac6aeSMaor Gottlieb 			return -ENOTSUPP;
1505038d2ef8SMaor Gottlieb 
1506038d2ef8SMaor Gottlieb 		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
1507038d2ef8SMaor Gottlieb 			 ethertype, 0xffff);
1508038d2ef8SMaor Gottlieb 		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
1509038d2ef8SMaor Gottlieb 			 ethertype, ETH_P_IP);
1510038d2ef8SMaor Gottlieb 
1511038d2ef8SMaor Gottlieb 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
1512038d2ef8SMaor Gottlieb 				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
1513038d2ef8SMaor Gottlieb 		       &ib_spec->ipv4.mask.src_ip,
1514038d2ef8SMaor Gottlieb 		       sizeof(ib_spec->ipv4.mask.src_ip));
1515038d2ef8SMaor Gottlieb 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
1516038d2ef8SMaor Gottlieb 				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
1517038d2ef8SMaor Gottlieb 		       &ib_spec->ipv4.val.src_ip,
1518038d2ef8SMaor Gottlieb 		       sizeof(ib_spec->ipv4.val.src_ip));
1519038d2ef8SMaor Gottlieb 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
1520038d2ef8SMaor Gottlieb 				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
1521038d2ef8SMaor Gottlieb 		       &ib_spec->ipv4.mask.dst_ip,
1522038d2ef8SMaor Gottlieb 		       sizeof(ib_spec->ipv4.mask.dst_ip));
1523038d2ef8SMaor Gottlieb 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
1524038d2ef8SMaor Gottlieb 				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
1525038d2ef8SMaor Gottlieb 		       &ib_spec->ipv4.val.dst_ip,
1526038d2ef8SMaor Gottlieb 		       sizeof(ib_spec->ipv4.val.dst_ip));
1527038d2ef8SMaor Gottlieb 		break;
1528026bae0cSMaor Gottlieb 	case IB_FLOW_SPEC_IPV6:
1529*c47ac6aeSMaor Gottlieb 		if (FIELDS_NOT_SUPPORTED(ib_spec->ipv6.mask, LAST_IPV6_FIELD))
1530*c47ac6aeSMaor Gottlieb 			return -ENOTSUPP;
1531026bae0cSMaor Gottlieb 
1532026bae0cSMaor Gottlieb 		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
1533026bae0cSMaor Gottlieb 			 ethertype, 0xffff);
1534026bae0cSMaor Gottlieb 		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
1535026bae0cSMaor Gottlieb 			 ethertype, ETH_P_IPV6);
1536026bae0cSMaor Gottlieb 
1537026bae0cSMaor Gottlieb 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
1538026bae0cSMaor Gottlieb 				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
1539026bae0cSMaor Gottlieb 		       &ib_spec->ipv6.mask.src_ip,
1540026bae0cSMaor Gottlieb 		       sizeof(ib_spec->ipv6.mask.src_ip));
1541026bae0cSMaor Gottlieb 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
1542026bae0cSMaor Gottlieb 				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
1543026bae0cSMaor Gottlieb 		       &ib_spec->ipv6.val.src_ip,
1544026bae0cSMaor Gottlieb 		       sizeof(ib_spec->ipv6.val.src_ip));
1545026bae0cSMaor Gottlieb 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
1546026bae0cSMaor Gottlieb 				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
1547026bae0cSMaor Gottlieb 		       &ib_spec->ipv6.mask.dst_ip,
1548026bae0cSMaor Gottlieb 		       sizeof(ib_spec->ipv6.mask.dst_ip));
1549026bae0cSMaor Gottlieb 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
1550026bae0cSMaor Gottlieb 				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
1551026bae0cSMaor Gottlieb 		       &ib_spec->ipv6.val.dst_ip,
1552026bae0cSMaor Gottlieb 		       sizeof(ib_spec->ipv6.val.dst_ip));
1553026bae0cSMaor Gottlieb 		break;
1554038d2ef8SMaor Gottlieb 	case IB_FLOW_SPEC_TCP:
1555*c47ac6aeSMaor Gottlieb 		if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask,
1556*c47ac6aeSMaor Gottlieb 					 LAST_TCP_UDP_FIELD))
1557*c47ac6aeSMaor Gottlieb 			return -ENOTSUPP;
1558038d2ef8SMaor Gottlieb 
1559038d2ef8SMaor Gottlieb 		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol,
1560038d2ef8SMaor Gottlieb 			 0xff);
1561038d2ef8SMaor Gottlieb 		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, ip_protocol,
1562038d2ef8SMaor Gottlieb 			 IPPROTO_TCP);
1563038d2ef8SMaor Gottlieb 
1564038d2ef8SMaor Gottlieb 		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, tcp_sport,
1565038d2ef8SMaor Gottlieb 			 ntohs(ib_spec->tcp_udp.mask.src_port));
1566038d2ef8SMaor Gottlieb 		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, tcp_sport,
1567038d2ef8SMaor Gottlieb 			 ntohs(ib_spec->tcp_udp.val.src_port));
1568038d2ef8SMaor Gottlieb 
1569038d2ef8SMaor Gottlieb 		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, tcp_dport,
1570038d2ef8SMaor Gottlieb 			 ntohs(ib_spec->tcp_udp.mask.dst_port));
1571038d2ef8SMaor Gottlieb 		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, tcp_dport,
1572038d2ef8SMaor Gottlieb 			 ntohs(ib_spec->tcp_udp.val.dst_port));
1573038d2ef8SMaor Gottlieb 		break;
1574038d2ef8SMaor Gottlieb 	case IB_FLOW_SPEC_UDP:
1575*c47ac6aeSMaor Gottlieb 		if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask,
1576*c47ac6aeSMaor Gottlieb 					 LAST_TCP_UDP_FIELD))
1577*c47ac6aeSMaor Gottlieb 			return -ENOTSUPP;
1578038d2ef8SMaor Gottlieb 
1579038d2ef8SMaor Gottlieb 		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol,
1580038d2ef8SMaor Gottlieb 			 0xff);
1581038d2ef8SMaor Gottlieb 		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, ip_protocol,
1582038d2ef8SMaor Gottlieb 			 IPPROTO_UDP);
1583038d2ef8SMaor Gottlieb 
1584038d2ef8SMaor Gottlieb 		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, udp_sport,
1585038d2ef8SMaor Gottlieb 			 ntohs(ib_spec->tcp_udp.mask.src_port));
1586038d2ef8SMaor Gottlieb 		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, udp_sport,
1587038d2ef8SMaor Gottlieb 			 ntohs(ib_spec->tcp_udp.val.src_port));
1588038d2ef8SMaor Gottlieb 
1589038d2ef8SMaor Gottlieb 		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, udp_dport,
1590038d2ef8SMaor Gottlieb 			 ntohs(ib_spec->tcp_udp.mask.dst_port));
1591038d2ef8SMaor Gottlieb 		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, udp_dport,
1592038d2ef8SMaor Gottlieb 			 ntohs(ib_spec->tcp_udp.val.dst_port));
1593038d2ef8SMaor Gottlieb 		break;
1594038d2ef8SMaor Gottlieb 	default:
1595038d2ef8SMaor Gottlieb 		return -EINVAL;
1596038d2ef8SMaor Gottlieb 	}
1597038d2ef8SMaor Gottlieb 
1598038d2ef8SMaor Gottlieb 	return 0;
1599038d2ef8SMaor Gottlieb }
1600038d2ef8SMaor Gottlieb 
1601038d2ef8SMaor Gottlieb /* A flow that could catch both multicast and unicast packets must not
1602038d2ef8SMaor Gottlieb  * be placed in the multicast flow steering table, since such a rule
1603038d2ef8SMaor Gottlieb  * could steal other multicast packets.
1604038d2ef8SMaor Gottlieb  */
1605038d2ef8SMaor Gottlieb static bool flow_is_multicast_only(struct ib_flow_attr *ib_attr)
1606038d2ef8SMaor Gottlieb {
1607038d2ef8SMaor Gottlieb 	struct ib_flow_spec_eth *eth_spec;
1608038d2ef8SMaor Gottlieb 
1609038d2ef8SMaor Gottlieb 	if (ib_attr->type != IB_FLOW_ATTR_NORMAL ||
1610038d2ef8SMaor Gottlieb 	    ib_attr->size < sizeof(struct ib_flow_attr) +
1611038d2ef8SMaor Gottlieb 	    sizeof(struct ib_flow_spec_eth) ||
1612038d2ef8SMaor Gottlieb 	    ib_attr->num_of_specs < 1)
1613038d2ef8SMaor Gottlieb 		return false;
1614038d2ef8SMaor Gottlieb 
1615038d2ef8SMaor Gottlieb 	eth_spec = (struct ib_flow_spec_eth *)(ib_attr + 1);
1616038d2ef8SMaor Gottlieb 	if (eth_spec->type != IB_FLOW_SPEC_ETH ||
1617038d2ef8SMaor Gottlieb 	    eth_spec->size != sizeof(*eth_spec))
1618038d2ef8SMaor Gottlieb 		return false;
1619038d2ef8SMaor Gottlieb 
1620038d2ef8SMaor Gottlieb 	return is_multicast_ether_addr(eth_spec->mask.dst_mac) &&
1621038d2ef8SMaor Gottlieb 	       is_multicast_ether_addr(eth_spec->val.dst_mac);
1622038d2ef8SMaor Gottlieb }
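
/*
 * For illustration: a rule counts as "multicast only" when both the
 * destination-MAC mask and value have the multicast bit (bit 0 of the first
 * octet) set, so only packets with a multicast destination MAC can match it.
 * The LEFTOVERS_MC spec further below, with mask.dst_mac = {0x1} and
 * val.dst_mac = {0x1}, is one such rule.
 */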
1623038d2ef8SMaor Gottlieb 
1624dd063d0eSMaor Gottlieb static bool is_valid_attr(const struct ib_flow_attr *flow_attr)
1625038d2ef8SMaor Gottlieb {
1626038d2ef8SMaor Gottlieb 	union ib_flow_spec *ib_spec = (union ib_flow_spec *)(flow_attr + 1);
1627038d2ef8SMaor Gottlieb 	bool has_ipv4_spec = false;
1628038d2ef8SMaor Gottlieb 	bool eth_type_ipv4 = true;
1629038d2ef8SMaor Gottlieb 	unsigned int spec_index;
1630038d2ef8SMaor Gottlieb 
1631038d2ef8SMaor Gottlieb 	/* Validate that ethertype is correct */
1632038d2ef8SMaor Gottlieb 	for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) {
1633038d2ef8SMaor Gottlieb 		if (ib_spec->type == IB_FLOW_SPEC_ETH &&
1634038d2ef8SMaor Gottlieb 		    ib_spec->eth.mask.ether_type) {
1635038d2ef8SMaor Gottlieb 			if (!((ib_spec->eth.mask.ether_type == htons(0xffff)) &&
1636038d2ef8SMaor Gottlieb 			      ib_spec->eth.val.ether_type == htons(ETH_P_IP)))
1637038d2ef8SMaor Gottlieb 				eth_type_ipv4 = false;
1638038d2ef8SMaor Gottlieb 		} else if (ib_spec->type == IB_FLOW_SPEC_IPV4) {
1639038d2ef8SMaor Gottlieb 			has_ipv4_spec = true;
1640038d2ef8SMaor Gottlieb 		}
1641038d2ef8SMaor Gottlieb 		ib_spec = (void *)ib_spec + ib_spec->size;
1642038d2ef8SMaor Gottlieb 	}
1643038d2ef8SMaor Gottlieb 	return !has_ipv4_spec || eth_type_ipv4;
1644038d2ef8SMaor Gottlieb }
1645038d2ef8SMaor Gottlieb 
1646038d2ef8SMaor Gottlieb static void put_flow_table(struct mlx5_ib_dev *dev,
1647038d2ef8SMaor Gottlieb 			   struct mlx5_ib_flow_prio *prio, bool ft_added)
1648038d2ef8SMaor Gottlieb {
1649038d2ef8SMaor Gottlieb 	prio->refcount -= !!ft_added;
1650038d2ef8SMaor Gottlieb 	if (!prio->refcount) {
1651038d2ef8SMaor Gottlieb 		mlx5_destroy_flow_table(prio->flow_table);
1652038d2ef8SMaor Gottlieb 		prio->flow_table = NULL;
1653038d2ef8SMaor Gottlieb 	}
1654038d2ef8SMaor Gottlieb }
1655038d2ef8SMaor Gottlieb 
1656038d2ef8SMaor Gottlieb static int mlx5_ib_destroy_flow(struct ib_flow *flow_id)
1657038d2ef8SMaor Gottlieb {
1658038d2ef8SMaor Gottlieb 	struct mlx5_ib_dev *dev = to_mdev(flow_id->qp->device);
1659038d2ef8SMaor Gottlieb 	struct mlx5_ib_flow_handler *handler = container_of(flow_id,
1660038d2ef8SMaor Gottlieb 							  struct mlx5_ib_flow_handler,
1661038d2ef8SMaor Gottlieb 							  ibflow);
1662038d2ef8SMaor Gottlieb 	struct mlx5_ib_flow_handler *iter, *tmp;
1663038d2ef8SMaor Gottlieb 
1664038d2ef8SMaor Gottlieb 	mutex_lock(&dev->flow_db.lock);
1665038d2ef8SMaor Gottlieb 
1666038d2ef8SMaor Gottlieb 	list_for_each_entry_safe(iter, tmp, &handler->list, list) {
1667038d2ef8SMaor Gottlieb 		mlx5_del_flow_rule(iter->rule);
1668cc0e5d42SMaor Gottlieb 		put_flow_table(dev, iter->prio, true);
1669038d2ef8SMaor Gottlieb 		list_del(&iter->list);
1670038d2ef8SMaor Gottlieb 		kfree(iter);
1671038d2ef8SMaor Gottlieb 	}
1672038d2ef8SMaor Gottlieb 
1673038d2ef8SMaor Gottlieb 	mlx5_del_flow_rule(handler->rule);
16745497adc6SMaor Gottlieb 	put_flow_table(dev, handler->prio, true);
1675038d2ef8SMaor Gottlieb 	mutex_unlock(&dev->flow_db.lock);
1676038d2ef8SMaor Gottlieb 
1677038d2ef8SMaor Gottlieb 	kfree(handler);
1678038d2ef8SMaor Gottlieb 
1679038d2ef8SMaor Gottlieb 	return 0;
1680038d2ef8SMaor Gottlieb }
1681038d2ef8SMaor Gottlieb 
168235d19011SMaor Gottlieb static int ib_prio_to_core_prio(unsigned int priority, bool dont_trap)
168335d19011SMaor Gottlieb {
168435d19011SMaor Gottlieb 	priority *= 2;
168535d19011SMaor Gottlieb 	if (!dont_trap)
168635d19011SMaor Gottlieb 		priority++;
168735d19011SMaor Gottlieb 	return priority;
168835d19011SMaor Gottlieb }
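
/*
 * For illustration, the mapping above reserves a pair of core priorities per
 * IB priority: the even slot for IB_FLOW_ATTR_FLAGS_DONT_TRAP rules and the
 * odd slot for normal rules:
 *
 *	ib prio 0, dont_trap -> core prio 0	ib prio 0, normal -> core prio 1
 *	ib prio 1, dont_trap -> core prio 2	ib prio 1, normal -> core prio 3
 */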
168935d19011SMaor Gottlieb 
1690cc0e5d42SMaor Gottlieb enum flow_table_type {
1691cc0e5d42SMaor Gottlieb 	MLX5_IB_FT_RX,
1692cc0e5d42SMaor Gottlieb 	MLX5_IB_FT_TX
1693cc0e5d42SMaor Gottlieb };
1694cc0e5d42SMaor Gottlieb 
1695038d2ef8SMaor Gottlieb #define MLX5_FS_MAX_TYPES	 10
1696038d2ef8SMaor Gottlieb #define MLX5_FS_MAX_ENTRIES	 32000UL
1697038d2ef8SMaor Gottlieb static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
1698cc0e5d42SMaor Gottlieb 						struct ib_flow_attr *flow_attr,
1699cc0e5d42SMaor Gottlieb 						enum flow_table_type ft_type)
1700038d2ef8SMaor Gottlieb {
170135d19011SMaor Gottlieb 	bool dont_trap = flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP;
1702038d2ef8SMaor Gottlieb 	struct mlx5_flow_namespace *ns = NULL;
1703038d2ef8SMaor Gottlieb 	struct mlx5_ib_flow_prio *prio;
1704038d2ef8SMaor Gottlieb 	struct mlx5_flow_table *ft;
1705038d2ef8SMaor Gottlieb 	int num_entries;
1706038d2ef8SMaor Gottlieb 	int num_groups;
1707038d2ef8SMaor Gottlieb 	int priority;
1708038d2ef8SMaor Gottlieb 	int err = 0;
1709038d2ef8SMaor Gottlieb 
1710038d2ef8SMaor Gottlieb 	if (flow_attr->type == IB_FLOW_ATTR_NORMAL) {
171135d19011SMaor Gottlieb 		if (flow_is_multicast_only(flow_attr) &&
171235d19011SMaor Gottlieb 		    !dont_trap)
1713038d2ef8SMaor Gottlieb 			priority = MLX5_IB_FLOW_MCAST_PRIO;
1714038d2ef8SMaor Gottlieb 		else
171535d19011SMaor Gottlieb 			priority = ib_prio_to_core_prio(flow_attr->priority,
171635d19011SMaor Gottlieb 							dont_trap);
1717038d2ef8SMaor Gottlieb 		ns = mlx5_get_flow_namespace(dev->mdev,
1718038d2ef8SMaor Gottlieb 					     MLX5_FLOW_NAMESPACE_BYPASS);
1719038d2ef8SMaor Gottlieb 		num_entries = MLX5_FS_MAX_ENTRIES;
1720038d2ef8SMaor Gottlieb 		num_groups = MLX5_FS_MAX_TYPES;
1721038d2ef8SMaor Gottlieb 		prio = &dev->flow_db.prios[priority];
1722038d2ef8SMaor Gottlieb 	} else if (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
1723038d2ef8SMaor Gottlieb 		   flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT) {
1724038d2ef8SMaor Gottlieb 		ns = mlx5_get_flow_namespace(dev->mdev,
1725038d2ef8SMaor Gottlieb 					     MLX5_FLOW_NAMESPACE_LEFTOVERS);
1726038d2ef8SMaor Gottlieb 		build_leftovers_ft_param(&priority,
1727038d2ef8SMaor Gottlieb 					 &num_entries,
1728038d2ef8SMaor Gottlieb 					 &num_groups);
1729038d2ef8SMaor Gottlieb 		prio = &dev->flow_db.prios[MLX5_IB_FLOW_LEFTOVERS_PRIO];
1730cc0e5d42SMaor Gottlieb 	} else if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) {
1731cc0e5d42SMaor Gottlieb 		if (!MLX5_CAP_FLOWTABLE(dev->mdev,
1732cc0e5d42SMaor Gottlieb 					allow_sniffer_and_nic_rx_shared_tir))
1733cc0e5d42SMaor Gottlieb 			return ERR_PTR(-ENOTSUPP);
1734cc0e5d42SMaor Gottlieb 
1735cc0e5d42SMaor Gottlieb 		ns = mlx5_get_flow_namespace(dev->mdev, ft_type == MLX5_IB_FT_RX ?
1736cc0e5d42SMaor Gottlieb 					     MLX5_FLOW_NAMESPACE_SNIFFER_RX :
1737cc0e5d42SMaor Gottlieb 					     MLX5_FLOW_NAMESPACE_SNIFFER_TX);
1738cc0e5d42SMaor Gottlieb 
1739cc0e5d42SMaor Gottlieb 		prio = &dev->flow_db.sniffer[ft_type];
1740cc0e5d42SMaor Gottlieb 		priority = 0;
1741cc0e5d42SMaor Gottlieb 		num_entries = 1;
1742cc0e5d42SMaor Gottlieb 		num_groups = 1;
1743038d2ef8SMaor Gottlieb 	}
1744038d2ef8SMaor Gottlieb 
1745038d2ef8SMaor Gottlieb 	if (!ns)
1746038d2ef8SMaor Gottlieb 		return ERR_PTR(-ENOTSUPP);
1747038d2ef8SMaor Gottlieb 
1748038d2ef8SMaor Gottlieb 	ft = prio->flow_table;
1749038d2ef8SMaor Gottlieb 	if (!ft) {
1750038d2ef8SMaor Gottlieb 		ft = mlx5_create_auto_grouped_flow_table(ns, priority,
1751038d2ef8SMaor Gottlieb 							 num_entries,
1752d63cd286SMaor Gottlieb 							 num_groups,
1753d63cd286SMaor Gottlieb 							 0);
1754038d2ef8SMaor Gottlieb 
1755038d2ef8SMaor Gottlieb 		if (!IS_ERR(ft)) {
1756038d2ef8SMaor Gottlieb 			prio->refcount = 0;
1757038d2ef8SMaor Gottlieb 			prio->flow_table = ft;
1758038d2ef8SMaor Gottlieb 		} else {
1759038d2ef8SMaor Gottlieb 			err = PTR_ERR(ft);
1760038d2ef8SMaor Gottlieb 		}
1761038d2ef8SMaor Gottlieb 	}
1762038d2ef8SMaor Gottlieb 
1763038d2ef8SMaor Gottlieb 	return err ? ERR_PTR(err) : prio;
1764038d2ef8SMaor Gottlieb }
1765038d2ef8SMaor Gottlieb 
1766038d2ef8SMaor Gottlieb static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
1767038d2ef8SMaor Gottlieb 						     struct mlx5_ib_flow_prio *ft_prio,
1768dd063d0eSMaor Gottlieb 						     const struct ib_flow_attr *flow_attr,
1769038d2ef8SMaor Gottlieb 						     struct mlx5_flow_destination *dst)
1770038d2ef8SMaor Gottlieb {
1771038d2ef8SMaor Gottlieb 	struct mlx5_flow_table	*ft = ft_prio->flow_table;
1772038d2ef8SMaor Gottlieb 	struct mlx5_ib_flow_handler *handler;
1773c5bb1730SMaor Gottlieb 	struct mlx5_flow_spec *spec;
1774dd063d0eSMaor Gottlieb 	const void *ib_flow = (const void *)flow_attr + sizeof(*flow_attr);
1775038d2ef8SMaor Gottlieb 	unsigned int spec_index;
177635d19011SMaor Gottlieb 	u32 action;
1777038d2ef8SMaor Gottlieb 	int err = 0;
1778038d2ef8SMaor Gottlieb 
1779038d2ef8SMaor Gottlieb 	if (!is_valid_attr(flow_attr))
1780038d2ef8SMaor Gottlieb 		return ERR_PTR(-EINVAL);
1781038d2ef8SMaor Gottlieb 
1782c5bb1730SMaor Gottlieb 	spec = mlx5_vzalloc(sizeof(*spec));
1783038d2ef8SMaor Gottlieb 	handler = kzalloc(sizeof(*handler), GFP_KERNEL);
1784c5bb1730SMaor Gottlieb 	if (!handler || !spec) {
1785038d2ef8SMaor Gottlieb 		err = -ENOMEM;
1786038d2ef8SMaor Gottlieb 		goto free;
1787038d2ef8SMaor Gottlieb 	}
1788038d2ef8SMaor Gottlieb 
1789038d2ef8SMaor Gottlieb 	INIT_LIST_HEAD(&handler->list);
1790038d2ef8SMaor Gottlieb 
1791038d2ef8SMaor Gottlieb 	for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) {
1792c5bb1730SMaor Gottlieb 		err = parse_flow_attr(spec->match_criteria,
1793c5bb1730SMaor Gottlieb 				      spec->match_value, ib_flow);
1794038d2ef8SMaor Gottlieb 		if (err < 0)
1795038d2ef8SMaor Gottlieb 			goto free;
1796038d2ef8SMaor Gottlieb 
1797038d2ef8SMaor Gottlieb 		ib_flow += ((union ib_flow_spec *)ib_flow)->size;
1798038d2ef8SMaor Gottlieb 	}
1799038d2ef8SMaor Gottlieb 
1800038d2ef8SMaor Gottlieb 	/* Outer header support only */
1801c5bb1730SMaor Gottlieb 	spec->match_criteria_enable = (!outer_header_zero(spec->match_criteria))
1802c5bb1730SMaor Gottlieb 		<< 0;
180335d19011SMaor Gottlieb 	action = dst ? MLX5_FLOW_CONTEXT_ACTION_FWD_DEST :
180435d19011SMaor Gottlieb 		MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
1805c5bb1730SMaor Gottlieb 	handler->rule = mlx5_add_flow_rule(ft, spec,
180635d19011SMaor Gottlieb 					   action,
1807038d2ef8SMaor Gottlieb 					   MLX5_FS_DEFAULT_FLOW_TAG,
1808038d2ef8SMaor Gottlieb 					   dst);
1809038d2ef8SMaor Gottlieb 
1810038d2ef8SMaor Gottlieb 	if (IS_ERR(handler->rule)) {
1811038d2ef8SMaor Gottlieb 		err = PTR_ERR(handler->rule);
1812038d2ef8SMaor Gottlieb 		goto free;
1813038d2ef8SMaor Gottlieb 	}
1814038d2ef8SMaor Gottlieb 
1815d9d4980aSMaor Gottlieb 	ft_prio->refcount++;
18165497adc6SMaor Gottlieb 	handler->prio = ft_prio;
1817038d2ef8SMaor Gottlieb 
1818038d2ef8SMaor Gottlieb 	ft_prio->flow_table = ft;
1819038d2ef8SMaor Gottlieb free:
1820038d2ef8SMaor Gottlieb 	if (err)
1821038d2ef8SMaor Gottlieb 		kfree(handler);
1822c5bb1730SMaor Gottlieb 	kvfree(spec);
1823038d2ef8SMaor Gottlieb 	return err ? ERR_PTR(err) : handler;
1824038d2ef8SMaor Gottlieb }
1825038d2ef8SMaor Gottlieb 
182635d19011SMaor Gottlieb static struct mlx5_ib_flow_handler *create_dont_trap_rule(struct mlx5_ib_dev *dev,
182735d19011SMaor Gottlieb 							  struct mlx5_ib_flow_prio *ft_prio,
182835d19011SMaor Gottlieb 							  struct ib_flow_attr *flow_attr,
182935d19011SMaor Gottlieb 							  struct mlx5_flow_destination *dst)
183035d19011SMaor Gottlieb {
183135d19011SMaor Gottlieb 	struct mlx5_ib_flow_handler *handler_dst = NULL;
183235d19011SMaor Gottlieb 	struct mlx5_ib_flow_handler *handler = NULL;
183335d19011SMaor Gottlieb 
183435d19011SMaor Gottlieb 	handler = create_flow_rule(dev, ft_prio, flow_attr, NULL);
183535d19011SMaor Gottlieb 	if (!IS_ERR(handler)) {
183635d19011SMaor Gottlieb 		handler_dst = create_flow_rule(dev, ft_prio,
183735d19011SMaor Gottlieb 					       flow_attr, dst);
183835d19011SMaor Gottlieb 		if (IS_ERR(handler_dst)) {
183935d19011SMaor Gottlieb 			mlx5_del_flow_rule(handler->rule);
1840d9d4980aSMaor Gottlieb 			ft_prio->refcount--;
184135d19011SMaor Gottlieb 			kfree(handler);
184235d19011SMaor Gottlieb 			handler = handler_dst;
184335d19011SMaor Gottlieb 		} else {
184435d19011SMaor Gottlieb 			list_add(&handler_dst->list, &handler->list);
184535d19011SMaor Gottlieb 		}
184635d19011SMaor Gottlieb 	}
184735d19011SMaor Gottlieb 
184835d19011SMaor Gottlieb 	return handler;
184935d19011SMaor Gottlieb }
1850038d2ef8SMaor Gottlieb enum {
1851038d2ef8SMaor Gottlieb 	LEFTOVERS_MC,
1852038d2ef8SMaor Gottlieb 	LEFTOVERS_UC,
1853038d2ef8SMaor Gottlieb };
1854038d2ef8SMaor Gottlieb 
1855038d2ef8SMaor Gottlieb static struct mlx5_ib_flow_handler *create_leftovers_rule(struct mlx5_ib_dev *dev,
1856038d2ef8SMaor Gottlieb 							  struct mlx5_ib_flow_prio *ft_prio,
1857038d2ef8SMaor Gottlieb 							  struct ib_flow_attr *flow_attr,
1858038d2ef8SMaor Gottlieb 							  struct mlx5_flow_destination *dst)
1859038d2ef8SMaor Gottlieb {
1860038d2ef8SMaor Gottlieb 	struct mlx5_ib_flow_handler *handler_ucast = NULL;
1861038d2ef8SMaor Gottlieb 	struct mlx5_ib_flow_handler *handler = NULL;
1862038d2ef8SMaor Gottlieb 
1863038d2ef8SMaor Gottlieb 	static struct {
1864038d2ef8SMaor Gottlieb 		struct ib_flow_attr	flow_attr;
1865038d2ef8SMaor Gottlieb 		struct ib_flow_spec_eth eth_flow;
1866038d2ef8SMaor Gottlieb 	} leftovers_specs[] = {
1867038d2ef8SMaor Gottlieb 		[LEFTOVERS_MC] = {
1868038d2ef8SMaor Gottlieb 			.flow_attr = {
1869038d2ef8SMaor Gottlieb 				.num_of_specs = 1,
1870038d2ef8SMaor Gottlieb 				.size = sizeof(leftovers_specs[0])
1871038d2ef8SMaor Gottlieb 			},
1872038d2ef8SMaor Gottlieb 			.eth_flow = {
1873038d2ef8SMaor Gottlieb 				.type = IB_FLOW_SPEC_ETH,
1874038d2ef8SMaor Gottlieb 				.size = sizeof(struct ib_flow_spec_eth),
1875038d2ef8SMaor Gottlieb 				.mask = {.dst_mac = {0x1} },
1876038d2ef8SMaor Gottlieb 				.val =  {.dst_mac = {0x1} }
1877038d2ef8SMaor Gottlieb 			}
1878038d2ef8SMaor Gottlieb 		},
1879038d2ef8SMaor Gottlieb 		[LEFTOVERS_UC] = {
1880038d2ef8SMaor Gottlieb 			.flow_attr = {
1881038d2ef8SMaor Gottlieb 				.num_of_specs = 1,
1882038d2ef8SMaor Gottlieb 				.size = sizeof(leftovers_specs[0])
1883038d2ef8SMaor Gottlieb 			},
1884038d2ef8SMaor Gottlieb 			.eth_flow = {
1885038d2ef8SMaor Gottlieb 				.type = IB_FLOW_SPEC_ETH,
1886038d2ef8SMaor Gottlieb 				.size = sizeof(struct ib_flow_spec_eth),
1887038d2ef8SMaor Gottlieb 				.mask = {.dst_mac = {0x1} },
1888038d2ef8SMaor Gottlieb 				.val = {.dst_mac = {} }
1889038d2ef8SMaor Gottlieb 			}
1890038d2ef8SMaor Gottlieb 		}
1891038d2ef8SMaor Gottlieb 	};
1892038d2ef8SMaor Gottlieb 
1893038d2ef8SMaor Gottlieb 	handler = create_flow_rule(dev, ft_prio,
1894038d2ef8SMaor Gottlieb 				   &leftovers_specs[LEFTOVERS_MC].flow_attr,
1895038d2ef8SMaor Gottlieb 				   dst);
1896038d2ef8SMaor Gottlieb 	if (!IS_ERR(handler) &&
1897038d2ef8SMaor Gottlieb 	    flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT) {
1898038d2ef8SMaor Gottlieb 		handler_ucast = create_flow_rule(dev, ft_prio,
1899038d2ef8SMaor Gottlieb 						 &leftovers_specs[LEFTOVERS_UC].flow_attr,
1900038d2ef8SMaor Gottlieb 						 dst);
1901038d2ef8SMaor Gottlieb 		if (IS_ERR(handler_ucast)) {
19027055a294SMaor Gottlieb 			mlx5_del_flow_rule(handler->rule);
1903d9d4980aSMaor Gottlieb 			ft_prio->refcount--;
1904038d2ef8SMaor Gottlieb 			kfree(handler);
1905038d2ef8SMaor Gottlieb 			handler = handler_ucast;
1906038d2ef8SMaor Gottlieb 		} else {
1907038d2ef8SMaor Gottlieb 			list_add(&handler_ucast->list, &handler->list);
1908038d2ef8SMaor Gottlieb 		}
1909038d2ef8SMaor Gottlieb 	}
1910038d2ef8SMaor Gottlieb 
1911038d2ef8SMaor Gottlieb 	return handler;
1912038d2ef8SMaor Gottlieb }
1913038d2ef8SMaor Gottlieb 
1914cc0e5d42SMaor Gottlieb static struct mlx5_ib_flow_handler *create_sniffer_rule(struct mlx5_ib_dev *dev,
1915cc0e5d42SMaor Gottlieb 							struct mlx5_ib_flow_prio *ft_rx,
1916cc0e5d42SMaor Gottlieb 							struct mlx5_ib_flow_prio *ft_tx,
1917cc0e5d42SMaor Gottlieb 							struct mlx5_flow_destination *dst)
1918cc0e5d42SMaor Gottlieb {
1919cc0e5d42SMaor Gottlieb 	struct mlx5_ib_flow_handler *handler_rx;
1920cc0e5d42SMaor Gottlieb 	struct mlx5_ib_flow_handler *handler_tx;
1921cc0e5d42SMaor Gottlieb 	int err;
1922cc0e5d42SMaor Gottlieb 	static const struct ib_flow_attr flow_attr  = {
1923cc0e5d42SMaor Gottlieb 		.num_of_specs = 0,
1924cc0e5d42SMaor Gottlieb 		.size = sizeof(flow_attr)
1925cc0e5d42SMaor Gottlieb 	};
1926cc0e5d42SMaor Gottlieb 
1927cc0e5d42SMaor Gottlieb 	handler_rx = create_flow_rule(dev, ft_rx, &flow_attr, dst);
1928cc0e5d42SMaor Gottlieb 	if (IS_ERR(handler_rx)) {
1929cc0e5d42SMaor Gottlieb 		err = PTR_ERR(handler_rx);
1930cc0e5d42SMaor Gottlieb 		goto err;
1931cc0e5d42SMaor Gottlieb 	}
1932cc0e5d42SMaor Gottlieb 
1933cc0e5d42SMaor Gottlieb 	handler_tx = create_flow_rule(dev, ft_tx, &flow_attr, dst);
1934cc0e5d42SMaor Gottlieb 	if (IS_ERR(handler_tx)) {
1935cc0e5d42SMaor Gottlieb 		err = PTR_ERR(handler_tx);
1936cc0e5d42SMaor Gottlieb 		goto err_tx;
1937cc0e5d42SMaor Gottlieb 	}
1938cc0e5d42SMaor Gottlieb 
1939cc0e5d42SMaor Gottlieb 	list_add(&handler_tx->list, &handler_rx->list);
1940cc0e5d42SMaor Gottlieb 
1941cc0e5d42SMaor Gottlieb 	return handler_rx;
1942cc0e5d42SMaor Gottlieb 
1943cc0e5d42SMaor Gottlieb err_tx:
1944cc0e5d42SMaor Gottlieb 	mlx5_del_flow_rule(handler_rx->rule);
1945cc0e5d42SMaor Gottlieb 	ft_rx->refcount--;
1946cc0e5d42SMaor Gottlieb 	kfree(handler_rx);
1947cc0e5d42SMaor Gottlieb err:
1948cc0e5d42SMaor Gottlieb 	return ERR_PTR(err);
1949cc0e5d42SMaor Gottlieb }
1950cc0e5d42SMaor Gottlieb 
1951038d2ef8SMaor Gottlieb static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
1952038d2ef8SMaor Gottlieb 					   struct ib_flow_attr *flow_attr,
1953038d2ef8SMaor Gottlieb 					   int domain)
1954038d2ef8SMaor Gottlieb {
1955038d2ef8SMaor Gottlieb 	struct mlx5_ib_dev *dev = to_mdev(qp->device);
1956038d2ef8SMaor Gottlieb 	struct mlx5_ib_flow_handler *handler = NULL;
1957038d2ef8SMaor Gottlieb 	struct mlx5_flow_destination *dst = NULL;
1958cc0e5d42SMaor Gottlieb 	struct mlx5_ib_flow_prio *ft_prio_tx = NULL;
1959038d2ef8SMaor Gottlieb 	struct mlx5_ib_flow_prio *ft_prio;
1960038d2ef8SMaor Gottlieb 	int err;
1961038d2ef8SMaor Gottlieb 
1962038d2ef8SMaor Gottlieb 	if (flow_attr->priority > MLX5_IB_FLOW_LAST_PRIO)
1963038d2ef8SMaor Gottlieb 		return ERR_PTR(-ENOSPC);
1964038d2ef8SMaor Gottlieb 
1965038d2ef8SMaor Gottlieb 	if (domain != IB_FLOW_DOMAIN_USER ||
1966038d2ef8SMaor Gottlieb 	    flow_attr->port > MLX5_CAP_GEN(dev->mdev, num_ports) ||
196735d19011SMaor Gottlieb 	    (flow_attr->flags & ~IB_FLOW_ATTR_FLAGS_DONT_TRAP))
1968038d2ef8SMaor Gottlieb 		return ERR_PTR(-EINVAL);
1969038d2ef8SMaor Gottlieb 
1970038d2ef8SMaor Gottlieb 	dst = kzalloc(sizeof(*dst), GFP_KERNEL);
1971038d2ef8SMaor Gottlieb 	if (!dst)
1972038d2ef8SMaor Gottlieb 		return ERR_PTR(-ENOMEM);
1973038d2ef8SMaor Gottlieb 
1974038d2ef8SMaor Gottlieb 	mutex_lock(&dev->flow_db.lock);
1975038d2ef8SMaor Gottlieb 
1976cc0e5d42SMaor Gottlieb 	ft_prio = get_flow_table(dev, flow_attr, MLX5_IB_FT_RX);
1977038d2ef8SMaor Gottlieb 	if (IS_ERR(ft_prio)) {
1978038d2ef8SMaor Gottlieb 		err = PTR_ERR(ft_prio);
1979038d2ef8SMaor Gottlieb 		goto unlock;
1980038d2ef8SMaor Gottlieb 	}
1981cc0e5d42SMaor Gottlieb 	if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) {
1982cc0e5d42SMaor Gottlieb 		ft_prio_tx = get_flow_table(dev, flow_attr, MLX5_IB_FT_TX);
1983cc0e5d42SMaor Gottlieb 		if (IS_ERR(ft_prio_tx)) {
1984cc0e5d42SMaor Gottlieb 			err = PTR_ERR(ft_prio_tx);
1985cc0e5d42SMaor Gottlieb 			ft_prio_tx = NULL;
1986cc0e5d42SMaor Gottlieb 			goto destroy_ft;
1987cc0e5d42SMaor Gottlieb 		}
1988cc0e5d42SMaor Gottlieb 	}
1989038d2ef8SMaor Gottlieb 
1990038d2ef8SMaor Gottlieb 	dst->type = MLX5_FLOW_DESTINATION_TYPE_TIR;
1991038d2ef8SMaor Gottlieb 	dst->tir_num = to_mqp(qp)->raw_packet_qp.rq.tirn;
1992038d2ef8SMaor Gottlieb 
1993038d2ef8SMaor Gottlieb 	if (flow_attr->type == IB_FLOW_ATTR_NORMAL) {
199435d19011SMaor Gottlieb 		if (flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP)  {
199535d19011SMaor Gottlieb 			handler = create_dont_trap_rule(dev, ft_prio,
199635d19011SMaor Gottlieb 							flow_attr, dst);
199735d19011SMaor Gottlieb 		} else {
1998038d2ef8SMaor Gottlieb 			handler = create_flow_rule(dev, ft_prio, flow_attr,
1999038d2ef8SMaor Gottlieb 						   dst);
200035d19011SMaor Gottlieb 		}
2001038d2ef8SMaor Gottlieb 	} else if (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
2002038d2ef8SMaor Gottlieb 		   flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT) {
2003038d2ef8SMaor Gottlieb 		handler = create_leftovers_rule(dev, ft_prio, flow_attr,
2004038d2ef8SMaor Gottlieb 						dst);
2005cc0e5d42SMaor Gottlieb 	} else if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) {
2006cc0e5d42SMaor Gottlieb 		handler = create_sniffer_rule(dev, ft_prio, ft_prio_tx, dst);
2007038d2ef8SMaor Gottlieb 	} else {
2008038d2ef8SMaor Gottlieb 		err = -EINVAL;
2009038d2ef8SMaor Gottlieb 		goto destroy_ft;
2010038d2ef8SMaor Gottlieb 	}
2011038d2ef8SMaor Gottlieb 
2012038d2ef8SMaor Gottlieb 	if (IS_ERR(handler)) {
2013038d2ef8SMaor Gottlieb 		err = PTR_ERR(handler);
2014038d2ef8SMaor Gottlieb 		handler = NULL;
2015038d2ef8SMaor Gottlieb 		goto destroy_ft;
2016038d2ef8SMaor Gottlieb 	}
2017038d2ef8SMaor Gottlieb 
2018038d2ef8SMaor Gottlieb 	mutex_unlock(&dev->flow_db.lock);
2019038d2ef8SMaor Gottlieb 	kfree(dst);
2020038d2ef8SMaor Gottlieb 
2021038d2ef8SMaor Gottlieb 	return &handler->ibflow;
2022038d2ef8SMaor Gottlieb 
2023038d2ef8SMaor Gottlieb destroy_ft:
2024038d2ef8SMaor Gottlieb 	put_flow_table(dev, ft_prio, false);
2025cc0e5d42SMaor Gottlieb 	if (ft_prio_tx)
2026cc0e5d42SMaor Gottlieb 		put_flow_table(dev, ft_prio_tx, false);
2027038d2ef8SMaor Gottlieb unlock:
2028038d2ef8SMaor Gottlieb 	mutex_unlock(&dev->flow_db.lock);
2029038d2ef8SMaor Gottlieb 	kfree(dst);
2030038d2ef8SMaor Gottlieb 	kfree(handler);
2031038d2ef8SMaor Gottlieb 	return ERR_PTR(err);
2032038d2ef8SMaor Gottlieb }
2033038d2ef8SMaor Gottlieb 
2034e126ba97SEli Cohen static int mlx5_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
2035e126ba97SEli Cohen {
2036e126ba97SEli Cohen 	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
2037e126ba97SEli Cohen 	int err;
2038e126ba97SEli Cohen 
20399603b61dSJack Morgenstein 	err = mlx5_core_attach_mcg(dev->mdev, gid, ibqp->qp_num);
2040e126ba97SEli Cohen 	if (err)
2041e126ba97SEli Cohen 		mlx5_ib_warn(dev, "failed attaching QPN 0x%x, MGID %pI6\n",
2042e126ba97SEli Cohen 			     ibqp->qp_num, gid->raw);
2043e126ba97SEli Cohen 
2044e126ba97SEli Cohen 	return err;
2045e126ba97SEli Cohen }
2046e126ba97SEli Cohen 
2047e126ba97SEli Cohen static int mlx5_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
2048e126ba97SEli Cohen {
2049e126ba97SEli Cohen 	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
2050e126ba97SEli Cohen 	int err;
2051e126ba97SEli Cohen 
20529603b61dSJack Morgenstein 	err = mlx5_core_detach_mcg(dev->mdev, gid, ibqp->qp_num);
2053e126ba97SEli Cohen 	if (err)
2054e126ba97SEli Cohen 		mlx5_ib_warn(dev, "failed detaching QPN 0x%x, MGID %pI6\n",
2055e126ba97SEli Cohen 			     ibqp->qp_num, gid->raw);
2056e126ba97SEli Cohen 
2057e126ba97SEli Cohen 	return err;
2058e126ba97SEli Cohen }
2059e126ba97SEli Cohen 
2060e126ba97SEli Cohen static int init_node_data(struct mlx5_ib_dev *dev)
2061e126ba97SEli Cohen {
20621b5daf11SMajd Dibbiny 	int err;
2063e126ba97SEli Cohen 
20641b5daf11SMajd Dibbiny 	err = mlx5_query_node_desc(dev, dev->ib_dev.node_desc);
2065e126ba97SEli Cohen 	if (err)
2066e126ba97SEli Cohen 		return err;
20671b5daf11SMajd Dibbiny 
20681b5daf11SMajd Dibbiny 	dev->mdev->rev_id = dev->mdev->pdev->revision;
20691b5daf11SMajd Dibbiny 
20701b5daf11SMajd Dibbiny 	return mlx5_query_node_guid(dev, &dev->ib_dev.node_guid);
2071e126ba97SEli Cohen }
2072e126ba97SEli Cohen 
2073e126ba97SEli Cohen static ssize_t show_fw_pages(struct device *device, struct device_attribute *attr,
2074e126ba97SEli Cohen 			     char *buf)
2075e126ba97SEli Cohen {
2076e126ba97SEli Cohen 	struct mlx5_ib_dev *dev =
2077e126ba97SEli Cohen 		container_of(device, struct mlx5_ib_dev, ib_dev.dev);
2078e126ba97SEli Cohen 
20799603b61dSJack Morgenstein 	return sprintf(buf, "%d\n", dev->mdev->priv.fw_pages);
2080e126ba97SEli Cohen }
2081e126ba97SEli Cohen 
2082e126ba97SEli Cohen static ssize_t show_reg_pages(struct device *device,
2083e126ba97SEli Cohen 			      struct device_attribute *attr, char *buf)
2084e126ba97SEli Cohen {
2085e126ba97SEli Cohen 	struct mlx5_ib_dev *dev =
2086e126ba97SEli Cohen 		container_of(device, struct mlx5_ib_dev, ib_dev.dev);
2087e126ba97SEli Cohen 
20886aec21f6SHaggai Eran 	return sprintf(buf, "%d\n", atomic_read(&dev->mdev->priv.reg_pages));
2089e126ba97SEli Cohen }
2090e126ba97SEli Cohen 
2091e126ba97SEli Cohen static ssize_t show_hca(struct device *device, struct device_attribute *attr,
2092e126ba97SEli Cohen 			char *buf)
2093e126ba97SEli Cohen {
2094e126ba97SEli Cohen 	struct mlx5_ib_dev *dev =
2095e126ba97SEli Cohen 		container_of(device, struct mlx5_ib_dev, ib_dev.dev);
20969603b61dSJack Morgenstein 	return sprintf(buf, "MT%d\n", dev->mdev->pdev->device);
2097e126ba97SEli Cohen }
2098e126ba97SEli Cohen 
2099e126ba97SEli Cohen static ssize_t show_rev(struct device *device, struct device_attribute *attr,
2100e126ba97SEli Cohen 			char *buf)
2101e126ba97SEli Cohen {
2102e126ba97SEli Cohen 	struct mlx5_ib_dev *dev =
2103e126ba97SEli Cohen 		container_of(device, struct mlx5_ib_dev, ib_dev.dev);
21049603b61dSJack Morgenstein 	return sprintf(buf, "%x\n", dev->mdev->rev_id);
2105e126ba97SEli Cohen }
2106e126ba97SEli Cohen 
2107e126ba97SEli Cohen static ssize_t show_board(struct device *device, struct device_attribute *attr,
2108e126ba97SEli Cohen 			  char *buf)
2109e126ba97SEli Cohen {
2110e126ba97SEli Cohen 	struct mlx5_ib_dev *dev =
2111e126ba97SEli Cohen 		container_of(device, struct mlx5_ib_dev, ib_dev.dev);
2112e126ba97SEli Cohen 	return sprintf(buf, "%.*s\n", MLX5_BOARD_ID_LEN,
21139603b61dSJack Morgenstein 		       dev->mdev->board_id);
2114e126ba97SEli Cohen }
2115e126ba97SEli Cohen 
2116e126ba97SEli Cohen static DEVICE_ATTR(hw_rev,   S_IRUGO, show_rev,    NULL);
2117e126ba97SEli Cohen static DEVICE_ATTR(hca_type, S_IRUGO, show_hca,    NULL);
2118e126ba97SEli Cohen static DEVICE_ATTR(board_id, S_IRUGO, show_board,  NULL);
2119e126ba97SEli Cohen static DEVICE_ATTR(fw_pages, S_IRUGO, show_fw_pages, NULL);
2120e126ba97SEli Cohen static DEVICE_ATTR(reg_pages, S_IRUGO, show_reg_pages, NULL);
2121e126ba97SEli Cohen 
2122e126ba97SEli Cohen static struct device_attribute *mlx5_class_attributes[] = {
2123e126ba97SEli Cohen 	&dev_attr_hw_rev,
2124e126ba97SEli Cohen 	&dev_attr_hca_type,
2125e126ba97SEli Cohen 	&dev_attr_board_id,
2126e126ba97SEli Cohen 	&dev_attr_fw_pages,
2127e126ba97SEli Cohen 	&dev_attr_reg_pages,
2128e126ba97SEli Cohen };
2129e126ba97SEli Cohen 
21307722f47eSHaggai Eran static void pkey_change_handler(struct work_struct *work)
21317722f47eSHaggai Eran {
21327722f47eSHaggai Eran 	struct mlx5_ib_port_resources *ports =
21337722f47eSHaggai Eran 		container_of(work, struct mlx5_ib_port_resources,
21347722f47eSHaggai Eran 			     pkey_change_work);
21357722f47eSHaggai Eran 
21367722f47eSHaggai Eran 	mutex_lock(&ports->devr->mutex);
21377722f47eSHaggai Eran 	mlx5_ib_gsi_pkey_change(ports->gsi);
21387722f47eSHaggai Eran 	mutex_unlock(&ports->devr->mutex);
21397722f47eSHaggai Eran }
21407722f47eSHaggai Eran 
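/*
 * mlx5_ib_handle_internal_error - reset-flow helper invoked on
 * MLX5_DEV_EVENT_SYS_ERROR. It walks every QP on the device and, for each
 * send or receive queue that still has posted work, queues the matching CQ
 * on a local list; after all QP/CQ locks have been cycled it invokes each
 * collected CQ's completion handler so consumers can reap their
 * completions even though the hardware has stopped processing work.
 */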
214189ea94a7SMaor Gottlieb static void mlx5_ib_handle_internal_error(struct mlx5_ib_dev *ibdev)
214289ea94a7SMaor Gottlieb {
214389ea94a7SMaor Gottlieb 	struct mlx5_ib_qp *mqp;
214489ea94a7SMaor Gottlieb 	struct mlx5_ib_cq *send_mcq, *recv_mcq;
214589ea94a7SMaor Gottlieb 	struct mlx5_core_cq *mcq;
214689ea94a7SMaor Gottlieb 	struct list_head cq_armed_list;
214789ea94a7SMaor Gottlieb 	unsigned long flags_qp;
214889ea94a7SMaor Gottlieb 	unsigned long flags_cq;
214989ea94a7SMaor Gottlieb 	unsigned long flags;
215089ea94a7SMaor Gottlieb 
215189ea94a7SMaor Gottlieb 	INIT_LIST_HEAD(&cq_armed_list);
215289ea94a7SMaor Gottlieb 
215389ea94a7SMaor Gottlieb 	/* Walk the QP list on this ibdev; synchronized against QP create/destroy. */
215489ea94a7SMaor Gottlieb 	spin_lock_irqsave(&ibdev->reset_flow_resource_lock, flags);
215589ea94a7SMaor Gottlieb 	list_for_each_entry(mqp, &ibdev->qp_list, qps_list) {
215689ea94a7SMaor Gottlieb 		spin_lock_irqsave(&mqp->sq.lock, flags_qp);
215789ea94a7SMaor Gottlieb 		if (mqp->sq.tail != mqp->sq.head) {
215889ea94a7SMaor Gottlieb 			send_mcq = to_mcq(mqp->ibqp.send_cq);
215989ea94a7SMaor Gottlieb 			spin_lock_irqsave(&send_mcq->lock, flags_cq);
216089ea94a7SMaor Gottlieb 			if (send_mcq->mcq.comp &&
216189ea94a7SMaor Gottlieb 			    mqp->ibqp.send_cq->comp_handler) {
216289ea94a7SMaor Gottlieb 				if (!send_mcq->mcq.reset_notify_added) {
216389ea94a7SMaor Gottlieb 					send_mcq->mcq.reset_notify_added = 1;
216489ea94a7SMaor Gottlieb 					list_add_tail(&send_mcq->mcq.reset_notify,
216589ea94a7SMaor Gottlieb 						      &cq_armed_list);
216689ea94a7SMaor Gottlieb 				}
216789ea94a7SMaor Gottlieb 			}
216889ea94a7SMaor Gottlieb 			spin_unlock_irqrestore(&send_mcq->lock, flags_cq);
216989ea94a7SMaor Gottlieb 		}
217089ea94a7SMaor Gottlieb 		spin_unlock_irqrestore(&mqp->sq.lock, flags_qp);
217189ea94a7SMaor Gottlieb 		spin_lock_irqsave(&mqp->rq.lock, flags_qp);
217289ea94a7SMaor Gottlieb 		/* When the QP is attached to an SRQ, no RQ handling is needed here. */
217389ea94a7SMaor Gottlieb 		if (!mqp->ibqp.srq) {
217489ea94a7SMaor Gottlieb 			if (mqp->rq.tail != mqp->rq.head) {
217589ea94a7SMaor Gottlieb 				recv_mcq = to_mcq(mqp->ibqp.recv_cq);
217689ea94a7SMaor Gottlieb 				spin_lock_irqsave(&recv_mcq->lock, flags_cq);
217789ea94a7SMaor Gottlieb 				if (recv_mcq->mcq.comp &&
217889ea94a7SMaor Gottlieb 				    mqp->ibqp.recv_cq->comp_handler) {
217989ea94a7SMaor Gottlieb 					if (!recv_mcq->mcq.reset_notify_added) {
218089ea94a7SMaor Gottlieb 						recv_mcq->mcq.reset_notify_added = 1;
218189ea94a7SMaor Gottlieb 						list_add_tail(&recv_mcq->mcq.reset_notify,
218289ea94a7SMaor Gottlieb 							      &cq_armed_list);
218389ea94a7SMaor Gottlieb 					}
218489ea94a7SMaor Gottlieb 				}
218589ea94a7SMaor Gottlieb 				spin_unlock_irqrestore(&recv_mcq->lock,
218689ea94a7SMaor Gottlieb 						       flags_cq);
218789ea94a7SMaor Gottlieb 			}
218889ea94a7SMaor Gottlieb 		}
218989ea94a7SMaor Gottlieb 		spin_unlock_irqrestore(&mqp->rq.lock, flags_qp);
219089ea94a7SMaor Gottlieb 	}
219189ea94a7SMaor Gottlieb 	/* At this point all in-flight post_send work has been flushed by the
219289ea94a7SMaor Gottlieb 	 * lock/unlock cycles above; now invoke the completion handler of each
219389ea94a7SMaor Gottlieb 	 * CQ collected on the armed list. */
219489ea94a7SMaor Gottlieb 	list_for_each_entry(mcq, &cq_armed_list, reset_notify) {
219589ea94a7SMaor Gottlieb 		mcq->comp(mcq);
219689ea94a7SMaor Gottlieb 	}
219789ea94a7SMaor Gottlieb 	spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags);
219889ea94a7SMaor Gottlieb }
219989ea94a7SMaor Gottlieb 
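/*
 * mlx5_ib_event - mlx5_core to ib_core event bridge. Translates low-level
 * device events into a struct ib_event and dispatches it while the device
 * is marked active. Port-scoped events carry the port number in @param;
 * a P_Key change additionally schedules the per-port pkey_change_work.
 */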
22009603b61dSJack Morgenstein static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
22014d2f9bbbSJack Morgenstein 			  enum mlx5_dev_event event, unsigned long param)
2202e126ba97SEli Cohen {
22039603b61dSJack Morgenstein 	struct mlx5_ib_dev *ibdev = (struct mlx5_ib_dev *)context;
2204e126ba97SEli Cohen 	struct ib_event ibev;
22059603b61dSJack Morgenstein 
2206e126ba97SEli Cohen 	u8 port = 0;
2207e126ba97SEli Cohen 
2208e126ba97SEli Cohen 	switch (event) {
2209e126ba97SEli Cohen 	case MLX5_DEV_EVENT_SYS_ERROR:
2210e126ba97SEli Cohen 		ibdev->ib_active = false;
2211e126ba97SEli Cohen 		ibev.event = IB_EVENT_DEVICE_FATAL;
221289ea94a7SMaor Gottlieb 		mlx5_ib_handle_internal_error(ibdev);
2213e126ba97SEli Cohen 		break;
2214e126ba97SEli Cohen 
2215e126ba97SEli Cohen 	case MLX5_DEV_EVENT_PORT_UP:
2216e126ba97SEli Cohen 		ibev.event = IB_EVENT_PORT_ACTIVE;
22174d2f9bbbSJack Morgenstein 		port = (u8)param;
2218e126ba97SEli Cohen 		break;
2219e126ba97SEli Cohen 
2220e126ba97SEli Cohen 	case MLX5_DEV_EVENT_PORT_DOWN:
22212788cf3bSNoa Osherovich 	case MLX5_DEV_EVENT_PORT_INITIALIZED:
2222e126ba97SEli Cohen 		ibev.event = IB_EVENT_PORT_ERR;
22234d2f9bbbSJack Morgenstein 		port = (u8)param;
2224e126ba97SEli Cohen 		break;
2225e126ba97SEli Cohen 
2226e126ba97SEli Cohen 	case MLX5_DEV_EVENT_LID_CHANGE:
2227e126ba97SEli Cohen 		ibev.event = IB_EVENT_LID_CHANGE;
22284d2f9bbbSJack Morgenstein 		port = (u8)param;
2229e126ba97SEli Cohen 		break;
2230e126ba97SEli Cohen 
2231e126ba97SEli Cohen 	case MLX5_DEV_EVENT_PKEY_CHANGE:
2232e126ba97SEli Cohen 		ibev.event = IB_EVENT_PKEY_CHANGE;
22334d2f9bbbSJack Morgenstein 		port = (u8)param;
22347722f47eSHaggai Eran 
22357722f47eSHaggai Eran 		schedule_work(&ibdev->devr.ports[port - 1].pkey_change_work);
2236e126ba97SEli Cohen 		break;
2237e126ba97SEli Cohen 
2238e126ba97SEli Cohen 	case MLX5_DEV_EVENT_GUID_CHANGE:
2239e126ba97SEli Cohen 		ibev.event = IB_EVENT_GID_CHANGE;
22404d2f9bbbSJack Morgenstein 		port = (u8)param;
2241e126ba97SEli Cohen 		break;
2242e126ba97SEli Cohen 
2243e126ba97SEli Cohen 	case MLX5_DEV_EVENT_CLIENT_REREG:
2244e126ba97SEli Cohen 		ibev.event = IB_EVENT_CLIENT_REREGISTER;
22454d2f9bbbSJack Morgenstein 		port = (u8)param;
2246e126ba97SEli Cohen 		break;
	default:
		/* Unhandled event: ibev would be left uninitialised, so bail out. */
		return;
2247e126ba97SEli Cohen 	}
2248e126ba97SEli Cohen 
2249e126ba97SEli Cohen 	ibev.device	      = &ibdev->ib_dev;
2250e126ba97SEli Cohen 	ibev.element.port_num = port;
2251e126ba97SEli Cohen 
2252a0c84c32SEli Cohen 	if (port < 1 || port > ibdev->num_ports) {
2253a0c84c32SEli Cohen 		mlx5_ib_warn(ibdev, "warning: event on port %d\n", port);
2254a0c84c32SEli Cohen 		return;
2255a0c84c32SEli Cohen 	}
2256a0c84c32SEli Cohen 
2257e126ba97SEli Cohen 	if (ibdev->ib_active)
2258e126ba97SEli Cohen 		ib_dispatch_event(&ibev);
2259e126ba97SEli Cohen }
2260e126ba97SEli Cohen 
2261e126ba97SEli Cohen static void get_ext_port_caps(struct mlx5_ib_dev *dev)
2262e126ba97SEli Cohen {
2263e126ba97SEli Cohen 	int port;
2264e126ba97SEli Cohen 
2265938fe83cSSaeed Mahameed 	for (port = 1; port <= MLX5_CAP_GEN(dev->mdev, num_ports); port++)
2266e126ba97SEli Cohen 		mlx5_query_ext_port_caps(dev, port);
2267e126ba97SEli Cohen }
2268e126ba97SEli Cohen 
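/*
 * get_port_caps - cache the per-port P_Key and GID table sizes in the core
 * device's port_caps[] by going through the regular verbs query paths.
 * A failure on any port aborts the loop and is returned to the caller.
 */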
2269e126ba97SEli Cohen static int get_port_caps(struct mlx5_ib_dev *dev)
2270e126ba97SEli Cohen {
2271e126ba97SEli Cohen 	struct ib_device_attr *dprops = NULL;
2272e126ba97SEli Cohen 	struct ib_port_attr *pprops = NULL;
2273f614fc15SDan Carpenter 	int err = -ENOMEM;
2274e126ba97SEli Cohen 	int port;
22752528e33eSMatan Barak 	struct ib_udata uhw = {.inlen = 0, .outlen = 0};
2276e126ba97SEli Cohen 
2277e126ba97SEli Cohen 	pprops = kmalloc(sizeof(*pprops), GFP_KERNEL);
2278e126ba97SEli Cohen 	if (!pprops)
2279e126ba97SEli Cohen 		goto out;
2280e126ba97SEli Cohen 
2281e126ba97SEli Cohen 	dprops = kmalloc(sizeof(*dprops), GFP_KERNEL);
2282e126ba97SEli Cohen 	if (!dprops)
2283e126ba97SEli Cohen 		goto out;
2284e126ba97SEli Cohen 
22852528e33eSMatan Barak 	err = mlx5_ib_query_device(&dev->ib_dev, dprops, &uhw);
2286e126ba97SEli Cohen 	if (err) {
2287e126ba97SEli Cohen 		mlx5_ib_warn(dev, "query_device failed %d\n", err);
2288e126ba97SEli Cohen 		goto out;
2289e126ba97SEli Cohen 	}
2290e126ba97SEli Cohen 
2291938fe83cSSaeed Mahameed 	for (port = 1; port <= MLX5_CAP_GEN(dev->mdev, num_ports); port++) {
2292e126ba97SEli Cohen 		err = mlx5_ib_query_port(&dev->ib_dev, port, pprops);
2293e126ba97SEli Cohen 		if (err) {
2294938fe83cSSaeed Mahameed 			mlx5_ib_warn(dev, "query_port %d failed %d\n",
2295938fe83cSSaeed Mahameed 				     port, err);
2296e126ba97SEli Cohen 			break;
2297e126ba97SEli Cohen 		}
2298938fe83cSSaeed Mahameed 		dev->mdev->port_caps[port - 1].pkey_table_len =
2299938fe83cSSaeed Mahameed 						dprops->max_pkeys;
2300938fe83cSSaeed Mahameed 		dev->mdev->port_caps[port - 1].gid_table_len =
2301938fe83cSSaeed Mahameed 						pprops->gid_tbl_len;
2302e126ba97SEli Cohen 		mlx5_ib_dbg(dev, "pkey_table_len %d, gid_table_len %d\n",
2303e126ba97SEli Cohen 			    dprops->max_pkeys, pprops->gid_tbl_len);
2304e126ba97SEli Cohen 	}
2305e126ba97SEli Cohen 
2306e126ba97SEli Cohen out:
2307e126ba97SEli Cohen 	kfree(pprops);
2308e126ba97SEli Cohen 	kfree(dprops);
2309e126ba97SEli Cohen 
2310e126ba97SEli Cohen 	return err;
2311e126ba97SEli Cohen }
2312e126ba97SEli Cohen 
2313e126ba97SEli Cohen static void destroy_umrc_res(struct mlx5_ib_dev *dev)
2314e126ba97SEli Cohen {
2315e126ba97SEli Cohen 	int err;
2316e126ba97SEli Cohen 
2317e126ba97SEli Cohen 	err = mlx5_mr_cache_cleanup(dev);
2318e126ba97SEli Cohen 	if (err)
2319e126ba97SEli Cohen 		mlx5_ib_warn(dev, "mr cache cleanup failed\n");
2320e126ba97SEli Cohen 
2321e126ba97SEli Cohen 	mlx5_ib_destroy_qp(dev->umrc.qp);
2322add08d76SChristoph Hellwig 	ib_free_cq(dev->umrc.cq);
2323e126ba97SEli Cohen 	ib_dealloc_pd(dev->umrc.pd);
2324e126ba97SEli Cohen }
2325e126ba97SEli Cohen 
2326e126ba97SEli Cohen enum {
2327e126ba97SEli Cohen 	MAX_UMR_WR = 128,
2328e126ba97SEli Cohen };
2329e126ba97SEli Cohen 
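/*
 * create_umr_res - set up the driver-internal resources used for UMR
 * (user-memory registration) work requests: a PD, a soft-IRQ CQ and a
 * MLX5_IB_QPT_REG_UMR QP. The QP is kernel-owned, so it is driven through
 * INIT -> RTR -> RTS by hand below; the semaphore initialised here limits
 * outstanding UMR WRs to MAX_UMR_WR.
 */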
2330e126ba97SEli Cohen static int create_umr_res(struct mlx5_ib_dev *dev)
2331e126ba97SEli Cohen {
2332e126ba97SEli Cohen 	struct ib_qp_init_attr *init_attr = NULL;
2333e126ba97SEli Cohen 	struct ib_qp_attr *attr = NULL;
2334e126ba97SEli Cohen 	struct ib_pd *pd;
2335e126ba97SEli Cohen 	struct ib_cq *cq;
2336e126ba97SEli Cohen 	struct ib_qp *qp;
2337e126ba97SEli Cohen 	int ret;
2338e126ba97SEli Cohen 
2339e126ba97SEli Cohen 	attr = kzalloc(sizeof(*attr), GFP_KERNEL);
2340e126ba97SEli Cohen 	init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL);
2341e126ba97SEli Cohen 	if (!attr || !init_attr) {
2342e126ba97SEli Cohen 		ret = -ENOMEM;
2343e126ba97SEli Cohen 		goto error_0;
2344e126ba97SEli Cohen 	}
2345e126ba97SEli Cohen 
2346ed082d36SChristoph Hellwig 	pd = ib_alloc_pd(&dev->ib_dev, 0);
2347e126ba97SEli Cohen 	if (IS_ERR(pd)) {
2348e126ba97SEli Cohen 		mlx5_ib_dbg(dev, "Couldn't create PD for sync UMR QP\n");
2349e126ba97SEli Cohen 		ret = PTR_ERR(pd);
2350e126ba97SEli Cohen 		goto error_0;
2351e126ba97SEli Cohen 	}
2352e126ba97SEli Cohen 
2353add08d76SChristoph Hellwig 	cq = ib_alloc_cq(&dev->ib_dev, NULL, 128, 0, IB_POLL_SOFTIRQ);
2354e126ba97SEli Cohen 	if (IS_ERR(cq)) {
2355e126ba97SEli Cohen 		mlx5_ib_dbg(dev, "Couldn't create CQ for sync UMR QP\n");
2356e126ba97SEli Cohen 		ret = PTR_ERR(cq);
2357e126ba97SEli Cohen 		goto error_2;
2358e126ba97SEli Cohen 	}
2359e126ba97SEli Cohen 
2360e126ba97SEli Cohen 	init_attr->send_cq = cq;
2361e126ba97SEli Cohen 	init_attr->recv_cq = cq;
2362e126ba97SEli Cohen 	init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
2363e126ba97SEli Cohen 	init_attr->cap.max_send_wr = MAX_UMR_WR;
2364e126ba97SEli Cohen 	init_attr->cap.max_send_sge = 1;
2365e126ba97SEli Cohen 	init_attr->qp_type = MLX5_IB_QPT_REG_UMR;
2366e126ba97SEli Cohen 	init_attr->port_num = 1;
2367e126ba97SEli Cohen 	qp = mlx5_ib_create_qp(pd, init_attr, NULL);
2368e126ba97SEli Cohen 	if (IS_ERR(qp)) {
2369e126ba97SEli Cohen 		mlx5_ib_dbg(dev, "Couldn't create sync UMR QP\n");
2370e126ba97SEli Cohen 		ret = PTR_ERR(qp);
2371e126ba97SEli Cohen 		goto error_3;
2372e126ba97SEli Cohen 	}
2373e126ba97SEli Cohen 	qp->device     = &dev->ib_dev;
2374e126ba97SEli Cohen 	qp->real_qp    = qp;
2375e126ba97SEli Cohen 	qp->uobject    = NULL;
2376e126ba97SEli Cohen 	qp->qp_type    = MLX5_IB_QPT_REG_UMR;
2377e126ba97SEli Cohen 
2378e126ba97SEli Cohen 	attr->qp_state = IB_QPS_INIT;
2379e126ba97SEli Cohen 	attr->port_num = 1;
2380e126ba97SEli Cohen 	ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_PKEY_INDEX |
2381e126ba97SEli Cohen 				IB_QP_PORT, NULL);
2382e126ba97SEli Cohen 	if (ret) {
2383e126ba97SEli Cohen 		mlx5_ib_dbg(dev, "Couldn't modify UMR QP\n");
2384e126ba97SEli Cohen 		goto error_4;
2385e126ba97SEli Cohen 	}
2386e126ba97SEli Cohen 
2387e126ba97SEli Cohen 	memset(attr, 0, sizeof(*attr));
2388e126ba97SEli Cohen 	attr->qp_state = IB_QPS_RTR;
2389e126ba97SEli Cohen 	attr->path_mtu = IB_MTU_256;
2390e126ba97SEli Cohen 
2391e126ba97SEli Cohen 	ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE, NULL);
2392e126ba97SEli Cohen 	if (ret) {
2393e126ba97SEli Cohen 		mlx5_ib_dbg(dev, "Couldn't modify umr QP to rtr\n");
2394e126ba97SEli Cohen 		goto error_4;
2395e126ba97SEli Cohen 	}
2396e126ba97SEli Cohen 
2397e126ba97SEli Cohen 	memset(attr, 0, sizeof(*attr));
2398e126ba97SEli Cohen 	attr->qp_state = IB_QPS_RTS;
2399e126ba97SEli Cohen 	ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE, NULL);
2400e126ba97SEli Cohen 	if (ret) {
2401e126ba97SEli Cohen 		mlx5_ib_dbg(dev, "Couldn't modify umr QP to rts\n");
2402e126ba97SEli Cohen 		goto error_4;
2403e126ba97SEli Cohen 	}
2404e126ba97SEli Cohen 
2405e126ba97SEli Cohen 	dev->umrc.qp = qp;
2406e126ba97SEli Cohen 	dev->umrc.cq = cq;
2407e126ba97SEli Cohen 	dev->umrc.pd = pd;
2408e126ba97SEli Cohen 
2409e126ba97SEli Cohen 	sema_init(&dev->umrc.sem, MAX_UMR_WR);
2410e126ba97SEli Cohen 	ret = mlx5_mr_cache_init(dev);
2411e126ba97SEli Cohen 	if (ret) {
2412e126ba97SEli Cohen 		mlx5_ib_warn(dev, "mr cache init failed %d\n", ret);
2413e126ba97SEli Cohen 		goto error_4;
2414e126ba97SEli Cohen 	}
2415e126ba97SEli Cohen 
2416e126ba97SEli Cohen 	kfree(attr);
2417e126ba97SEli Cohen 	kfree(init_attr);
2418e126ba97SEli Cohen 
2419e126ba97SEli Cohen 	return 0;
2420e126ba97SEli Cohen 
2421e126ba97SEli Cohen error_4:
2422e126ba97SEli Cohen 	mlx5_ib_destroy_qp(qp);
2423e126ba97SEli Cohen 
2424e126ba97SEli Cohen error_3:
2425add08d76SChristoph Hellwig 	ib_free_cq(cq);
2426e126ba97SEli Cohen 
2427e126ba97SEli Cohen error_2:
2428e126ba97SEli Cohen 	ib_dealloc_pd(pd);
2429e126ba97SEli Cohen 
2430e126ba97SEli Cohen error_0:
2431e126ba97SEli Cohen 	kfree(attr);
2432e126ba97SEli Cohen 	kfree(init_attr);
2433e126ba97SEli Cohen 	return ret;
2434e126ba97SEli Cohen }
2435e126ba97SEli Cohen 
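/*
 * create_dev_resources - allocate the objects the driver itself needs
 * before any user-space consumer exists: a PD (p0), a CQ (c0), two XRC
 * domains (x0, x1), an XRC SRQ (s0) and a basic SRQ (s1), plus the
 * per-port P_Key change work items. The ib_* object fields are filled in
 * manually because these objects are created below the uverbs layer.
 */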
2436e126ba97SEli Cohen static int create_dev_resources(struct mlx5_ib_resources *devr)
2437e126ba97SEli Cohen {
2438e126ba97SEli Cohen 	struct ib_srq_init_attr attr;
2439e126ba97SEli Cohen 	struct mlx5_ib_dev *dev;
2440bcf4c1eaSMatan Barak 	struct ib_cq_init_attr cq_attr = {.cqe = 1};
24417722f47eSHaggai Eran 	int port;
2442e126ba97SEli Cohen 	int ret = 0;
2443e126ba97SEli Cohen 
2444e126ba97SEli Cohen 	dev = container_of(devr, struct mlx5_ib_dev, devr);
2445e126ba97SEli Cohen 
2446d16e91daSHaggai Eran 	mutex_init(&devr->mutex);
2447d16e91daSHaggai Eran 
2448e126ba97SEli Cohen 	devr->p0 = mlx5_ib_alloc_pd(&dev->ib_dev, NULL, NULL);
2449e126ba97SEli Cohen 	if (IS_ERR(devr->p0)) {
2450e126ba97SEli Cohen 		ret = PTR_ERR(devr->p0);
2451e126ba97SEli Cohen 		goto error0;
2452e126ba97SEli Cohen 	}
2453e126ba97SEli Cohen 	devr->p0->device  = &dev->ib_dev;
2454e126ba97SEli Cohen 	devr->p0->uobject = NULL;
2455e126ba97SEli Cohen 	atomic_set(&devr->p0->usecnt, 0);
2456e126ba97SEli Cohen 
2457bcf4c1eaSMatan Barak 	devr->c0 = mlx5_ib_create_cq(&dev->ib_dev, &cq_attr, NULL, NULL);
2458e126ba97SEli Cohen 	if (IS_ERR(devr->c0)) {
2459e126ba97SEli Cohen 		ret = PTR_ERR(devr->c0);
2460e126ba97SEli Cohen 		goto error1;
2461e126ba97SEli Cohen 	}
2462e126ba97SEli Cohen 	devr->c0->device        = &dev->ib_dev;
2463e126ba97SEli Cohen 	devr->c0->uobject       = NULL;
2464e126ba97SEli Cohen 	devr->c0->comp_handler  = NULL;
2465e126ba97SEli Cohen 	devr->c0->event_handler = NULL;
2466e126ba97SEli Cohen 	devr->c0->cq_context    = NULL;
2467e126ba97SEli Cohen 	atomic_set(&devr->c0->usecnt, 0);
2468e126ba97SEli Cohen 
2469e126ba97SEli Cohen 	devr->x0 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL, NULL);
2470e126ba97SEli Cohen 	if (IS_ERR(devr->x0)) {
2471e126ba97SEli Cohen 		ret = PTR_ERR(devr->x0);
2472e126ba97SEli Cohen 		goto error2;
2473e126ba97SEli Cohen 	}
2474e126ba97SEli Cohen 	devr->x0->device = &dev->ib_dev;
2475e126ba97SEli Cohen 	devr->x0->inode = NULL;
2476e126ba97SEli Cohen 	atomic_set(&devr->x0->usecnt, 0);
2477e126ba97SEli Cohen 	mutex_init(&devr->x0->tgt_qp_mutex);
2478e126ba97SEli Cohen 	INIT_LIST_HEAD(&devr->x0->tgt_qp_list);
2479e126ba97SEli Cohen 
2480e126ba97SEli Cohen 	devr->x1 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL, NULL);
2481e126ba97SEli Cohen 	if (IS_ERR(devr->x1)) {
2482e126ba97SEli Cohen 		ret = PTR_ERR(devr->x1);
2483e126ba97SEli Cohen 		goto error3;
2484e126ba97SEli Cohen 	}
2485e126ba97SEli Cohen 	devr->x1->device = &dev->ib_dev;
2486e126ba97SEli Cohen 	devr->x1->inode = NULL;
2487e126ba97SEli Cohen 	atomic_set(&devr->x1->usecnt, 0);
2488e126ba97SEli Cohen 	mutex_init(&devr->x1->tgt_qp_mutex);
2489e126ba97SEli Cohen 	INIT_LIST_HEAD(&devr->x1->tgt_qp_list);
2490e126ba97SEli Cohen 
2491e126ba97SEli Cohen 	memset(&attr, 0, sizeof(attr));
2492e126ba97SEli Cohen 	attr.attr.max_sge = 1;
2493e126ba97SEli Cohen 	attr.attr.max_wr = 1;
2494e126ba97SEli Cohen 	attr.srq_type = IB_SRQT_XRC;
2495e126ba97SEli Cohen 	attr.ext.xrc.cq = devr->c0;
2496e126ba97SEli Cohen 	attr.ext.xrc.xrcd = devr->x0;
2497e126ba97SEli Cohen 
2498e126ba97SEli Cohen 	devr->s0 = mlx5_ib_create_srq(devr->p0, &attr, NULL);
2499e126ba97SEli Cohen 	if (IS_ERR(devr->s0)) {
2500e126ba97SEli Cohen 		ret = PTR_ERR(devr->s0);
2501e126ba97SEli Cohen 		goto error4;
2502e126ba97SEli Cohen 	}
2503e126ba97SEli Cohen 	devr->s0->device	= &dev->ib_dev;
2504e126ba97SEli Cohen 	devr->s0->pd		= devr->p0;
2505e126ba97SEli Cohen 	devr->s0->uobject       = NULL;
2506e126ba97SEli Cohen 	devr->s0->event_handler = NULL;
2507e126ba97SEli Cohen 	devr->s0->srq_context   = NULL;
2508e126ba97SEli Cohen 	devr->s0->srq_type      = IB_SRQT_XRC;
2509e126ba97SEli Cohen 	devr->s0->ext.xrc.xrcd	= devr->x0;
2510e126ba97SEli Cohen 	devr->s0->ext.xrc.cq	= devr->c0;
2511e126ba97SEli Cohen 	atomic_inc(&devr->s0->ext.xrc.xrcd->usecnt);
2512e126ba97SEli Cohen 	atomic_inc(&devr->s0->ext.xrc.cq->usecnt);
2513e126ba97SEli Cohen 	atomic_inc(&devr->p0->usecnt);
2514e126ba97SEli Cohen 	atomic_set(&devr->s0->usecnt, 0);
2515e126ba97SEli Cohen 
25164aa17b28SHaggai Abramonvsky 	memset(&attr, 0, sizeof(attr));
25174aa17b28SHaggai Abramonvsky 	attr.attr.max_sge = 1;
25184aa17b28SHaggai Abramonvsky 	attr.attr.max_wr = 1;
25194aa17b28SHaggai Abramonvsky 	attr.srq_type = IB_SRQT_BASIC;
25204aa17b28SHaggai Abramonvsky 	devr->s1 = mlx5_ib_create_srq(devr->p0, &attr, NULL);
25214aa17b28SHaggai Abramonvsky 	if (IS_ERR(devr->s1)) {
25224aa17b28SHaggai Abramonvsky 		ret = PTR_ERR(devr->s1);
25234aa17b28SHaggai Abramonvsky 		goto error5;
25244aa17b28SHaggai Abramonvsky 	}
25254aa17b28SHaggai Abramonvsky 	devr->s1->device	= &dev->ib_dev;
25264aa17b28SHaggai Abramonvsky 	devr->s1->pd		= devr->p0;
25274aa17b28SHaggai Abramonvsky 	devr->s1->uobject       = NULL;
25284aa17b28SHaggai Abramonvsky 	devr->s1->event_handler = NULL;
25294aa17b28SHaggai Abramonvsky 	devr->s1->srq_context   = NULL;
25304aa17b28SHaggai Abramonvsky 	devr->s1->srq_type      = IB_SRQT_BASIC;
25314aa17b28SHaggai Abramonvsky 	devr->s1->ext.xrc.cq	= devr->c0;
25324aa17b28SHaggai Abramonvsky 	atomic_inc(&devr->p0->usecnt);
25334aa17b28SHaggai Abramonvsky 	atomic_set(&devr->s1->usecnt, 0);
25344aa17b28SHaggai Abramonvsky 
25357722f47eSHaggai Eran 	for (port = 0; port < ARRAY_SIZE(devr->ports); ++port) {
25367722f47eSHaggai Eran 		INIT_WORK(&devr->ports[port].pkey_change_work,
25377722f47eSHaggai Eran 			  pkey_change_handler);
25387722f47eSHaggai Eran 		devr->ports[port].devr = devr;
25397722f47eSHaggai Eran 	}
25407722f47eSHaggai Eran 
2541e126ba97SEli Cohen 	return 0;
2542e126ba97SEli Cohen 
25434aa17b28SHaggai Abramonvsky error5:
25444aa17b28SHaggai Abramonvsky 	mlx5_ib_destroy_srq(devr->s0);
2545e126ba97SEli Cohen error4:
2546e126ba97SEli Cohen 	mlx5_ib_dealloc_xrcd(devr->x1);
2547e126ba97SEli Cohen error3:
2548e126ba97SEli Cohen 	mlx5_ib_dealloc_xrcd(devr->x0);
2549e126ba97SEli Cohen error2:
2550e126ba97SEli Cohen 	mlx5_ib_destroy_cq(devr->c0);
2551e126ba97SEli Cohen error1:
2552e126ba97SEli Cohen 	mlx5_ib_dealloc_pd(devr->p0);
2553e126ba97SEli Cohen error0:
2554e126ba97SEli Cohen 	return ret;
2555e126ba97SEli Cohen }
2556e126ba97SEli Cohen 
2557e126ba97SEli Cohen static void destroy_dev_resources(struct mlx5_ib_resources *devr)
2558e126ba97SEli Cohen {
25597722f47eSHaggai Eran 	struct mlx5_ib_dev *dev =
25607722f47eSHaggai Eran 		container_of(devr, struct mlx5_ib_dev, devr);
25617722f47eSHaggai Eran 	int port;
25627722f47eSHaggai Eran 
25634aa17b28SHaggai Abramonvsky 	mlx5_ib_destroy_srq(devr->s1);
2564e126ba97SEli Cohen 	mlx5_ib_destroy_srq(devr->s0);
2565e126ba97SEli Cohen 	mlx5_ib_dealloc_xrcd(devr->x0);
2566e126ba97SEli Cohen 	mlx5_ib_dealloc_xrcd(devr->x1);
2567e126ba97SEli Cohen 	mlx5_ib_destroy_cq(devr->c0);
2568e126ba97SEli Cohen 	mlx5_ib_dealloc_pd(devr->p0);
25697722f47eSHaggai Eran 
25707722f47eSHaggai Eran 	/* Make sure no P_Key change work items are still executing */
25717722f47eSHaggai Eran 	for (port = 0; port < dev->num_ports; ++port)
25727722f47eSHaggai Eran 		cancel_work_sync(&devr->ports[port].pkey_change_work);
2573e126ba97SEli Cohen }
2574e126ba97SEli Cohen 
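/*
 * get_core_cap_flags - derive the rdma core port capability flags from the
 * device caps: plain IB when the link layer is InfiniBand, otherwise RoCE
 * v1 and/or v2 (UDP encap) according to the reported roce_version
 * capability, and only when both IPv4 and IPv6 L3 types are supported.
 */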
2575e53505a8SAchiad Shochat static u32 get_core_cap_flags(struct ib_device *ibdev)
2576e53505a8SAchiad Shochat {
2577e53505a8SAchiad Shochat 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
2578e53505a8SAchiad Shochat 	enum rdma_link_layer ll = mlx5_ib_port_link_layer(ibdev, 1);
2579e53505a8SAchiad Shochat 	u8 l3_type_cap = MLX5_CAP_ROCE(dev->mdev, l3_type);
2580e53505a8SAchiad Shochat 	u8 roce_version_cap = MLX5_CAP_ROCE(dev->mdev, roce_version);
2581e53505a8SAchiad Shochat 	u32 ret = 0;
2582e53505a8SAchiad Shochat 
2583e53505a8SAchiad Shochat 	if (ll == IB_LINK_LAYER_INFINIBAND)
2584e53505a8SAchiad Shochat 		return RDMA_CORE_PORT_IBA_IB;
2585e53505a8SAchiad Shochat 
2586e53505a8SAchiad Shochat 	if (!(l3_type_cap & MLX5_ROCE_L3_TYPE_IPV4_CAP))
2587e53505a8SAchiad Shochat 		return 0;
2588e53505a8SAchiad Shochat 
2589e53505a8SAchiad Shochat 	if (!(l3_type_cap & MLX5_ROCE_L3_TYPE_IPV6_CAP))
2590e53505a8SAchiad Shochat 		return 0;
2591e53505a8SAchiad Shochat 
2592e53505a8SAchiad Shochat 	if (roce_version_cap & MLX5_ROCE_VERSION_1_CAP)
2593e53505a8SAchiad Shochat 		ret |= RDMA_CORE_PORT_IBA_ROCE;
2594e53505a8SAchiad Shochat 
2595e53505a8SAchiad Shochat 	if (roce_version_cap & MLX5_ROCE_VERSION_2_CAP)
2596e53505a8SAchiad Shochat 		ret |= RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
2597e53505a8SAchiad Shochat 
2598e53505a8SAchiad Shochat 	return ret;
2599e53505a8SAchiad Shochat }
2600e53505a8SAchiad Shochat 
26017738613eSIra Weiny static int mlx5_port_immutable(struct ib_device *ibdev, u8 port_num,
26027738613eSIra Weiny 			       struct ib_port_immutable *immutable)
26037738613eSIra Weiny {
26047738613eSIra Weiny 	struct ib_port_attr attr;
26057738613eSIra Weiny 	int err;
26067738613eSIra Weiny 
26077738613eSIra Weiny 	err = mlx5_ib_query_port(ibdev, port_num, &attr);
26087738613eSIra Weiny 	if (err)
26097738613eSIra Weiny 		return err;
26107738613eSIra Weiny 
26117738613eSIra Weiny 	immutable->pkey_tbl_len = attr.pkey_tbl_len;
26127738613eSIra Weiny 	immutable->gid_tbl_len = attr.gid_tbl_len;
2613e53505a8SAchiad Shochat 	immutable->core_cap_flags = get_core_cap_flags(ibdev);
2614337877a4SIra Weiny 	immutable->max_mad_size = IB_MGMT_MAD_SIZE;
26157738613eSIra Weiny 
26167738613eSIra Weiny 	return 0;
26177738613eSIra Weiny }
26187738613eSIra Weiny 
2619c7342823SIra Weiny static void get_dev_fw_str(struct ib_device *ibdev, char *str,
2620c7342823SIra Weiny 			   size_t str_len)
2621c7342823SIra Weiny {
2622c7342823SIra Weiny 	struct mlx5_ib_dev *dev =
2623c7342823SIra Weiny 		container_of(ibdev, struct mlx5_ib_dev, ib_dev);
2624c7342823SIra Weiny 	snprintf(str, str_len, "%d.%d.%04d", fw_rev_maj(dev->mdev),
2625c7342823SIra Weiny 		       fw_rev_min(dev->mdev), fw_rev_sub(dev->mdev));
2626c7342823SIra Weiny }
2627c7342823SIra Weiny 
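/*
 * RoCE bring-up/teardown: register a netdevice notifier so the driver can
 * track the underlying Ethernet device, then enable RoCE on the NIC vport.
 * The teardown path mirrors this in reverse order.
 */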
2628fc24fc5eSAchiad Shochat static int mlx5_enable_roce(struct mlx5_ib_dev *dev)
2629fc24fc5eSAchiad Shochat {
2630e53505a8SAchiad Shochat 	int err;
2631e53505a8SAchiad Shochat 
2632fc24fc5eSAchiad Shochat 	dev->roce.nb.notifier_call = mlx5_netdev_event;
2633e53505a8SAchiad Shochat 	err = register_netdevice_notifier(&dev->roce.nb);
2634e53505a8SAchiad Shochat 	if (err)
2635e53505a8SAchiad Shochat 		return err;
2636e53505a8SAchiad Shochat 
2637e53505a8SAchiad Shochat 	err = mlx5_nic_vport_enable_roce(dev->mdev);
2638e53505a8SAchiad Shochat 	if (err)
2639e53505a8SAchiad Shochat 		goto err_unregister_netdevice_notifier;
2640e53505a8SAchiad Shochat 
2641e53505a8SAchiad Shochat 	return 0;
2642e53505a8SAchiad Shochat 
2643e53505a8SAchiad Shochat err_unregister_netdevice_notifier:
2644e53505a8SAchiad Shochat 	unregister_netdevice_notifier(&dev->roce.nb);
2645e53505a8SAchiad Shochat 	return err;
2646fc24fc5eSAchiad Shochat }
2647fc24fc5eSAchiad Shochat 
2648fc24fc5eSAchiad Shochat static void mlx5_disable_roce(struct mlx5_ib_dev *dev)
2649fc24fc5eSAchiad Shochat {
2650e53505a8SAchiad Shochat 	mlx5_nic_vport_disable_roce(dev->mdev);
2651fc24fc5eSAchiad Shochat 	unregister_netdevice_notifier(&dev->roce.nb);
2652fc24fc5eSAchiad Shochat }
2653fc24fc5eSAchiad Shochat 
26540837e86aSMark Bloch static void mlx5_ib_dealloc_q_counters(struct mlx5_ib_dev *dev)
26550837e86aSMark Bloch {
26560837e86aSMark Bloch 	unsigned int i;
26570837e86aSMark Bloch 
26580837e86aSMark Bloch 	for (i = 0; i < dev->num_ports; i++)
26590837e86aSMark Bloch 		mlx5_core_dealloc_q_counter(dev->mdev,
26600837e86aSMark Bloch 					    dev->port[i].q_cnt_id);
26610837e86aSMark Bloch }
26620837e86aSMark Bloch 
26630837e86aSMark Bloch static int mlx5_ib_alloc_q_counters(struct mlx5_ib_dev *dev)
26640837e86aSMark Bloch {
26650837e86aSMark Bloch 	int i;
26660837e86aSMark Bloch 	int ret;
26670837e86aSMark Bloch 
26680837e86aSMark Bloch 	for (i = 0; i < dev->num_ports; i++) {
26690837e86aSMark Bloch 		ret = mlx5_core_alloc_q_counter(dev->mdev,
26700837e86aSMark Bloch 						&dev->port[i].q_cnt_id);
26710837e86aSMark Bloch 		if (ret) {
26720837e86aSMark Bloch 			mlx5_ib_warn(dev,
26730837e86aSMark Bloch 				     "couldn't allocate queue counter for port %d, err %d\n",
26740837e86aSMark Bloch 				     i + 1, ret);
26750837e86aSMark Bloch 			goto dealloc_counters;
26760837e86aSMark Bloch 		}
26770837e86aSMark Bloch 	}
26780837e86aSMark Bloch 
26790837e86aSMark Bloch 	return 0;
26800837e86aSMark Bloch 
26810837e86aSMark Bloch dealloc_counters:
26820837e86aSMark Bloch 	while (--i >= 0)
26830837e86aSMark Bloch 		mlx5_core_dealloc_q_counter(dev->mdev,
26840837e86aSMark Bloch 					    dev->port[i].q_cnt_id);
26850837e86aSMark Bloch 
26860837e86aSMark Bloch 	return ret;
26870837e86aSMark Bloch }
26880837e86aSMark Bloch 
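/*
 * Per-port hardware counters exposed through the rdma_hw_stats interface.
 * names[] and stats_offsets[] must stay in the same order: entry i of
 * names[] is read from the query_q_counter_out field at stats_offsets[i].
 * The BUILD_BUG_ON in mlx5_ib_alloc_hw_stats() only enforces equal array
 * lengths, not the pairing itself.
 */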
268961961500SWei Yongjun static const char * const names[] = {
26900ad17a8fSMark Bloch 	"rx_write_requests",
26910ad17a8fSMark Bloch 	"rx_read_requests",
26920ad17a8fSMark Bloch 	"rx_atomic_requests",
26930ad17a8fSMark Bloch 	"out_of_buffer",
26940ad17a8fSMark Bloch 	"out_of_sequence",
26950ad17a8fSMark Bloch 	"duplicate_request",
26960ad17a8fSMark Bloch 	"rnr_nak_retry_err",
26970ad17a8fSMark Bloch 	"packet_seq_err",
26980ad17a8fSMark Bloch 	"implied_nak_seq_err",
26990ad17a8fSMark Bloch 	"local_ack_timeout_err",
27000ad17a8fSMark Bloch };
27010ad17a8fSMark Bloch 
27020ad17a8fSMark Bloch static const size_t stats_offsets[] = {
27030ad17a8fSMark Bloch 	MLX5_BYTE_OFF(query_q_counter_out, rx_write_requests),
27040ad17a8fSMark Bloch 	MLX5_BYTE_OFF(query_q_counter_out, rx_read_requests),
27050ad17a8fSMark Bloch 	MLX5_BYTE_OFF(query_q_counter_out, rx_atomic_requests),
27060ad17a8fSMark Bloch 	MLX5_BYTE_OFF(query_q_counter_out, out_of_buffer),
27070ad17a8fSMark Bloch 	MLX5_BYTE_OFF(query_q_counter_out, out_of_sequence),
27080ad17a8fSMark Bloch 	MLX5_BYTE_OFF(query_q_counter_out, duplicate_request),
27090ad17a8fSMark Bloch 	MLX5_BYTE_OFF(query_q_counter_out, rnr_nak_retry_err),
27100ad17a8fSMark Bloch 	MLX5_BYTE_OFF(query_q_counter_out, packet_seq_err),
27110ad17a8fSMark Bloch 	MLX5_BYTE_OFF(query_q_counter_out, implied_nak_seq_err),
27120ad17a8fSMark Bloch 	MLX5_BYTE_OFF(query_q_counter_out, local_ack_timeout_err),
27130ad17a8fSMark Bloch };
27140ad17a8fSMark Bloch 
27150ad17a8fSMark Bloch static struct rdma_hw_stats *mlx5_ib_alloc_hw_stats(struct ib_device *ibdev,
27160ad17a8fSMark Bloch 						    u8 port_num)
27170ad17a8fSMark Bloch {
27180ad17a8fSMark Bloch 	BUILD_BUG_ON(ARRAY_SIZE(names) != ARRAY_SIZE(stats_offsets));
27190ad17a8fSMark Bloch 
27200ad17a8fSMark Bloch 	/* We support only per port stats */
27210ad17a8fSMark Bloch 	/* We support only per-port stats */
27220ad17a8fSMark Bloch 		return NULL;
27230ad17a8fSMark Bloch 
27240ad17a8fSMark Bloch 	return rdma_alloc_hw_stats_struct(names, ARRAY_SIZE(names),
27250ad17a8fSMark Bloch 					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
27260ad17a8fSMark Bloch }
27270ad17a8fSMark Bloch 
27280ad17a8fSMark Bloch static int mlx5_ib_get_hw_stats(struct ib_device *ibdev,
27290ad17a8fSMark Bloch 				struct rdma_hw_stats *stats,
27300ad17a8fSMark Bloch 				u8 port, int index)
27310ad17a8fSMark Bloch {
27320ad17a8fSMark Bloch 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
27330ad17a8fSMark Bloch 	int outlen = MLX5_ST_SZ_BYTES(query_q_counter_out);
27340ad17a8fSMark Bloch 	void *out;
27350ad17a8fSMark Bloch 	__be32 val;
27360ad17a8fSMark Bloch 	int ret;
27370ad17a8fSMark Bloch 	int i;
27380ad17a8fSMark Bloch 
27390ad17a8fSMark Bloch 	if (!port || !stats)
27400ad17a8fSMark Bloch 		return -ENOSYS;
27410ad17a8fSMark Bloch 
27420ad17a8fSMark Bloch 	out = mlx5_vzalloc(outlen);
27430ad17a8fSMark Bloch 	if (!out)
27440ad17a8fSMark Bloch 		return -ENOMEM;
27450ad17a8fSMark Bloch 
27460ad17a8fSMark Bloch 	ret = mlx5_core_query_q_counter(dev->mdev,
27470ad17a8fSMark Bloch 					dev->port[port - 1].q_cnt_id, 0,
27480ad17a8fSMark Bloch 					out, outlen);
27490ad17a8fSMark Bloch 	if (ret)
27500ad17a8fSMark Bloch 		goto free;
27510ad17a8fSMark Bloch 
27520ad17a8fSMark Bloch 	for (i = 0; i < ARRAY_SIZE(names); i++) {
27530ad17a8fSMark Bloch 		val = *(__be32 *)(out + stats_offsets[i]);
27540ad17a8fSMark Bloch 		stats->value[i] = (u64)be32_to_cpu(val);
27550ad17a8fSMark Bloch 	}
27560ad17a8fSMark Bloch free:
27570ad17a8fSMark Bloch 	kvfree(out);
27580ad17a8fSMark Bloch 	return ARRAY_SIZE(names);
27590ad17a8fSMark Bloch }
27600ad17a8fSMark Bloch 
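/*
 * mlx5_ib_add - probe path for one mlx5 core device. Roughly: query port
 * caps, fill in the ib_device ops and uverbs command masks, enable RoCE
 * when the link layer is Ethernet, create the device resources, initialise
 * ODP, allocate Q counters, register with the IB core, create the UMR
 * resources and finally publish the sysfs attributes. The error labels
 * unwind in the reverse order.
 */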
27619603b61dSJack Morgenstein static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
2762e126ba97SEli Cohen {
2763e126ba97SEli Cohen 	struct mlx5_ib_dev *dev;
2764ebd61f68SAchiad Shochat 	enum rdma_link_layer ll;
2765ebd61f68SAchiad Shochat 	int port_type_cap;
2766e126ba97SEli Cohen 	int err;
2767e126ba97SEli Cohen 	int i;
2768e126ba97SEli Cohen 
2769ebd61f68SAchiad Shochat 	port_type_cap = MLX5_CAP_GEN(mdev, port_type);
2770ebd61f68SAchiad Shochat 	ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
2771ebd61f68SAchiad Shochat 
2772e53505a8SAchiad Shochat 	if ((ll == IB_LINK_LAYER_ETHERNET) && !MLX5_CAP_GEN(mdev, roce))
2773647241eaSMajd Dibbiny 		return NULL;
2774647241eaSMajd Dibbiny 
2775e126ba97SEli Cohen 	printk_once(KERN_INFO "%s", mlx5_version);
2776e126ba97SEli Cohen 
2777e126ba97SEli Cohen 	dev = (struct mlx5_ib_dev *)ib_alloc_device(sizeof(*dev));
2778e126ba97SEli Cohen 	if (!dev)
27799603b61dSJack Morgenstein 		return NULL;
2780e126ba97SEli Cohen 
27819603b61dSJack Morgenstein 	dev->mdev = mdev;
2782e126ba97SEli Cohen 
27830837e86aSMark Bloch 	dev->port = kcalloc(MLX5_CAP_GEN(mdev, num_ports), sizeof(*dev->port),
27840837e86aSMark Bloch 			    GFP_KERNEL);
27850837e86aSMark Bloch 	if (!dev->port)
27860837e86aSMark Bloch 		goto err_dealloc;
27870837e86aSMark Bloch 
2788fc24fc5eSAchiad Shochat 	rwlock_init(&dev->roce.netdev_lock);
2789e126ba97SEli Cohen 	err = get_port_caps(dev);
2790e126ba97SEli Cohen 	if (err)
27910837e86aSMark Bloch 		goto err_free_port;
2792e126ba97SEli Cohen 
27931b5daf11SMajd Dibbiny 	if (mlx5_use_mad_ifc(dev))
2794e126ba97SEli Cohen 		get_ext_port_caps(dev);
2795e126ba97SEli Cohen 
2796e126ba97SEli Cohen 	MLX5_INIT_DOORBELL_LOCK(&dev->uar_lock);
2797e126ba97SEli Cohen 
2798e126ba97SEli Cohen 	strlcpy(dev->ib_dev.name, "mlx5_%d", IB_DEVICE_NAME_MAX);
2799e126ba97SEli Cohen 	dev->ib_dev.owner		= THIS_MODULE;
2800e126ba97SEli Cohen 	dev->ib_dev.node_type		= RDMA_NODE_IB_CA;
2801c6790aa9SSagi Grimberg 	dev->ib_dev.local_dma_lkey	= 0 /* not supported for now */;
2802938fe83cSSaeed Mahameed 	dev->num_ports		= MLX5_CAP_GEN(mdev, num_ports);
2803e126ba97SEli Cohen 	dev->ib_dev.phys_port_cnt     = dev->num_ports;
2804233d05d2SSaeed Mahameed 	dev->ib_dev.num_comp_vectors    =
2805233d05d2SSaeed Mahameed 		dev->mdev->priv.eq_table.num_comp_vectors;
2806e126ba97SEli Cohen 	dev->ib_dev.dma_device	= &mdev->pdev->dev;
2807e126ba97SEli Cohen 
2808e126ba97SEli Cohen 	dev->ib_dev.uverbs_abi_ver	= MLX5_IB_UVERBS_ABI_VERSION;
2809e126ba97SEli Cohen 	dev->ib_dev.uverbs_cmd_mask	=
2810e126ba97SEli Cohen 		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT)		|
2811e126ba97SEli Cohen 		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE)	|
2812e126ba97SEli Cohen 		(1ull << IB_USER_VERBS_CMD_QUERY_PORT)		|
2813e126ba97SEli Cohen 		(1ull << IB_USER_VERBS_CMD_ALLOC_PD)		|
2814e126ba97SEli Cohen 		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD)		|
2815e126ba97SEli Cohen 		(1ull << IB_USER_VERBS_CMD_REG_MR)		|
281656e11d62SNoa Osherovich 		(1ull << IB_USER_VERBS_CMD_REREG_MR)		|
2817e126ba97SEli Cohen 		(1ull << IB_USER_VERBS_CMD_DEREG_MR)		|
2818e126ba97SEli Cohen 		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)	|
2819e126ba97SEli Cohen 		(1ull << IB_USER_VERBS_CMD_CREATE_CQ)		|
2820e126ba97SEli Cohen 		(1ull << IB_USER_VERBS_CMD_RESIZE_CQ)		|
2821e126ba97SEli Cohen 		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ)		|
2822e126ba97SEli Cohen 		(1ull << IB_USER_VERBS_CMD_CREATE_QP)		|
2823e126ba97SEli Cohen 		(1ull << IB_USER_VERBS_CMD_MODIFY_QP)		|
2824e126ba97SEli Cohen 		(1ull << IB_USER_VERBS_CMD_QUERY_QP)		|
2825e126ba97SEli Cohen 		(1ull << IB_USER_VERBS_CMD_DESTROY_QP)		|
2826e126ba97SEli Cohen 		(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST)	|
2827e126ba97SEli Cohen 		(1ull << IB_USER_VERBS_CMD_DETACH_MCAST)	|
2828e126ba97SEli Cohen 		(1ull << IB_USER_VERBS_CMD_CREATE_SRQ)		|
2829e126ba97SEli Cohen 		(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ)		|
2830e126ba97SEli Cohen 		(1ull << IB_USER_VERBS_CMD_QUERY_SRQ)		|
2831e126ba97SEli Cohen 		(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ)		|
2832e126ba97SEli Cohen 		(1ull << IB_USER_VERBS_CMD_CREATE_XSRQ)		|
2833e126ba97SEli Cohen 		(1ull << IB_USER_VERBS_CMD_OPEN_QP);
28341707cb4aSHaggai Eran 	dev->ib_dev.uverbs_ex_cmd_mask =
2835d4584ddfSMatan Barak 		(1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE)	|
2836d4584ddfSMatan Barak 		(1ull << IB_USER_VERBS_EX_CMD_CREATE_CQ)	|
2837d4584ddfSMatan Barak 		(1ull << IB_USER_VERBS_EX_CMD_CREATE_QP);
2838e126ba97SEli Cohen 
2839e126ba97SEli Cohen 	dev->ib_dev.query_device	= mlx5_ib_query_device;
2840e126ba97SEli Cohen 	dev->ib_dev.query_port		= mlx5_ib_query_port;
2841ebd61f68SAchiad Shochat 	dev->ib_dev.get_link_layer	= mlx5_ib_port_link_layer;
2842fc24fc5eSAchiad Shochat 	if (ll == IB_LINK_LAYER_ETHERNET)
2843fc24fc5eSAchiad Shochat 		dev->ib_dev.get_netdev	= mlx5_ib_get_netdev;
2844e126ba97SEli Cohen 	dev->ib_dev.query_gid		= mlx5_ib_query_gid;
28453cca2606SAchiad Shochat 	dev->ib_dev.add_gid		= mlx5_ib_add_gid;
28463cca2606SAchiad Shochat 	dev->ib_dev.del_gid		= mlx5_ib_del_gid;
2847e126ba97SEli Cohen 	dev->ib_dev.query_pkey		= mlx5_ib_query_pkey;
2848e126ba97SEli Cohen 	dev->ib_dev.modify_device	= mlx5_ib_modify_device;
2849e126ba97SEli Cohen 	dev->ib_dev.modify_port		= mlx5_ib_modify_port;
2850e126ba97SEli Cohen 	dev->ib_dev.alloc_ucontext	= mlx5_ib_alloc_ucontext;
2851e126ba97SEli Cohen 	dev->ib_dev.dealloc_ucontext	= mlx5_ib_dealloc_ucontext;
2852e126ba97SEli Cohen 	dev->ib_dev.mmap		= mlx5_ib_mmap;
2853e126ba97SEli Cohen 	dev->ib_dev.alloc_pd		= mlx5_ib_alloc_pd;
2854e126ba97SEli Cohen 	dev->ib_dev.dealloc_pd		= mlx5_ib_dealloc_pd;
2855e126ba97SEli Cohen 	dev->ib_dev.create_ah		= mlx5_ib_create_ah;
2856e126ba97SEli Cohen 	dev->ib_dev.query_ah		= mlx5_ib_query_ah;
2857e126ba97SEli Cohen 	dev->ib_dev.destroy_ah		= mlx5_ib_destroy_ah;
2858e126ba97SEli Cohen 	dev->ib_dev.create_srq		= mlx5_ib_create_srq;
2859e126ba97SEli Cohen 	dev->ib_dev.modify_srq		= mlx5_ib_modify_srq;
2860e126ba97SEli Cohen 	dev->ib_dev.query_srq		= mlx5_ib_query_srq;
2861e126ba97SEli Cohen 	dev->ib_dev.destroy_srq		= mlx5_ib_destroy_srq;
2862e126ba97SEli Cohen 	dev->ib_dev.post_srq_recv	= mlx5_ib_post_srq_recv;
2863e126ba97SEli Cohen 	dev->ib_dev.create_qp		= mlx5_ib_create_qp;
2864e126ba97SEli Cohen 	dev->ib_dev.modify_qp		= mlx5_ib_modify_qp;
2865e126ba97SEli Cohen 	dev->ib_dev.query_qp		= mlx5_ib_query_qp;
2866e126ba97SEli Cohen 	dev->ib_dev.destroy_qp		= mlx5_ib_destroy_qp;
2867e126ba97SEli Cohen 	dev->ib_dev.post_send		= mlx5_ib_post_send;
2868e126ba97SEli Cohen 	dev->ib_dev.post_recv		= mlx5_ib_post_recv;
2869e126ba97SEli Cohen 	dev->ib_dev.create_cq		= mlx5_ib_create_cq;
2870e126ba97SEli Cohen 	dev->ib_dev.modify_cq		= mlx5_ib_modify_cq;
2871e126ba97SEli Cohen 	dev->ib_dev.resize_cq		= mlx5_ib_resize_cq;
2872e126ba97SEli Cohen 	dev->ib_dev.destroy_cq		= mlx5_ib_destroy_cq;
2873e126ba97SEli Cohen 	dev->ib_dev.poll_cq		= mlx5_ib_poll_cq;
2874e126ba97SEli Cohen 	dev->ib_dev.req_notify_cq	= mlx5_ib_arm_cq;
2875e126ba97SEli Cohen 	dev->ib_dev.get_dma_mr		= mlx5_ib_get_dma_mr;
2876e126ba97SEli Cohen 	dev->ib_dev.reg_user_mr		= mlx5_ib_reg_user_mr;
287756e11d62SNoa Osherovich 	dev->ib_dev.rereg_user_mr	= mlx5_ib_rereg_user_mr;
2878e126ba97SEli Cohen 	dev->ib_dev.dereg_mr		= mlx5_ib_dereg_mr;
2879e126ba97SEli Cohen 	dev->ib_dev.attach_mcast	= mlx5_ib_mcg_attach;
2880e126ba97SEli Cohen 	dev->ib_dev.detach_mcast	= mlx5_ib_mcg_detach;
2881e126ba97SEli Cohen 	dev->ib_dev.process_mad		= mlx5_ib_process_mad;
28829bee178bSSagi Grimberg 	dev->ib_dev.alloc_mr		= mlx5_ib_alloc_mr;
28838a187ee5SSagi Grimberg 	dev->ib_dev.map_mr_sg		= mlx5_ib_map_mr_sg;
2884d5436ba0SSagi Grimberg 	dev->ib_dev.check_mr_status	= mlx5_ib_check_mr_status;
28857738613eSIra Weiny 	dev->ib_dev.get_port_immutable  = mlx5_port_immutable;
2886c7342823SIra Weiny 	dev->ib_dev.get_dev_fw_str      = get_dev_fw_str;
2887eff901d3SEli Cohen 	if (mlx5_core_is_pf(mdev)) {
2888eff901d3SEli Cohen 		dev->ib_dev.get_vf_config	= mlx5_ib_get_vf_config;
2889eff901d3SEli Cohen 		dev->ib_dev.set_vf_link_state	= mlx5_ib_set_vf_link_state;
2890eff901d3SEli Cohen 		dev->ib_dev.get_vf_stats	= mlx5_ib_get_vf_stats;
2891eff901d3SEli Cohen 		dev->ib_dev.set_vf_guid		= mlx5_ib_set_vf_guid;
2892eff901d3SEli Cohen 	}
2893e126ba97SEli Cohen 
28947c2344c3SMaor Gottlieb 	dev->ib_dev.disassociate_ucontext = mlx5_ib_disassociate_ucontext;
28957c2344c3SMaor Gottlieb 
2896938fe83cSSaeed Mahameed 	mlx5_ib_internal_fill_odp_caps(dev);
28978cdd312cSHaggai Eran 
2898d2370e0aSMatan Barak 	if (MLX5_CAP_GEN(mdev, imaicl)) {
2899d2370e0aSMatan Barak 		dev->ib_dev.alloc_mw		= mlx5_ib_alloc_mw;
2900d2370e0aSMatan Barak 		dev->ib_dev.dealloc_mw		= mlx5_ib_dealloc_mw;
2901d2370e0aSMatan Barak 		dev->ib_dev.uverbs_cmd_mask |=
2902d2370e0aSMatan Barak 			(1ull << IB_USER_VERBS_CMD_ALLOC_MW)	|
2903d2370e0aSMatan Barak 			(1ull << IB_USER_VERBS_CMD_DEALLOC_MW);
2904d2370e0aSMatan Barak 	}
2905d2370e0aSMatan Barak 
29060ad17a8fSMark Bloch 	if (MLX5_CAP_GEN(dev->mdev, out_of_seq_cnt) &&
29070ad17a8fSMark Bloch 	    MLX5_CAP_GEN(dev->mdev, retransmission_q_counters)) {
29080ad17a8fSMark Bloch 		dev->ib_dev.get_hw_stats	= mlx5_ib_get_hw_stats;
29090ad17a8fSMark Bloch 		dev->ib_dev.alloc_hw_stats	= mlx5_ib_alloc_hw_stats;
29100ad17a8fSMark Bloch 	}
29110ad17a8fSMark Bloch 
2912938fe83cSSaeed Mahameed 	if (MLX5_CAP_GEN(mdev, xrc)) {
2913e126ba97SEli Cohen 		dev->ib_dev.alloc_xrcd = mlx5_ib_alloc_xrcd;
2914e126ba97SEli Cohen 		dev->ib_dev.dealloc_xrcd = mlx5_ib_dealloc_xrcd;
2915e126ba97SEli Cohen 		dev->ib_dev.uverbs_cmd_mask |=
2916e126ba97SEli Cohen 			(1ull << IB_USER_VERBS_CMD_OPEN_XRCD) |
2917e126ba97SEli Cohen 			(1ull << IB_USER_VERBS_CMD_CLOSE_XRCD);
2918e126ba97SEli Cohen 	}
2919e126ba97SEli Cohen 
2920048ccca8SLinus Torvalds 	if (mlx5_ib_port_link_layer(&dev->ib_dev, 1) ==
2921038d2ef8SMaor Gottlieb 	    IB_LINK_LAYER_ETHERNET) {
2922038d2ef8SMaor Gottlieb 		dev->ib_dev.create_flow	= mlx5_ib_create_flow;
2923038d2ef8SMaor Gottlieb 		dev->ib_dev.destroy_flow = mlx5_ib_destroy_flow;
292479b20a6cSYishai Hadas 		dev->ib_dev.create_wq	 = mlx5_ib_create_wq;
292579b20a6cSYishai Hadas 		dev->ib_dev.modify_wq	 = mlx5_ib_modify_wq;
292679b20a6cSYishai Hadas 		dev->ib_dev.destroy_wq	 = mlx5_ib_destroy_wq;
2927c5f90929SYishai Hadas 		dev->ib_dev.create_rwq_ind_table = mlx5_ib_create_rwq_ind_table;
2928c5f90929SYishai Hadas 		dev->ib_dev.destroy_rwq_ind_table = mlx5_ib_destroy_rwq_ind_table;
2929038d2ef8SMaor Gottlieb 		dev->ib_dev.uverbs_ex_cmd_mask |=
2930038d2ef8SMaor Gottlieb 			(1ull << IB_USER_VERBS_EX_CMD_CREATE_FLOW) |
293179b20a6cSYishai Hadas 			(1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW) |
293279b20a6cSYishai Hadas 			(1ull << IB_USER_VERBS_EX_CMD_CREATE_WQ) |
293379b20a6cSYishai Hadas 			(1ull << IB_USER_VERBS_EX_CMD_MODIFY_WQ) |
2934c5f90929SYishai Hadas 			(1ull << IB_USER_VERBS_EX_CMD_DESTROY_WQ) |
2935c5f90929SYishai Hadas 			(1ull << IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL) |
2936c5f90929SYishai Hadas 			(1ull << IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL);
2937038d2ef8SMaor Gottlieb 	}
2938e126ba97SEli Cohen 	err = init_node_data(dev);
2939e126ba97SEli Cohen 	if (err)
2940233d05d2SSaeed Mahameed 		goto err_dealloc;
2941e126ba97SEli Cohen 
2942038d2ef8SMaor Gottlieb 	mutex_init(&dev->flow_db.lock);
2943e126ba97SEli Cohen 	mutex_init(&dev->cap_mask_mutex);
294489ea94a7SMaor Gottlieb 	INIT_LIST_HEAD(&dev->qp_list);
294589ea94a7SMaor Gottlieb 	spin_lock_init(&dev->reset_flow_resource_lock);
2946e126ba97SEli Cohen 
2947fc24fc5eSAchiad Shochat 	if (ll == IB_LINK_LAYER_ETHERNET) {
2948fc24fc5eSAchiad Shochat 		err = mlx5_enable_roce(dev);
2949e126ba97SEli Cohen 		if (err)
2950233d05d2SSaeed Mahameed 			goto err_dealloc;
2951fc24fc5eSAchiad Shochat 	}
2952fc24fc5eSAchiad Shochat 
2953fc24fc5eSAchiad Shochat 	err = create_dev_resources(&dev->devr);
2954fc24fc5eSAchiad Shochat 	if (err)
2955fc24fc5eSAchiad Shochat 		goto err_disable_roce;
2956e126ba97SEli Cohen 
29576aec21f6SHaggai Eran 	err = mlx5_ib_odp_init_one(dev);
2958281d1a92SWei Yongjun 	if (err)
2959e126ba97SEli Cohen 		goto err_rsrc;
2960e126ba97SEli Cohen 
29610837e86aSMark Bloch 	err = mlx5_ib_alloc_q_counters(dev);
29626aec21f6SHaggai Eran 	if (err)
29636aec21f6SHaggai Eran 		goto err_odp;
29646aec21f6SHaggai Eran 
29650837e86aSMark Bloch 	err = ib_register_device(&dev->ib_dev, NULL);
29660837e86aSMark Bloch 	if (err)
29670837e86aSMark Bloch 		goto err_q_cnt;
29680837e86aSMark Bloch 
2969e126ba97SEli Cohen 	err = create_umr_res(dev);
2970e126ba97SEli Cohen 	if (err)
2971e126ba97SEli Cohen 		goto err_dev;
2972e126ba97SEli Cohen 
2973e126ba97SEli Cohen 	for (i = 0; i < ARRAY_SIZE(mlx5_class_attributes); i++) {
2974281d1a92SWei Yongjun 		err = device_create_file(&dev->ib_dev.dev,
2975281d1a92SWei Yongjun 					 mlx5_class_attributes[i]);
2976281d1a92SWei Yongjun 		if (err)
2977e126ba97SEli Cohen 			goto err_umrc;
2978e126ba97SEli Cohen 	}
2979e126ba97SEli Cohen 
2980e126ba97SEli Cohen 	dev->ib_active = true;
2981e126ba97SEli Cohen 
29829603b61dSJack Morgenstein 	return dev;
2983e126ba97SEli Cohen 
2984e126ba97SEli Cohen err_umrc:
2985e126ba97SEli Cohen 	destroy_umrc_res(dev);
2986e126ba97SEli Cohen 
2987e126ba97SEli Cohen err_dev:
2988e126ba97SEli Cohen 	ib_unregister_device(&dev->ib_dev);
2989e126ba97SEli Cohen 
29900837e86aSMark Bloch err_q_cnt:
29910837e86aSMark Bloch 	mlx5_ib_dealloc_q_counters(dev);
29920837e86aSMark Bloch 
29936aec21f6SHaggai Eran err_odp:
29946aec21f6SHaggai Eran 	mlx5_ib_odp_remove_one(dev);
29956aec21f6SHaggai Eran 
2996e126ba97SEli Cohen err_rsrc:
2997e126ba97SEli Cohen 	destroy_dev_resources(&dev->devr);
2998e126ba97SEli Cohen 
2999fc24fc5eSAchiad Shochat err_disable_roce:
3000fc24fc5eSAchiad Shochat 	if (ll == IB_LINK_LAYER_ETHERNET)
3001fc24fc5eSAchiad Shochat 		mlx5_disable_roce(dev);
3002fc24fc5eSAchiad Shochat 
30030837e86aSMark Bloch err_free_port:
30040837e86aSMark Bloch 	kfree(dev->port);
30050837e86aSMark Bloch 
30069603b61dSJack Morgenstein err_dealloc:
3007e126ba97SEli Cohen 	ib_dealloc_device((struct ib_device *)dev);
3008e126ba97SEli Cohen 
30099603b61dSJack Morgenstein 	return NULL;
3010e126ba97SEli Cohen }
3011e126ba97SEli Cohen 
30129603b61dSJack Morgenstein static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
3013e126ba97SEli Cohen {
30149603b61dSJack Morgenstein 	struct mlx5_ib_dev *dev = context;
3015fc24fc5eSAchiad Shochat 	enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev, 1);
30166aec21f6SHaggai Eran 
3017e126ba97SEli Cohen 	ib_unregister_device(&dev->ib_dev);
30180837e86aSMark Bloch 	mlx5_ib_dealloc_q_counters(dev);
3019eefd56e5SEli Cohen 	destroy_umrc_res(dev);
30206aec21f6SHaggai Eran 	mlx5_ib_odp_remove_one(dev);
3021e126ba97SEli Cohen 	destroy_dev_resources(&dev->devr);
3022fc24fc5eSAchiad Shochat 	if (ll == IB_LINK_LAYER_ETHERNET)
3023fc24fc5eSAchiad Shochat 		mlx5_disable_roce(dev);
30240837e86aSMark Bloch 	kfree(dev->port);
3025e126ba97SEli Cohen 	ib_dealloc_device(&dev->ib_dev);
3026e126ba97SEli Cohen }
3027e126ba97SEli Cohen 
30289603b61dSJack Morgenstein static struct mlx5_interface mlx5_ib_interface = {
30299603b61dSJack Morgenstein 	.add            = mlx5_ib_add,
30309603b61dSJack Morgenstein 	.remove         = mlx5_ib_remove,
30319603b61dSJack Morgenstein 	.event          = mlx5_ib_event,
303264613d94SSaeed Mahameed 	.protocol	= MLX5_INTERFACE_PROTOCOL_IB,
3033e126ba97SEli Cohen };
3034e126ba97SEli Cohen 
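/*
 * Module init/exit: the ODP infrastructure is initialised before the
 * interface is registered with mlx5_core (and cleaned up after it is
 * unregistered), since mlx5_ib_add() may run as soon as registration
 * succeeds.
 */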
3035e126ba97SEli Cohen static int __init mlx5_ib_init(void)
3036e126ba97SEli Cohen {
30376aec21f6SHaggai Eran 	int err;
30386aec21f6SHaggai Eran 
30399603b61dSJack Morgenstein 	if (deprecated_prof_sel != 2)
30409603b61dSJack Morgenstein 		pr_warn("prof_sel is deprecated for mlx5_ib, set it for mlx5_core\n");
30419603b61dSJack Morgenstein 
30426aec21f6SHaggai Eran 	err = mlx5_ib_odp_init();
30436aec21f6SHaggai Eran 	if (err)
30446aec21f6SHaggai Eran 		return err;
30456aec21f6SHaggai Eran 
30466aec21f6SHaggai Eran 	err = mlx5_register_interface(&mlx5_ib_interface);
30476aec21f6SHaggai Eran 	if (err)
30486aec21f6SHaggai Eran 		goto clean_odp;
30496aec21f6SHaggai Eran 
30506aec21f6SHaggai Eran 	return err;
30516aec21f6SHaggai Eran 
30526aec21f6SHaggai Eran clean_odp:
30536aec21f6SHaggai Eran 	mlx5_ib_odp_cleanup();
30546aec21f6SHaggai Eran 	return err;
3055e126ba97SEli Cohen }
3056e126ba97SEli Cohen 
3057e126ba97SEli Cohen static void __exit mlx5_ib_cleanup(void)
3058e126ba97SEli Cohen {
30599603b61dSJack Morgenstein 	mlx5_unregister_interface(&mlx5_ib_interface);
30606aec21f6SHaggai Eran 	mlx5_ib_odp_cleanup();
3061e126ba97SEli Cohen }
3062e126ba97SEli Cohen 
3063e126ba97SEli Cohen module_init(mlx5_ib_init);
3064e126ba97SEli Cohen module_exit(mlx5_ib_cleanup);
3065