main.c (3df6e0234aebc55888069997239fe2847d4cf152) vs. main.c (f6a8a19bb11b46d60250ddc4e3e1ba6aa166f488)
1/*
2 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:

--- 1557 unchanged lines hidden ---

1566
1567 bfregi = &context->bfregi;
1568 for (i = 0; i < bfregi->num_sys_pages; i++)
1569 if (i < bfregi->num_static_sys_pages ||
1570 bfregi->sys_pages[i] != MLX5_IB_INVALID_UAR_INDEX)
1571 mlx5_cmd_free_uar(dev->mdev, bfregi->sys_pages[i]);
1572}
1573
1574int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp)
1574static int mlx5_ib_alloc_transport_domain(struct mlx5_ib_dev *dev, u32 *tdn)
1575{
1575{
1576 int err = 0;
1577
1578 mutex_lock(&dev->lb.mutex);
1579 if (td)
1580 dev->lb.user_td++;
1581 if (qp)
1582 dev->lb.qps++;
1583
1584 if (dev->lb.user_td == 2 ||
1585 dev->lb.qps == 1) {
1586 if (!dev->lb.enabled) {
1587 err = mlx5_nic_vport_update_local_lb(dev->mdev, true);
1588 dev->lb.enabled = true;
1589 }
1590 }
1591
1592 mutex_unlock(&dev->lb.mutex);
1593
1594 return err;
1595}
1596
1597void mlx5_ib_disable_lb(struct mlx5_ib_dev *dev, bool td, bool qp)
1598{
1599 mutex_lock(&dev->lb.mutex);
1600 if (td)
1601 dev->lb.user_td--;
1602 if (qp)
1603 dev->lb.qps--;
1604
1605 if (dev->lb.user_td == 1 &&
1606 dev->lb.qps == 0) {
1607 if (dev->lb.enabled) {
1608 mlx5_nic_vport_update_local_lb(dev->mdev, false);
1609 dev->lb.enabled = false;
1610 }
1611 }
1612
1613 mutex_unlock(&dev->lb.mutex);
1614}
1615
1616static int mlx5_ib_alloc_transport_domain(struct mlx5_ib_dev *dev, u32 *tdn,
1617 u16 uid)
1618{
1619 int err;
1620
1621 if (!MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
1622 return 0;
1623
1576 int err;
1577
1578 if (!MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
1579 return 0;
1580
1624 err = mlx5_cmd_alloc_transport_domain(dev->mdev, tdn, uid);
1581 err = mlx5_core_alloc_transport_domain(dev->mdev, tdn);
1625 if (err)
1626 return err;
1627
1628 if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) ||
1629 (!MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) &&
1630 !MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
1631 return err;
1632
1582 if (err)
1583 return err;
1584
1585 if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) ||
1586 (!MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) &&
1587 !MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
1588 return err;
1589
1633 return mlx5_ib_enable_lb(dev, true, false);
1590 mutex_lock(&dev->lb_mutex);
1591 dev->user_td++;
1592
1593 if (dev->user_td == 2)
1594 err = mlx5_nic_vport_update_local_lb(dev->mdev, true);
1595
1596 mutex_unlock(&dev->lb_mutex);
1597 return err;
1634}
1635
1598}
1599
1636static void mlx5_ib_dealloc_transport_domain(struct mlx5_ib_dev *dev, u32 tdn,
1637 u16 uid)
1600static void mlx5_ib_dealloc_transport_domain(struct mlx5_ib_dev *dev, u32 tdn)
1638{
1639 if (!MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
1640 return;
1641
1601{
1602 if (!MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
1603 return;
1604
1642 mlx5_cmd_dealloc_transport_domain(dev->mdev, tdn, uid);
1605 mlx5_core_dealloc_transport_domain(dev->mdev, tdn);
1643
1644 if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) ||
1645 (!MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) &&
1646 !MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
1647 return;
1648
1606
1607 if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) ||
1608 (!MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) &&
1609 !MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
1610 return;
1611
1649 mlx5_ib_disable_lb(dev, true, false);
1612 mutex_lock(&dev->lb_mutex);
1613 dev->user_td--;
1614
1615 if (dev->user_td < 2)
1616 mlx5_nic_vport_update_local_lb(dev->mdev, false);
1617
1618 mutex_unlock(&dev->lb_mutex);
1650}
1651
1652static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
1653 struct ib_udata *udata)
1654{
1655 struct mlx5_ib_dev *dev = to_mdev(ibdev);
1656 struct mlx5_ib_alloc_ucontext_req_v2 req = {};
1657 struct mlx5_ib_alloc_ucontext_resp resp = {};

--- 95 unchanged lines hidden ---

1753 err = allocate_uars(dev, context);
1754 if (err)
1755 goto out_sys_pages;
1756
1757#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
1758 context->ibucontext.invalidate_range = &mlx5_ib_invalidate_range;
1759#endif
1760
1619}
1620
1621static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
1622 struct ib_udata *udata)
1623{
1624 struct mlx5_ib_dev *dev = to_mdev(ibdev);
1625 struct mlx5_ib_alloc_ucontext_req_v2 req = {};
1626 struct mlx5_ib_alloc_ucontext_resp resp = {};

--- 95 unchanged lines hidden ---

1722 err = allocate_uars(dev, context);
1723 if (err)
1724 goto out_sys_pages;
1725
1726#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
1727 context->ibucontext.invalidate_range = &mlx5_ib_invalidate_range;
1728#endif
1729
1730 err = mlx5_ib_alloc_transport_domain(dev, &context->tdn);
1731 if (err)
1732 goto out_uars;
1733
1761 if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX) {
1734 if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX) {
1762 err = mlx5_ib_devx_create(dev);
1763 if (err < 0)
1764 goto out_uars;
1765 context->devx_uid = err;
1735 /* Block DEVX on Infiniband as of SELinux */
1736 if (mlx5_ib_port_link_layer(ibdev, 1) != IB_LINK_LAYER_ETHERNET) {
1737 err = -EPERM;
1738 goto out_td;
1739 }
1740
1741 err = mlx5_ib_devx_create(dev, context);
1742 if (err)
1743 goto out_td;
1766 }
1767
1744 }
1745
1768 err = mlx5_ib_alloc_transport_domain(dev, &context->tdn,
1769 context->devx_uid);
1770 if (err)
1771 goto out_devx;
1772
1773 if (MLX5_CAP_GEN(dev->mdev, dump_fill_mkey)) {
1774 err = mlx5_cmd_dump_fill_mkey(dev->mdev, &dump_fill_mkey);
1775 if (err)
1776 goto out_mdev;
1777 }
1778
1746 if (MLX5_CAP_GEN(dev->mdev, dump_fill_mkey)) {
1747 err = mlx5_cmd_dump_fill_mkey(dev->mdev, &dump_fill_mkey);
1748 if (err)
1749 goto out_mdev;
1750 }
1751
1752 INIT_LIST_HEAD(&context->vma_private_list);
1753 mutex_init(&context->vma_private_list_mutex);
1779 INIT_LIST_HEAD(&context->db_page_list);
1780 mutex_init(&context->db_page_mutex);
1781
1782 resp.tot_bfregs = req.total_num_bfregs;
1783 resp.num_ports = dev->num_ports;
1784
1785 if (field_avail(typeof(resp), cqe_version, udata->outlen))
1786 resp.response_length += sizeof(resp.cqe_version);

--- 59 unchanged lines hidden ---

1846 goto out_mdev;
1847
1848 bfregi->ver = ver;
1849 bfregi->num_low_latency_bfregs = req.num_low_latency_bfregs;
1850 context->cqe_version = resp.cqe_version;
1851 context->lib_caps = req.lib_caps;
1852 print_lib_caps(dev, context->lib_caps);
1853
1754 INIT_LIST_HEAD(&context->db_page_list);
1755 mutex_init(&context->db_page_mutex);
1756
1757 resp.tot_bfregs = req.total_num_bfregs;
1758 resp.num_ports = dev->num_ports;
1759
1760 if (field_avail(typeof(resp), cqe_version, udata->outlen))
1761 resp.response_length += sizeof(resp.cqe_version);

--- 59 unchanged lines hidden ---

1821 goto out_mdev;
1822
1823 bfregi->ver = ver;
1824 bfregi->num_low_latency_bfregs = req.num_low_latency_bfregs;
1825 context->cqe_version = resp.cqe_version;
1826 context->lib_caps = req.lib_caps;
1827 print_lib_caps(dev, context->lib_caps);
1828
1854 if (mlx5_lag_is_active(dev->mdev)) {
1855 u8 port = mlx5_core_native_port_num(dev->mdev);
1856
1857 atomic_set(&context->tx_port_affinity,
1858 atomic_add_return(
1859 1, &dev->roce[port].tx_port_affinity));
1860 }
1861
1862 return &context->ibucontext;
1863
1864out_mdev:
1829 return &context->ibucontext;
1830
1831out_mdev:
1865 mlx5_ib_dealloc_transport_domain(dev, context->tdn, context->devx_uid);
1866out_devx:
1867 if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX)
1832 if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX)
1868 mlx5_ib_devx_destroy(dev, context->devx_uid);
1833 mlx5_ib_devx_destroy(dev, context);
1834out_td:
1835 mlx5_ib_dealloc_transport_domain(dev, context->tdn);
1869
1870out_uars:
1871 deallocate_uars(dev, context);
1872
1873out_sys_pages:
1874 kfree(bfregi->sys_pages);
1875
1876out_count:

--- 6 unchanged lines hidden ---

1883}
1884
1885static int mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
1886{
1887 struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
1888 struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
1889 struct mlx5_bfreg_info *bfregi;
1890
1836
1837out_uars:
1838 deallocate_uars(dev, context);
1839
1840out_sys_pages:
1841 kfree(bfregi->sys_pages);
1842
1843out_count:

--- 6 unchanged lines hidden ---

1850}
1851
1852static int mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
1853{
1854 struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
1855 struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
1856 struct mlx5_bfreg_info *bfregi;
1857
1891#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
1892 /* All umem's must be destroyed before destroying the ucontext. */
1893 mutex_lock(&ibcontext->per_mm_list_lock);
1894 WARN_ON(!list_empty(&ibcontext->per_mm_list));
1895 mutex_unlock(&ibcontext->per_mm_list_lock);
1896#endif
1858 if (context->devx_uid)
1859 mlx5_ib_devx_destroy(dev, context);
1897
1898 bfregi = &context->bfregi;
1860
1861 bfregi = &context->bfregi;
1899 mlx5_ib_dealloc_transport_domain(dev, context->tdn, context->devx_uid);
1862 mlx5_ib_dealloc_transport_domain(dev, context->tdn);
1900
1863
1901 if (context->devx_uid)
1902 mlx5_ib_devx_destroy(dev, context->devx_uid);
1903
1904 deallocate_uars(dev, context);
1905 kfree(bfregi->sys_pages);
1906 kfree(bfregi->count);
1907 kfree(context);
1908
1909 return 0;
1910}
1911

--- 23 unchanged lines hidden ---

1935}
1936
1937/* Index resides in an extra byte to enable larger values than 255 */
1938static int get_extended_index(unsigned long offset)
1939{
1940 return get_arg(offset) | ((offset >> 16) & 0xff) << 8;
1941}
1942
1864 deallocate_uars(dev, context);
1865 kfree(bfregi->sys_pages);
1866 kfree(bfregi->count);
1867 kfree(context);
1868
1869 return 0;
1870}
1871

--- 23 unchanged lines hidden ---

1895}
1896
1897/* Index resides in an extra byte to enable larger values than 255 */
1898static int get_extended_index(unsigned long offset)
1899{
1900 return get_arg(offset) | ((offset >> 16) & 0xff) << 8;
1901}
1902
1903static void mlx5_ib_vma_open(struct vm_area_struct *area)
1904{
1905 /* vma_open is called when a new VMA is created on top of our VMA. This
1906 * is done through either mremap flow or split_vma (usually due to
1907 * mlock, madvise, munmap, etc.) We do not support a clone of the VMA,
1908 * as this VMA is strongly hardware related. Therefore we set the
1909 * vm_ops of the newly created/cloned VMA to NULL, to prevent it from
1910 * calling us again and trying to do incorrect actions. We assume that
1911 * the original VMA size is exactly a single page, and therefore all
1912 * "splitting" operation will not happen to it.
1913 */
1914 area->vm_ops = NULL;
1915}
1943
1916
1917static void mlx5_ib_vma_close(struct vm_area_struct *area)
1918{
1919 struct mlx5_ib_vma_private_data *mlx5_ib_vma_priv_data;
1920
1921 /* It's guaranteed that all VMAs opened on a FD are closed before the
1922 * file itself is closed, therefore no sync is needed with the regular
1923 * closing flow. (e.g. mlx5 ib_dealloc_ucontext)
1924 * However need a sync with accessing the vma as part of
1925 * mlx5_ib_disassociate_ucontext.
1926 * The close operation is usually called under mm->mmap_sem except when
1927 * process is exiting.
1928 * The exiting case is handled explicitly as part of
1929 * mlx5_ib_disassociate_ucontext.
1930 */
1931 mlx5_ib_vma_priv_data = (struct mlx5_ib_vma_private_data *)area->vm_private_data;
1932
1933 /* setting the vma context pointer to null in the mlx5_ib driver's
1934 * private data, to protect a race condition in
1935 * mlx5_ib_disassociate_ucontext().
1936 */
1937 mlx5_ib_vma_priv_data->vma = NULL;
1938 mutex_lock(mlx5_ib_vma_priv_data->vma_private_list_mutex);
1939 list_del(&mlx5_ib_vma_priv_data->list);
1940 mutex_unlock(mlx5_ib_vma_priv_data->vma_private_list_mutex);
1941 kfree(mlx5_ib_vma_priv_data);
1942}
1943
1944static const struct vm_operations_struct mlx5_ib_vm_ops = {
1945 .open = mlx5_ib_vma_open,
1946 .close = mlx5_ib_vma_close
1947};
1948
1949static int mlx5_ib_set_vma_data(struct vm_area_struct *vma,
1950 struct mlx5_ib_ucontext *ctx)
1951{
1952 struct mlx5_ib_vma_private_data *vma_prv;
1953 struct list_head *vma_head = &ctx->vma_private_list;
1954
1955 vma_prv = kzalloc(sizeof(*vma_prv), GFP_KERNEL);
1956 if (!vma_prv)
1957 return -ENOMEM;
1958
1959 vma_prv->vma = vma;
1960 vma_prv->vma_private_list_mutex = &ctx->vma_private_list_mutex;
1961 vma->vm_private_data = vma_prv;
1962 vma->vm_ops = &mlx5_ib_vm_ops;
1963
1964 mutex_lock(&ctx->vma_private_list_mutex);
1965 list_add(&vma_prv->list, vma_head);
1966 mutex_unlock(&ctx->vma_private_list_mutex);
1967
1968 return 0;
1969}
1970
1944static void mlx5_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
1945{
1971static void mlx5_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
1972{
1973 struct vm_area_struct *vma;
1974 struct mlx5_ib_vma_private_data *vma_private, *n;
1975 struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
1976
1977 mutex_lock(&context->vma_private_list_mutex);
1978 list_for_each_entry_safe(vma_private, n, &context->vma_private_list,
1979 list) {
1980 vma = vma_private->vma;
1981 zap_vma_ptes(vma, vma->vm_start, PAGE_SIZE);
1982 /* context going to be destroyed, should
1983 * not access ops any more.
1984 */
1985 vma->vm_flags &= ~(VM_SHARED | VM_MAYSHARE);
1986 vma->vm_ops = NULL;
1987 list_del(&vma_private->list);
1988 kfree(vma_private);
1989 }
1990 mutex_unlock(&context->vma_private_list_mutex);
1946}
1947
1948static inline char *mmap_cmd2str(enum mlx5_ib_mmap_cmd cmd)
1949{
1950 switch (cmd) {
1951 case MLX5_IB_MMAP_WC_PAGE:
1952 return "WC";
1953 case MLX5_IB_MMAP_REGULAR_PAGE:

--- 6 unchanged lines hidden ---

1960 return NULL;
1961 }
1962}
1963
1964static int mlx5_ib_mmap_clock_info_page(struct mlx5_ib_dev *dev,
1965 struct vm_area_struct *vma,
1966 struct mlx5_ib_ucontext *context)
1967{
1991}
1992
1993static inline char *mmap_cmd2str(enum mlx5_ib_mmap_cmd cmd)
1994{
1995 switch (cmd) {
1996 case MLX5_IB_MMAP_WC_PAGE:
1997 return "WC";
1998 case MLX5_IB_MMAP_REGULAR_PAGE:

--- 6 unchanged lines hidden ---

2005 return NULL;
2006 }
2007}
2008
2009static int mlx5_ib_mmap_clock_info_page(struct mlx5_ib_dev *dev,
2010 struct vm_area_struct *vma,
2011 struct mlx5_ib_ucontext *context)
2012{
2013 phys_addr_t pfn;
2014 int err;
2015
1968 if (vma->vm_end - vma->vm_start != PAGE_SIZE)
1969 return -EINVAL;
1970
1971 if (get_index(vma->vm_pgoff) != MLX5_IB_CLOCK_INFO_V1)
1972 return -EOPNOTSUPP;
1973
1974 if (vma->vm_flags & VM_WRITE)
1975 return -EPERM;
1976
1977 if (!dev->mdev->clock_info_page)
1978 return -EOPNOTSUPP;
1979
2016 if (vma->vm_end - vma->vm_start != PAGE_SIZE)
2017 return -EINVAL;
2018
2019 if (get_index(vma->vm_pgoff) != MLX5_IB_CLOCK_INFO_V1)
2020 return -EOPNOTSUPP;
2021
2022 if (vma->vm_flags & VM_WRITE)
2023 return -EPERM;
2024
2025 if (!dev->mdev->clock_info_page)
2026 return -EOPNOTSUPP;
2027
1980 return rdma_user_mmap_page(&context->ibucontext, vma,
1981 dev->mdev->clock_info_page, PAGE_SIZE);
2028 pfn = page_to_pfn(dev->mdev->clock_info_page);
2029 err = remap_pfn_range(vma, vma->vm_start, pfn, PAGE_SIZE,
2030 vma->vm_page_prot);
2031 if (err)
2032 return err;
2033
2034 return mlx5_ib_set_vma_data(vma, context);
1982}
1983
1984static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
1985 struct vm_area_struct *vma,
1986 struct mlx5_ib_ucontext *context)
1987{
1988 struct mlx5_bfreg_info *bfregi = &context->bfregi;
1989 int err;

--- 73 unchanged lines hidden ---

2063 }
2064 } else {
2065 uar_index = bfregi->sys_pages[idx];
2066 }
2067
2068 pfn = uar_index2pfn(dev, uar_index);
2069 mlx5_ib_dbg(dev, "uar idx 0x%lx, pfn %pa\n", idx, &pfn);
2070
2035}
2036
2037static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
2038 struct vm_area_struct *vma,
2039 struct mlx5_ib_ucontext *context)
2040{
2041 struct mlx5_bfreg_info *bfregi = &context->bfregi;
2042 int err;

--- 73 unchanged lines hidden ---

2116 }
2117 } else {
2118 uar_index = bfregi->sys_pages[idx];
2119 }
2120
2121 pfn = uar_index2pfn(dev, uar_index);
2122 mlx5_ib_dbg(dev, "uar idx 0x%lx, pfn %pa\n", idx, &pfn);
2123
2071 err = rdma_user_mmap_io(&context->ibucontext, vma, pfn, PAGE_SIZE,
2072 prot);
2124 vma->vm_page_prot = prot;
2125 err = io_remap_pfn_range(vma, vma->vm_start, pfn,
2126 PAGE_SIZE, vma->vm_page_prot);
2073 if (err) {
2074 mlx5_ib_err(dev,
2127 if (err) {
2128 mlx5_ib_err(dev,
2075 "rdma_user_mmap_io failed with error=%d, mmap_cmd=%s\n",
2129 "io_remap_pfn_range failed with error=%d, mmap_cmd=%s\n",
2076 err, mmap_cmd2str(cmd));
2130 err, mmap_cmd2str(cmd));
2131 err = -EAGAIN;
2077 goto err;
2078 }
2079
2132 goto err;
2133 }
2134
2135 err = mlx5_ib_set_vma_data(vma, context);
2136 if (err)
2137 goto err;
2138
2080 if (dyn_uar)
2081 bfregi->sys_pages[idx] = uar_index;
2082 return 0;
2083
2084err:
2085 if (!dyn_uar)
2086 return err;
2087

--- 8 unchanged lines hidden ---

2096static int dm_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
2097{
2098 struct mlx5_ib_ucontext *mctx = to_mucontext(context);
2099 struct mlx5_ib_dev *dev = to_mdev(context->device);
2100 u16 page_idx = get_extended_index(vma->vm_pgoff);
2101 size_t map_size = vma->vm_end - vma->vm_start;
2102 u32 npages = map_size >> PAGE_SHIFT;
2103 phys_addr_t pfn;
2139 if (dyn_uar)
2140 bfregi->sys_pages[idx] = uar_index;
2141 return 0;
2142
2143err:
2144 if (!dyn_uar)
2145 return err;
2146

--- 8 unchanged lines hidden ---

2155static int dm_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
2156{
2157 struct mlx5_ib_ucontext *mctx = to_mucontext(context);
2158 struct mlx5_ib_dev *dev = to_mdev(context->device);
2159 u16 page_idx = get_extended_index(vma->vm_pgoff);
2160 size_t map_size = vma->vm_end - vma->vm_start;
2161 u32 npages = map_size >> PAGE_SHIFT;
2162 phys_addr_t pfn;
2163 pgprot_t prot;
2104
2105 if (find_next_zero_bit(mctx->dm_pages, page_idx + npages, page_idx) !=
2106 page_idx + npages)
2107 return -EINVAL;
2108
2109 pfn = ((pci_resource_start(dev->mdev->pdev, 0) +
2110 MLX5_CAP64_DEV_MEM(dev->mdev, memic_bar_start_addr)) >>
2111 PAGE_SHIFT) +
2112 page_idx;
2164
2165 if (find_next_zero_bit(mctx->dm_pages, page_idx + npages, page_idx) !=
2166 page_idx + npages)
2167 return -EINVAL;
2168
2169 pfn = ((pci_resource_start(dev->mdev->pdev, 0) +
2170 MLX5_CAP64_DEV_MEM(dev->mdev, memic_bar_start_addr)) >>
2171 PAGE_SHIFT) +
2172 page_idx;
2113 return rdma_user_mmap_io(context, vma, pfn, map_size,
2114 pgprot_writecombine(vma->vm_page_prot));
2173 prot = pgprot_writecombine(vma->vm_page_prot);
2174 vma->vm_page_prot = prot;
2175
2176 if (io_remap_pfn_range(vma, vma->vm_start, pfn, map_size,
2177 vma->vm_page_prot))
2178 return -EAGAIN;
2179
2180 return mlx5_ib_set_vma_data(vma, mctx);
2115}
2116
2117static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
2118{
2119 struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
2120 struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
2121 unsigned long command;
2122 phys_addr_t pfn;

--- 124 unchanged lines hidden ---

2247
2248static struct ib_pd *mlx5_ib_alloc_pd(struct ib_device *ibdev,
2249 struct ib_ucontext *context,
2250 struct ib_udata *udata)
2251{
2252 struct mlx5_ib_alloc_pd_resp resp;
2253 struct mlx5_ib_pd *pd;
2254 int err;
2181}
2182
2183static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
2184{
2185 struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
2186 struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
2187 unsigned long command;
2188 phys_addr_t pfn;

--- 124 unchanged lines hidden ---

2313
2314static struct ib_pd *mlx5_ib_alloc_pd(struct ib_device *ibdev,
2315 struct ib_ucontext *context,
2316 struct ib_udata *udata)
2317{
2318 struct mlx5_ib_alloc_pd_resp resp;
2319 struct mlx5_ib_pd *pd;
2320 int err;
2255 u32 out[MLX5_ST_SZ_DW(alloc_pd_out)] = {};
2256 u32 in[MLX5_ST_SZ_DW(alloc_pd_in)] = {};
2257 u16 uid = 0;
2258
2259 pd = kmalloc(sizeof(*pd), GFP_KERNEL);
2260 if (!pd)
2261 return ERR_PTR(-ENOMEM);
2262
2321
2322 pd = kmalloc(sizeof(*pd), GFP_KERNEL);
2323 if (!pd)
2324 return ERR_PTR(-ENOMEM);
2325
2263 uid = context ? to_mucontext(context)->devx_uid : 0;
2264 MLX5_SET(alloc_pd_in, in, opcode, MLX5_CMD_OP_ALLOC_PD);
2265 MLX5_SET(alloc_pd_in, in, uid, uid);
2266 err = mlx5_cmd_exec(to_mdev(ibdev)->mdev, in, sizeof(in),
2267 out, sizeof(out));
2326 err = mlx5_core_alloc_pd(to_mdev(ibdev)->mdev, &pd->pdn);
2268 if (err) {
2269 kfree(pd);
2270 return ERR_PTR(err);
2271 }
2272
2327 if (err) {
2328 kfree(pd);
2329 return ERR_PTR(err);
2330 }
2331
2273 pd->pdn = MLX5_GET(alloc_pd_out, out, pd);
2274 pd->uid = uid;
2275 if (context) {
2276 resp.pdn = pd->pdn;
2277 if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
2332 if (context) {
2333 resp.pdn = pd->pdn;
2334 if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
2278 mlx5_cmd_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn, uid);
2335 mlx5_core_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn);
2279 kfree(pd);
2280 return ERR_PTR(-EFAULT);
2281 }
2282 }
2283
2284 return &pd->ibpd;
2285}
2286
2287static int mlx5_ib_dealloc_pd(struct ib_pd *pd)
2288{
2289 struct mlx5_ib_dev *mdev = to_mdev(pd->device);
2290 struct mlx5_ib_pd *mpd = to_mpd(pd);
2291
2336 kfree(pd);
2337 return ERR_PTR(-EFAULT);
2338 }
2339 }
2340
2341 return &pd->ibpd;
2342}
2343
2344static int mlx5_ib_dealloc_pd(struct ib_pd *pd)
2345{
2346 struct mlx5_ib_dev *mdev = to_mdev(pd->device);
2347 struct mlx5_ib_pd *mpd = to_mpd(pd);
2348
2292 mlx5_cmd_dealloc_pd(mdev->mdev, mpd->pdn, mpd->uid);
2349 mlx5_core_dealloc_pd(mdev->mdev, mpd->pdn);
2293 kfree(mpd);
2294
2295 return 0;
2296}
2297
2298enum {
2299 MATCH_CRITERIA_ENABLE_OUTER_BIT,
2300 MATCH_CRITERIA_ENABLE_MISC_BIT,

--- 89 unchanged lines hidden ---

2390/* Field is the last supported field */
2391#define FIELDS_NOT_SUPPORTED(filter, field)\
2392 memchr_inv((void *)&filter.field +\
2393 sizeof(filter.field), 0,\
2394 sizeof(filter) -\
2395 offsetof(typeof(filter), field) -\
2396 sizeof(filter.field))
2397
2350 kfree(mpd);
2351
2352 return 0;
2353}
2354
2355enum {
2356 MATCH_CRITERIA_ENABLE_OUTER_BIT,
2357 MATCH_CRITERIA_ENABLE_MISC_BIT,

--- 89 unchanged lines hidden ---

2447/* Field is the last supported field */
2448#define FIELDS_NOT_SUPPORTED(filter, field)\
2449 memchr_inv((void *)&filter.field +\
2450 sizeof(filter.field), 0,\
2451 sizeof(filter) -\
2452 offsetof(typeof(filter), field) -\
2453 sizeof(filter.field))
2454
2398int parse_flow_flow_action(struct mlx5_ib_flow_action *maction,
2399 bool is_egress,
2400 struct mlx5_flow_act *action)
2455static int parse_flow_flow_action(const union ib_flow_spec *ib_spec,
2456 const struct ib_flow_attr *flow_attr,
2457 struct mlx5_flow_act *action)
2401{
2458{
2459 struct mlx5_ib_flow_action *maction = to_mflow_act(ib_spec->action.act);
2402
2403 switch (maction->ib_action.type) {
2404 case IB_FLOW_ACTION_ESP:
2460
2461 switch (maction->ib_action.type) {
2462 case IB_FLOW_ACTION_ESP:
2405 if (action->action & (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
2406 MLX5_FLOW_CONTEXT_ACTION_DECRYPT))
2407 return -EINVAL;
2408 /* Currently only AES_GCM keymat is supported by the driver */
2409 action->esp_id = (uintptr_t)maction->esp_aes_gcm.ctx;
2463 /* Currently only AES_GCM keymat is supported by the driver */
2464 action->esp_id = (uintptr_t)maction->esp_aes_gcm.ctx;
2410 action->action |= is_egress ?
2465 action->action |= flow_attr->flags & IB_FLOW_ATTR_FLAGS_EGRESS ?
2411 MLX5_FLOW_CONTEXT_ACTION_ENCRYPT :
2412 MLX5_FLOW_CONTEXT_ACTION_DECRYPT;
2413 return 0;
2466 MLX5_FLOW_CONTEXT_ACTION_ENCRYPT :
2467 MLX5_FLOW_CONTEXT_ACTION_DECRYPT;
2468 return 0;
2414 case IB_FLOW_ACTION_UNSPECIFIED:
2415 if (maction->flow_action_raw.sub_type ==
2416 MLX5_IB_FLOW_ACTION_MODIFY_HEADER) {
2417 if (action->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
2418 return -EINVAL;
2419 action->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
2420 action->modify_id = maction->flow_action_raw.action_id;
2421 return 0;
2422 }
2423 if (maction->flow_action_raw.sub_type ==
2424 MLX5_IB_FLOW_ACTION_DECAP) {
2425 if (action->action & MLX5_FLOW_CONTEXT_ACTION_DECAP)
2426 return -EINVAL;
2427 action->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
2428 return 0;
2429 }
2430 if (maction->flow_action_raw.sub_type ==
2431 MLX5_IB_FLOW_ACTION_PACKET_REFORMAT) {
2432 if (action->action &
2433 MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT)
2434 return -EINVAL;
2435 action->action |=
2436 MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
2437 action->reformat_id =
2438 maction->flow_action_raw.action_id;
2439 return 0;
2440 }
2441 /* fall through */
2442 default:
2443 return -EOPNOTSUPP;
2444 }
2445}
2446
2447static int parse_flow_attr(struct mlx5_core_dev *mdev, u32 *match_c,
2448 u32 *match_v, const union ib_flow_spec *ib_spec,
2449 const struct ib_flow_attr *flow_attr,

--- 320 unchanged lines hidden ---

2770 break;
2771 case IB_FLOW_SPEC_ACTION_DROP:
2772 if (FIELDS_NOT_SUPPORTED(ib_spec->drop,
2773 LAST_DROP_FIELD))
2774 return -EOPNOTSUPP;
2775 action->action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
2776 break;
2777 case IB_FLOW_SPEC_ACTION_HANDLE:
2469 default:
2470 return -EOPNOTSUPP;
2471 }
2472}
2473
2474static int parse_flow_attr(struct mlx5_core_dev *mdev, u32 *match_c,
2475 u32 *match_v, const union ib_flow_spec *ib_spec,
2476 const struct ib_flow_attr *flow_attr,

--- 320 unchanged lines hidden ---

2797 break;
2798 case IB_FLOW_SPEC_ACTION_DROP:
2799 if (FIELDS_NOT_SUPPORTED(ib_spec->drop,
2800 LAST_DROP_FIELD))
2801 return -EOPNOTSUPP;
2802 action->action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
2803 break;
2804 case IB_FLOW_SPEC_ACTION_HANDLE:
2778 ret = parse_flow_flow_action(to_mflow_act(ib_spec->action.act),
2779 flow_attr->flags & IB_FLOW_ATTR_FLAGS_EGRESS, action);
2805 ret = parse_flow_flow_action(ib_spec, flow_attr, action);
2780 if (ret)
2781 return ret;
2782 break;
2783 case IB_FLOW_SPEC_ACTION_COUNT:
2784 if (FIELDS_NOT_SUPPORTED(ib_spec->flow_count,
2785 LAST_COUNTERS_FIELD))
2786 return -EOPNOTSUPP;
2787

--- 64 unchanged lines hidden ---

2852 bool is_ipsec = mlx5_fs_is_ipsec_flow(match_c);
2853 bool is_drop = flow_act->action & MLX5_FLOW_CONTEXT_ACTION_DROP;
2854
2855 /*
2856 * Currently only crypto is supported in egress, when regular egress
2857 * rules would be supported, always return VALID_SPEC_NA.
2858 */
2859 if (!is_crypto)
2806 if (ret)
2807 return ret;
2808 break;
2809 case IB_FLOW_SPEC_ACTION_COUNT:
2810 if (FIELDS_NOT_SUPPORTED(ib_spec->flow_count,
2811 LAST_COUNTERS_FIELD))
2812 return -EOPNOTSUPP;
2813

--- 64 unchanged lines hidden ---

2878 bool is_ipsec = mlx5_fs_is_ipsec_flow(match_c);
2879 bool is_drop = flow_act->action & MLX5_FLOW_CONTEXT_ACTION_DROP;
2880
2881 /*
2882 * Currently only crypto is supported in egress, when regular egress
2883 * rules would be supported, always return VALID_SPEC_NA.
2884 */
2885 if (!is_crypto)
2860 return VALID_SPEC_NA;
2886 return egress ? VALID_SPEC_INVALID : VALID_SPEC_NA;
2861
2862 return is_crypto && is_ipsec &&
2863 (!egress || (!is_drop && !flow_act->has_flow_tag)) ?
2864 VALID_SPEC_VALID : VALID_SPEC_INVALID;
2865}
2866
2867static bool is_valid_spec(struct mlx5_core_dev *mdev,
2868 const struct mlx5_flow_spec *spec,

--- 126 unchanged lines hidden ---

2995};
2996
2997#define MLX5_FS_MAX_TYPES 6
2998#define MLX5_FS_MAX_ENTRIES BIT(16)
2999
3000static struct mlx5_ib_flow_prio *_get_prio(struct mlx5_flow_namespace *ns,
3001 struct mlx5_ib_flow_prio *prio,
3002 int priority,
2887
2888 return is_crypto && is_ipsec &&
2889 (!egress || (!is_drop && !flow_act->has_flow_tag)) ?
2890 VALID_SPEC_VALID : VALID_SPEC_INVALID;
2891}
2892
2893static bool is_valid_spec(struct mlx5_core_dev *mdev,
2894 const struct mlx5_flow_spec *spec,

--- 126 unchanged lines hidden ---

3021};
3022
3023#define MLX5_FS_MAX_TYPES 6
3024#define MLX5_FS_MAX_ENTRIES BIT(16)
3025
3026static struct mlx5_ib_flow_prio *_get_prio(struct mlx5_flow_namespace *ns,
3027 struct mlx5_ib_flow_prio *prio,
3028 int priority,
3003 int num_entries, int num_groups,
3004 u32 flags)
3029 int num_entries, int num_groups)
3005{
3006 struct mlx5_flow_table *ft;
3007
3008 ft = mlx5_create_auto_grouped_flow_table(ns, priority,
3009 num_entries,
3010 num_groups,
3030{
3031 struct mlx5_flow_table *ft;
3032
3033 ft = mlx5_create_auto_grouped_flow_table(ns, priority,
3034 num_entries,
3035 num_groups,
3011 0, flags);
3036 0, 0);
3012 if (IS_ERR(ft))
3013 return ERR_CAST(ft);
3014
3015 prio->flow_table = ft;
3016 prio->refcount = 0;
3017 return prio;
3018}
3019
3020static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
3021 struct ib_flow_attr *flow_attr,
3022 enum flow_table_type ft_type)
3023{
3024 bool dont_trap = flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP;
3025 struct mlx5_flow_namespace *ns = NULL;
3026 struct mlx5_ib_flow_prio *prio;
3027 struct mlx5_flow_table *ft;
3028 int max_table_size;
3029 int num_entries;
3030 int num_groups;
3037 if (IS_ERR(ft))
3038 return ERR_CAST(ft);
3039
3040 prio->flow_table = ft;
3041 prio->refcount = 0;
3042 return prio;
3043}
3044
3045static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
3046 struct ib_flow_attr *flow_attr,
3047 enum flow_table_type ft_type)
3048{
3049 bool dont_trap = flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP;
3050 struct mlx5_flow_namespace *ns = NULL;
3051 struct mlx5_ib_flow_prio *prio;
3052 struct mlx5_flow_table *ft;
3053 int max_table_size;
3054 int num_entries;
3055 int num_groups;
3031 u32 flags = 0;
3032 int priority;
3033
3034 max_table_size = BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
3035 log_max_ft_size));
3036 if (flow_attr->type == IB_FLOW_ATTR_NORMAL) {
3056 int priority;
3057
3058 max_table_size = BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
3059 log_max_ft_size));
3060 if (flow_attr->type == IB_FLOW_ATTR_NORMAL) {
3037 enum mlx5_flow_namespace_type fn_type;
3038
3039 if (flow_is_multicast_only(flow_attr) &&
3040 !dont_trap)
3061 if (ft_type == MLX5_IB_FT_TX)
3062 priority = 0;
3063 else if (flow_is_multicast_only(flow_attr) &&
3064 !dont_trap)
3041 priority = MLX5_IB_FLOW_MCAST_PRIO;
3042 else
3043 priority = ib_prio_to_core_prio(flow_attr->priority,
3044 dont_trap);
3065 priority = MLX5_IB_FLOW_MCAST_PRIO;
3066 else
3067 priority = ib_prio_to_core_prio(flow_attr->priority,
3068 dont_trap);
3045 if (ft_type == MLX5_IB_FT_RX) {
3046 fn_type = MLX5_FLOW_NAMESPACE_BYPASS;
3047 prio = &dev->flow_db->prios[priority];
3048 if (!dev->rep &&
3049 MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, decap))
3050 flags |= MLX5_FLOW_TABLE_TUNNEL_EN_DECAP;
3051 if (!dev->rep &&
3052 MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
3053 reformat_l3_tunnel_to_l2))
3054 flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
3055 } else {
3056 max_table_size =
3057 BIT(MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev,
3058 log_max_ft_size));
3059 fn_type = MLX5_FLOW_NAMESPACE_EGRESS;
3060 prio = &dev->flow_db->egress_prios[priority];
3061 if (!dev->rep &&
3062 MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, reformat))
3063 flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
3064 }
3065 ns = mlx5_get_flow_namespace(dev->mdev, fn_type);
3069 ns = mlx5_get_flow_namespace(dev->mdev,
3070 ft_type == MLX5_IB_FT_TX ?
3071 MLX5_FLOW_NAMESPACE_EGRESS :
3072 MLX5_FLOW_NAMESPACE_BYPASS);
3066 num_entries = MLX5_FS_MAX_ENTRIES;
3067 num_groups = MLX5_FS_MAX_TYPES;
3073 num_entries = MLX5_FS_MAX_ENTRIES;
3074 num_groups = MLX5_FS_MAX_TYPES;
3075 prio = &dev->flow_db->prios[priority];
3068 } else if (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
3069 flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT) {
3070 ns = mlx5_get_flow_namespace(dev->mdev,
3071 MLX5_FLOW_NAMESPACE_LEFTOVERS);
3072 build_leftovers_ft_param(&priority,
3073 &num_entries,
3074 &num_groups);
3075 prio = &dev->flow_db->prios[MLX5_IB_FLOW_LEFTOVERS_PRIO];

--- 15 unchanged lines hidden ---

3091 if (!ns)
3092 return ERR_PTR(-ENOTSUPP);
3093
3094 if (num_entries > max_table_size)
3095 return ERR_PTR(-ENOMEM);
3096
3097 ft = prio->flow_table;
3098 if (!ft)
3076 } else if (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
3077 flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT) {
3078 ns = mlx5_get_flow_namespace(dev->mdev,
3079 MLX5_FLOW_NAMESPACE_LEFTOVERS);
3080 build_leftovers_ft_param(&priority,
3081 &num_entries,
3082 &num_groups);
3083 prio = &dev->flow_db->prios[MLX5_IB_FLOW_LEFTOVERS_PRIO];

--- 15 unchanged lines hidden ---

3099 if (!ns)
3100 return ERR_PTR(-ENOTSUPP);
3101
3102 if (num_entries > max_table_size)
3103 return ERR_PTR(-ENOMEM);
3104
3105 ft = prio->flow_table;
3106 if (!ft)
3099 return _get_prio(ns, prio, priority, num_entries, num_groups,
3100 flags);
3107 return _get_prio(ns, prio, priority, num_entries, num_groups);
3101
3102 return prio;
3103}
3104
3105static void set_underlay_qp(struct mlx5_ib_dev *dev,
3106 struct mlx5_flow_spec *spec,
3107 u32 underlay_qpn)
3108{

--- 150 unchanged lines hidden ---

3259 u32 prev_type = 0;
3260 int err = 0;
3261 int dest_num = 0;
3262 bool is_egress = flow_attr->flags & IB_FLOW_ATTR_FLAGS_EGRESS;
3263
3264 if (!is_valid_attr(dev->mdev, flow_attr))
3265 return ERR_PTR(-EINVAL);
3266
3108
3109 return prio;
3110}
3111
3112static void set_underlay_qp(struct mlx5_ib_dev *dev,
3113 struct mlx5_flow_spec *spec,
3114 u32 underlay_qpn)
3115{

--- 150 unchanged lines hidden ---

3266 u32 prev_type = 0;
3267 int err = 0;
3268 int dest_num = 0;
3269 bool is_egress = flow_attr->flags & IB_FLOW_ATTR_FLAGS_EGRESS;
3270
3271 if (!is_valid_attr(dev->mdev, flow_attr))
3272 return ERR_PTR(-EINVAL);
3273
3267 if (dev->rep && is_egress)
3268 return ERR_PTR(-EINVAL);
3269
3270 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
3271 handler = kzalloc(sizeof(*handler), GFP_KERNEL);
3272 if (!handler || !spec) {
3273 err = -ENOMEM;
3274 goto free;
3275 }
3276
3277 INIT_LIST_HEAD(&handler->list);

--- 371 unchanged lines hidden ---

3649unlock:
3650 mutex_unlock(&dev->flow_db->lock);
3651 kfree(dst);
3652free_ucmd:
3653 kfree(ucmd);
3654 return ERR_PTR(err);
3655}
3656
3274 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
3275 handler = kzalloc(sizeof(*handler), GFP_KERNEL);
3276 if (!handler || !spec) {
3277 err = -ENOMEM;
3278 goto free;
3279 }
3280
3281 INIT_LIST_HEAD(&handler->list);

--- 371 unchanged lines hidden ---

3653unlock:
3654 mutex_unlock(&dev->flow_db->lock);
3655 kfree(dst);
3656free_ucmd:
3657 kfree(ucmd);
3658 return ERR_PTR(err);
3659}
3660
3657static struct mlx5_ib_flow_prio *
3658_get_flow_table(struct mlx5_ib_dev *dev,
3659 struct mlx5_ib_flow_matcher *fs_matcher,
3660 bool mcast)
3661static struct mlx5_ib_flow_prio *_get_flow_table(struct mlx5_ib_dev *dev,
3662 int priority, bool mcast)
3661{
3663{
3664 int max_table_size;
3662 struct mlx5_flow_namespace *ns = NULL;
3663 struct mlx5_ib_flow_prio *prio;
3665 struct mlx5_flow_namespace *ns = NULL;
3666 struct mlx5_ib_flow_prio *prio;
3664 int max_table_size;
3665 u32 flags = 0;
3666 int priority;
3667
3667
3668 if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_BYPASS) {
3669 max_table_size = BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
3670 log_max_ft_size));
3671 if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, decap))
3672 flags |= MLX5_FLOW_TABLE_TUNNEL_EN_DECAP;
3673 if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
3674 reformat_l3_tunnel_to_l2))
3675 flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
3676 } else { /* Can only be MLX5_FLOW_NAMESPACE_EGRESS */
3677 max_table_size = BIT(MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev,
3678 log_max_ft_size));
3679 if (MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, reformat))
3680 flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
3681 }
3682
3668 max_table_size = BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
3669 log_max_ft_size));
3683 if (max_table_size < MLX5_FS_MAX_ENTRIES)
3684 return ERR_PTR(-ENOMEM);
3685
3686 if (mcast)
3687 priority = MLX5_IB_FLOW_MCAST_PRIO;
3688 else
3670 if (max_table_size < MLX5_FS_MAX_ENTRIES)
3671 return ERR_PTR(-ENOMEM);
3672
3673 if (mcast)
3674 priority = MLX5_IB_FLOW_MCAST_PRIO;
3675 else
3689 priority = ib_prio_to_core_prio(fs_matcher->priority, false);
3676 priority = ib_prio_to_core_prio(priority, false);
3690
3677
3691 ns = mlx5_get_flow_namespace(dev->mdev, fs_matcher->ns_type);
3678 ns = mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_BYPASS);
3692 if (!ns)
3693 return ERR_PTR(-ENOTSUPP);
3694
3679 if (!ns)
3680 return ERR_PTR(-ENOTSUPP);
3681
3695 if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_BYPASS)
3696 prio = &dev->flow_db->prios[priority];
3697 else
3698 prio = &dev->flow_db->egress_prios[priority];
3682 prio = &dev->flow_db->prios[priority];
3699
3700 if (prio->flow_table)
3701 return prio;
3702
3703 return _get_prio(ns, prio, priority, MLX5_FS_MAX_ENTRIES,
3683
3684 if (prio->flow_table)
3685 return prio;
3686
3687 return _get_prio(ns, prio, priority, MLX5_FS_MAX_ENTRIES,
3704 MLX5_FS_MAX_TYPES, flags);
3688 MLX5_FS_MAX_TYPES);
3705}
3706
3707static struct mlx5_ib_flow_handler *
3708_create_raw_flow_rule(struct mlx5_ib_dev *dev,
3709 struct mlx5_ib_flow_prio *ft_prio,
3710 struct mlx5_flow_destination *dst,
3711 struct mlx5_ib_flow_matcher *fs_matcher,
3689}
3690
3691static struct mlx5_ib_flow_handler *
3692_create_raw_flow_rule(struct mlx5_ib_dev *dev,
3693 struct mlx5_ib_flow_prio *ft_prio,
3694 struct mlx5_flow_destination *dst,
3695 struct mlx5_ib_flow_matcher *fs_matcher,
3712 struct mlx5_flow_act *flow_act,
3713 void *cmd_in, int inlen)
3714{
3715 struct mlx5_ib_flow_handler *handler;
3696 void *cmd_in, int inlen)
3697{
3698 struct mlx5_ib_flow_handler *handler;
3699 struct mlx5_flow_act flow_act = {.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG};
3716 struct mlx5_flow_spec *spec;
3717 struct mlx5_flow_table *ft = ft_prio->flow_table;
3718 int err = 0;
3719
3720 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
3721 handler = kzalloc(sizeof(*handler), GFP_KERNEL);
3722 if (!handler || !spec) {
3723 err = -ENOMEM;
3724 goto free;
3725 }
3726
3727 INIT_LIST_HEAD(&handler->list);
3728
3729 memcpy(spec->match_value, cmd_in, inlen);
3730 memcpy(spec->match_criteria, fs_matcher->matcher_mask.match_params,
3731 fs_matcher->mask_len);
3732 spec->match_criteria_enable = fs_matcher->match_criteria_enable;
3733
3700 struct mlx5_flow_spec *spec;
3701 struct mlx5_flow_table *ft = ft_prio->flow_table;
3702 int err = 0;
3703
3704 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
3705 handler = kzalloc(sizeof(*handler), GFP_KERNEL);
3706 if (!handler || !spec) {
3707 err = -ENOMEM;
3708 goto free;
3709 }
3710
3711 INIT_LIST_HEAD(&handler->list);
3712
3713 memcpy(spec->match_value, cmd_in, inlen);
3714 memcpy(spec->match_criteria, fs_matcher->matcher_mask.match_params,
3715 fs_matcher->mask_len);
3716 spec->match_criteria_enable = fs_matcher->match_criteria_enable;
3717
3718 flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
3734 handler->rule = mlx5_add_flow_rules(ft, spec,
3719 handler->rule = mlx5_add_flow_rules(ft, spec,
3735 flow_act, dst, 1);
3720 &flow_act, dst, 1);
3736
3737 if (IS_ERR(handler->rule)) {
3738 err = PTR_ERR(handler->rule);
3739 goto free;
3740 }
3741
3742 ft_prio->refcount++;
3743 handler->prio = ft_prio;

--- 45 unchanged lines hidden ---

3789 return true;
3790
3791 return false;
3792}
3793
3794struct mlx5_ib_flow_handler *
3795mlx5_ib_raw_fs_rule_add(struct mlx5_ib_dev *dev,
3796 struct mlx5_ib_flow_matcher *fs_matcher,
3721
3722 if (IS_ERR(handler->rule)) {
3723 err = PTR_ERR(handler->rule);
3724 goto free;
3725 }
3726
3727 ft_prio->refcount++;
3728 handler->prio = ft_prio;

--- 45 unchanged lines hidden (view full) ---

3774 return true;
3775
3776 return false;
3777}
3778
3779struct mlx5_ib_flow_handler *
3780mlx5_ib_raw_fs_rule_add(struct mlx5_ib_dev *dev,
3781 struct mlx5_ib_flow_matcher *fs_matcher,
3797 struct mlx5_flow_act *flow_act,
3798 void *cmd_in, int inlen, int dest_id,
3799 int dest_type)
3800{
3801 struct mlx5_flow_destination *dst;
3802 struct mlx5_ib_flow_prio *ft_prio;
3782 void *cmd_in, int inlen, int dest_id,
3783 int dest_type)
3784{
3785 struct mlx5_flow_destination *dst;
3786 struct mlx5_ib_flow_prio *ft_prio;
3787 int priority = fs_matcher->priority;
3803 struct mlx5_ib_flow_handler *handler;
3804 bool mcast;
3805 int err;
3806
3807 if (fs_matcher->flow_type != MLX5_IB_FLOW_TYPE_NORMAL)
3808 return ERR_PTR(-EOPNOTSUPP);
3809
3810 if (fs_matcher->priority > MLX5_IB_FLOW_LAST_PRIO)
3811 return ERR_PTR(-ENOMEM);
3812
3813 dst = kzalloc(sizeof(*dst), GFP_KERNEL);
3814 if (!dst)
3815 return ERR_PTR(-ENOMEM);
3816
3817 mcast = raw_fs_is_multicast(fs_matcher, cmd_in);
3818 mutex_lock(&dev->flow_db->lock);
3819
3788 struct mlx5_ib_flow_handler *handler;
3789 bool mcast;
3790 int err;
3791
3792 if (fs_matcher->flow_type != MLX5_IB_FLOW_TYPE_NORMAL)
3793 return ERR_PTR(-EOPNOTSUPP);
3794
3795 if (fs_matcher->priority > MLX5_IB_FLOW_LAST_PRIO)
3796 return ERR_PTR(-ENOMEM);
3797
3798 dst = kzalloc(sizeof(*dst), GFP_KERNEL);
3799 if (!dst)
3800 return ERR_PTR(-ENOMEM);
3801
3802 mcast = raw_fs_is_multicast(fs_matcher, cmd_in);
3803 mutex_lock(&dev->flow_db->lock);
3804
3820 ft_prio = _get_flow_table(dev, fs_matcher, mcast);
3805 ft_prio = _get_flow_table(dev, priority, mcast);
3821 if (IS_ERR(ft_prio)) {
3822 err = PTR_ERR(ft_prio);
3823 goto unlock;
3824 }
3825
3826 if (dest_type == MLX5_FLOW_DESTINATION_TYPE_TIR) {
3827 dst->type = dest_type;
3828 dst->tir_num = dest_id;
3806 if (IS_ERR(ft_prio)) {
3807 err = PTR_ERR(ft_prio);
3808 goto unlock;
3809 }
3810
3811 if (dest_type == MLX5_FLOW_DESTINATION_TYPE_TIR) {
3812 dst->type = dest_type;
3813 dst->tir_num = dest_id;
3829 flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
3830 } else if (dest_type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) {
3814 } else {
3831 dst->type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM;
3832 dst->ft_num = dest_id;
3815 dst->type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM;
3816 dst->ft_num = dest_id;
3833 flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
3834 } else {
3835 dst->type = MLX5_FLOW_DESTINATION_TYPE_PORT;
3836 flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_ALLOW;
3837 }
3838
3817 }
3818
3839 handler = _create_raw_flow_rule(dev, ft_prio, dst, fs_matcher, flow_act,
3840 cmd_in, inlen);
3819 handler = _create_raw_flow_rule(dev, ft_prio, dst, fs_matcher, cmd_in,
3820 inlen);
3841
3842 if (IS_ERR(handler)) {
3843 err = PTR_ERR(handler);
3844 goto destroy_ft;
3845 }
3846
3847 mutex_unlock(&dev->flow_db->lock);
3848 atomic_inc(&fs_matcher->usecnt);

--- 161 unchanged lines hidden ---

4010 switch (action->type) {
4011 case IB_FLOW_ACTION_ESP:
4012 /*
4013 * We only support aes_gcm by now, so we implicitly know this is
4014 * the underline crypto.
4015 */
4016 mlx5_accel_esp_destroy_xfrm(maction->esp_aes_gcm.ctx);
4017 break;
3821
3822 if (IS_ERR(handler)) {
3823 err = PTR_ERR(handler);
3824 goto destroy_ft;
3825 }
3826
3827 mutex_unlock(&dev->flow_db->lock);
3828 atomic_inc(&fs_matcher->usecnt);

--- 161 unchanged lines hidden ---

3990 switch (action->type) {
3991 case IB_FLOW_ACTION_ESP:
3992 /*
3993 * We only support aes_gcm by now, so we implicitly know this is
3994 * the underline crypto.
3995 */
3996 mlx5_accel_esp_destroy_xfrm(maction->esp_aes_gcm.ctx);
3997 break;
4018 case IB_FLOW_ACTION_UNSPECIFIED:
4019 mlx5_ib_destroy_flow_action_raw(maction);
4020 break;
4021 default:
4022 WARN_ON(true);
4023 break;
4024 }
4025
4026 kfree(maction);
4027 return 0;
4028}
4029
4030static int mlx5_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
4031{
4032 struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
4033 struct mlx5_ib_qp *mqp = to_mqp(ibqp);
4034 int err;
3998 default:
3999 WARN_ON(true);
4000 break;
4001 }
4002
4003 kfree(maction);
4004 return 0;
4005}
4006
4007static int mlx5_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
4008{
4009 struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
4010 struct mlx5_ib_qp *mqp = to_mqp(ibqp);
4011 int err;
4035 u16 uid;
4036
4012
4037 uid = ibqp->pd ?
4038 to_mpd(ibqp->pd)->uid : 0;
4039
4040 if (mqp->flags & MLX5_IB_QP_UNDERLAY) {
4041 mlx5_ib_dbg(dev, "Attaching a multi cast group to underlay QP is not supported\n");
4042 return -EOPNOTSUPP;
4043 }
4044
4013 if (mqp->flags & MLX5_IB_QP_UNDERLAY) {
4014 mlx5_ib_dbg(dev, "Attaching a multi cast group to underlay QP is not supported\n");
4015 return -EOPNOTSUPP;
4016 }
4017
4045 err = mlx5_cmd_attach_mcg(dev->mdev, gid, ibqp->qp_num, uid);
4018 err = mlx5_core_attach_mcg(dev->mdev, gid, ibqp->qp_num);
4046 if (err)
4047 mlx5_ib_warn(dev, "failed attaching QPN 0x%x, MGID %pI6\n",
4048 ibqp->qp_num, gid->raw);
4049
4050 return err;
4051}
4052
4053static int mlx5_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
4054{
4055 struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
4056 int err;
4019 if (err)
4020 mlx5_ib_warn(dev, "failed attaching QPN 0x%x, MGID %pI6\n",
4021 ibqp->qp_num, gid->raw);
4022
4023 return err;
4024}
4025
4026static int mlx5_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
4027{
4028 struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
4029 int err;
4057 u16 uid;
4058
4030
4059 uid = ibqp->pd ?
4060 to_mpd(ibqp->pd)->uid : 0;
4061 err = mlx5_cmd_detach_mcg(dev->mdev, gid, ibqp->qp_num, uid);
4031 err = mlx5_core_detach_mcg(dev->mdev, gid, ibqp->qp_num);
4062 if (err)
4063 mlx5_ib_warn(dev, "failed detaching QPN 0x%x, MGID %pI6\n",
4064 ibqp->qp_num, gid->raw);
4065
4066 return err;
4067}
4068
4069static int init_node_data(struct mlx5_ib_dev *dev)

--- 1118 unchanged lines hidden ---

5188 if (ret)
5189 return ret;
5190 }
5191
5192done:
5193 return num_counters;
5194}
5195
4032 if (err)
4033 mlx5_ib_warn(dev, "failed detaching QPN 0x%x, MGID %pI6\n",
4034 ibqp->qp_num, gid->raw);
4035
4036 return err;
4037}
4038
4039static int init_node_data(struct mlx5_ib_dev *dev)

--- 1118 unchanged lines hidden ---

5158 if (ret)
5159 return ret;
5160 }
5161
5162done:
5163 return num_counters;
5164}
5165
5196static struct net_device*
5197mlx5_ib_alloc_rdma_netdev(struct ib_device *hca,
5198 u8 port_num,
5199 enum rdma_netdev_t type,
5200 const char *name,
5201 unsigned char name_assign_type,
5202 void (*setup)(struct net_device *))
5166static int mlx5_ib_rn_get_params(struct ib_device *device, u8 port_num,
5167 enum rdma_netdev_t type,
5168 struct rdma_netdev_alloc_params *params)
5203{
5169{
5204 struct net_device *netdev;
5205
5206 if (type != RDMA_NETDEV_IPOIB)
5170 if (type != RDMA_NETDEV_IPOIB)
5207 return ERR_PTR(-EOPNOTSUPP);
5171 return -EOPNOTSUPP;
5208
5172
5209 netdev = mlx5_rdma_netdev_alloc(to_mdev(hca)->mdev, hca,
5210 name, setup);
5211 return netdev;
5173 return mlx5_rdma_rn_get_params(to_mdev(device)->mdev, device, params);
5212}
5213
5214static void delay_drop_debugfs_cleanup(struct mlx5_ib_dev *dev)
5215{
5216 if (!dev->delay_drop.dbg)
5217 return;
5218 debugfs_remove_recursive(dev->delay_drop.dbg->dir_debugfs);
5219 kfree(dev->delay_drop.dbg);

--- 441 unchanged lines hidden ---

5661 cleanup_srcu_struct(&dev->mr_srcu);
5662#endif
5663 kfree(dev->port);
5664}
5665
5666int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
5667{
5668 struct mlx5_core_dev *mdev = dev->mdev;
5174}
5175
5176static void delay_drop_debugfs_cleanup(struct mlx5_ib_dev *dev)
5177{
5178 if (!dev->delay_drop.dbg)
5179 return;
5180 debugfs_remove_recursive(dev->delay_drop.dbg->dir_debugfs);
5181 kfree(dev->delay_drop.dbg);

--- 441 unchanged lines hidden ---

5623 cleanup_srcu_struct(&dev->mr_srcu);
5624#endif
5625 kfree(dev->port);
5626}
5627
5628int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
5629{
5630 struct mlx5_core_dev *mdev = dev->mdev;
5631 const char *name;
5669 int err;
5670 int i;
5671
5672 dev->port = kcalloc(dev->num_ports, sizeof(*dev->port),
5673 GFP_KERNEL);
5674 if (!dev->port)
5675 return -ENOMEM;
5676

--- 16 unchanged lines hidden ---

5693 err = get_port_caps(dev, mlx5_core_native_port_num(mdev));
5694 }
5695 if (err)
5696 goto err_mp;
5697
5698 if (mlx5_use_mad_ifc(dev))
5699 get_ext_port_caps(dev);
5700
5632 int err;
5633 int i;
5634
5635 dev->port = kcalloc(dev->num_ports, sizeof(*dev->port),
5636 GFP_KERNEL);
5637 if (!dev->port)
5638 return -ENOMEM;
5639

--- 16 unchanged lines hidden ---

5656 err = get_port_caps(dev, mlx5_core_native_port_num(mdev));
5657 }
5658 if (err)
5659 goto err_mp;
5660
5661 if (mlx5_use_mad_ifc(dev))
5662 get_ext_port_caps(dev);
5663
5664 if (!mlx5_lag_is_active(mdev))
5665 name = "mlx5_%d";
5666 else
5667 name = "mlx5_bond_%d";
5668
5669 strlcpy(dev->ib_dev.name, name, IB_DEVICE_NAME_MAX);
5701 dev->ib_dev.owner = THIS_MODULE;
5702 dev->ib_dev.node_type = RDMA_NODE_IB_CA;
5703 dev->ib_dev.local_dma_lkey = 0 /* not supported for now */;
5704 dev->ib_dev.phys_port_cnt = dev->num_ports;
5705 dev->ib_dev.num_comp_vectors =
5706 dev->mdev->priv.eq_table.num_comp_vectors;
5707 dev->ib_dev.dev.parent = &mdev->pdev->dev;
5708

--- 133 unchanged lines hidden ---

5842 dev->ib_dev.attach_mcast = mlx5_ib_mcg_attach;
5843 dev->ib_dev.detach_mcast = mlx5_ib_mcg_detach;
5844 dev->ib_dev.process_mad = mlx5_ib_process_mad;
5845 dev->ib_dev.alloc_mr = mlx5_ib_alloc_mr;
5846 dev->ib_dev.map_mr_sg = mlx5_ib_map_mr_sg;
5847 dev->ib_dev.check_mr_status = mlx5_ib_check_mr_status;
5848 dev->ib_dev.get_dev_fw_str = get_dev_fw_str;
5849 dev->ib_dev.get_vector_affinity = mlx5_ib_get_vector_affinity;
5670 dev->ib_dev.owner = THIS_MODULE;
5671 dev->ib_dev.node_type = RDMA_NODE_IB_CA;
5672 dev->ib_dev.local_dma_lkey = 0 /* not supported for now */;
5673 dev->ib_dev.phys_port_cnt = dev->num_ports;
5674 dev->ib_dev.num_comp_vectors =
5675 dev->mdev->priv.eq_table.num_comp_vectors;
5676 dev->ib_dev.dev.parent = &mdev->pdev->dev;
5677

--- 133 unchanged lines hidden ---

5811 dev->ib_dev.attach_mcast = mlx5_ib_mcg_attach;
5812 dev->ib_dev.detach_mcast = mlx5_ib_mcg_detach;
5813 dev->ib_dev.process_mad = mlx5_ib_process_mad;
5814 dev->ib_dev.alloc_mr = mlx5_ib_alloc_mr;
5815 dev->ib_dev.map_mr_sg = mlx5_ib_map_mr_sg;
5816 dev->ib_dev.check_mr_status = mlx5_ib_check_mr_status;
5817 dev->ib_dev.get_dev_fw_str = get_dev_fw_str;
5818 dev->ib_dev.get_vector_affinity = mlx5_ib_get_vector_affinity;
5850 if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads))
5851 dev->ib_dev.alloc_rdma_netdev = mlx5_ib_alloc_rdma_netdev;
5819 if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads) &&
5820 IS_ENABLED(CONFIG_MLX5_CORE_IPOIB))
5821 dev->ib_dev.rdma_netdev_get_params = mlx5_ib_rn_get_params;
5852
5853 if (mlx5_core_is_pf(mdev)) {
5854 dev->ib_dev.get_vf_config = mlx5_ib_get_vf_config;
5855 dev->ib_dev.set_vf_link_state = mlx5_ib_set_vf_link_state;
5856 dev->ib_dev.get_vf_stats = mlx5_ib_get_vf_stats;
5857 dev->ib_dev.set_vf_guid = mlx5_ib_set_vf_guid;
5858 }
5859

--- 38 unchanged lines hidden ---

5898
5899 err = init_node_data(dev);
5900 if (err)
5901 return err;
5902
5903 if ((MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
5904 (MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) ||
5905 MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
5822
5823 if (mlx5_core_is_pf(mdev)) {
5824 dev->ib_dev.get_vf_config = mlx5_ib_get_vf_config;
5825 dev->ib_dev.set_vf_link_state = mlx5_ib_set_vf_link_state;
5826 dev->ib_dev.get_vf_stats = mlx5_ib_get_vf_stats;
5827 dev->ib_dev.set_vf_guid = mlx5_ib_set_vf_guid;
5828 }
5829

--- 38 unchanged lines hidden ---

5868
5869 err = init_node_data(dev);
5870 if (err)
5871 return err;
5872
5873 if ((MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
5874 (MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) ||
5875 MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
5906 mutex_init(&dev->lb.mutex);
5876 mutex_init(&dev->lb_mutex);
5907
5908 return 0;
5909}
5910
5911static int mlx5_ib_stage_non_default_cb(struct mlx5_ib_dev *dev)
5912{
5913 dev->ib_dev.get_port_immutable = mlx5_port_immutable;
5914 dev->ib_dev.query_port = mlx5_ib_query_port;

--- 190 unchanged lines hidden ---

6105
6106static int mlx5_ib_stage_populate_specs(struct mlx5_ib_dev *dev)
6107{
6108 return populate_specs_root(dev);
6109}
6110
6111int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev)
6112{
5877
5878 return 0;
5879}
5880
5881static int mlx5_ib_stage_non_default_cb(struct mlx5_ib_dev *dev)
5882{
5883 dev->ib_dev.get_port_immutable = mlx5_port_immutable;
5884 dev->ib_dev.query_port = mlx5_ib_query_port;

--- 190 unchanged lines hidden ---

6075
6076static int mlx5_ib_stage_populate_specs(struct mlx5_ib_dev *dev)
6077{
6078 return populate_specs_root(dev);
6079}
6080
6081int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev)
6082{
6113 const char *name;
6114
6115 if (!mlx5_lag_is_active(dev->mdev))
6116 name = "mlx5_%d";
6117 else
6118 name = "mlx5_bond_%d";
6119 return ib_register_device(&dev->ib_dev, name, NULL);
6083 return ib_register_device(&dev->ib_dev, NULL);
6120}
6121
6122void mlx5_ib_stage_pre_ib_reg_umr_cleanup(struct mlx5_ib_dev *dev)
6123{
6124 destroy_umrc_res(dev);
6125}
6126
6127void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev)

--- 51 unchanged lines hidden ---

6179{
6180 /* Number of stages to cleanup */
6181 while (stage) {
6182 stage--;
6183 if (profile->stage[stage].cleanup)
6184 profile->stage[stage].cleanup(dev);
6185 }
6186
6084}
6085
6086void mlx5_ib_stage_pre_ib_reg_umr_cleanup(struct mlx5_ib_dev *dev)
6087{
6088 destroy_umrc_res(dev);
6089}
6090
6091void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev)

--- 51 unchanged lines hidden ---

6143{
6144 /* Number of stages to cleanup */
6145 while (stage) {
6146 stage--;
6147 if (profile->stage[stage].cleanup)
6148 profile->stage[stage].cleanup(dev);
6149 }
6150
6187 if (dev->devx_whitelist_uid)
6188 mlx5_ib_devx_destroy(dev, dev->devx_whitelist_uid);
6189 ib_dealloc_device((struct ib_device *)dev);
6190}
6191
6192void *__mlx5_ib_add(struct mlx5_ib_dev *dev,
6193 const struct mlx5_ib_profile *profile)
6194{
6195 int err;
6196 int i;
6151 ib_dealloc_device((struct ib_device *)dev);
6152}
6153
6154void *__mlx5_ib_add(struct mlx5_ib_dev *dev,
6155 const struct mlx5_ib_profile *profile)
6156{
6157 int err;
6158 int i;
6197 int uid;
6198
6159
6160 printk_once(KERN_INFO "%s", mlx5_version);
6161
6199 for (i = 0; i < MLX5_IB_STAGE_MAX; i++) {
6200 if (profile->stage[i].init) {
6201 err = profile->stage[i].init(dev);
6202 if (err)
6203 goto err_out;
6204 }
6205 }
6206
6162 for (i = 0; i < MLX5_IB_STAGE_MAX; i++) {
6163 if (profile->stage[i].init) {
6164 err = profile->stage[i].init(dev);
6165 if (err)
6166 goto err_out;
6167 }
6168 }
6169
6207 uid = mlx5_ib_devx_create(dev);
6208 if (uid > 0)
6209 dev->devx_whitelist_uid = uid;
6210
6211 dev->profile = profile;
6212 dev->ib_active = true;
6213
6214 return dev;
6215
6216err_out:
6217 __mlx5_ib_remove(dev, profile, i);
6218

--- 250 unchanged lines hidden ---
6170 dev->profile = profile;
6171 dev->ib_active = true;
6172
6173 return dev;
6174
6175err_out:
6176 __mlx5_ib_remove(dev, profile, i);
6177

--- 250 unchanged lines hidden ---