main.c (f27a0d50a4bc2861b472c2e3740d63a29d1ac460) → main.c (a560f1d9af4be84ee91d1a47382cacf620eb4a79)
1/*
2 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:

--- 1557 unchanged lines hidden ---

1566
1567 bfregi = &context->bfregi;
1568 for (i = 0; i < bfregi->num_sys_pages; i++)
1569 if (i < bfregi->num_static_sys_pages ||
1570 bfregi->sys_pages[i] != MLX5_IB_INVALID_UAR_INDEX)
1571 mlx5_cmd_free_uar(dev->mdev, bfregi->sys_pages[i]);
1572}
1573
1574static int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev)
1575{
1576 int err = 0;
1577
1578 mutex_lock(&dev->lb.mutex);
1579 dev->lb.user_td++;
1580
1581 if (dev->lb.user_td == 2)
1582 err = mlx5_nic_vport_update_local_lb(dev->mdev, true);
1583
1584 mutex_unlock(&dev->lb.mutex);
1585
1586 return err;
1587}
1588
1589static void mlx5_ib_disable_lb(struct mlx5_ib_dev *dev)
1590{
1591 mutex_lock(&dev->lb.mutex);
1592 dev->lb.user_td--;
1593
1594 if (dev->lb.user_td < 2)
1595 mlx5_nic_vport_update_local_lb(dev->mdev, false);
1596
1597 mutex_unlock(&dev->lb.mutex);
1598}
1599
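The two helpers added above factor the loopback bookkeeping out of the transport-domain paths that follow: each allocated transport domain counts as one user, and mlx5_nic_vport_update_local_lb() is only called when the count crosses two (presumably because local loopback only matters once a second local consumer exists). A minimal userspace sketch of this count-gated toggle, with a pthread mutex standing in for the kernel mutex and a stub for the firmware call; all names below are illustrative, not from the driver:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Stub standing in for mlx5_nic_vport_update_local_lb(). */
static int update_local_lb(bool enable)
{
	printf("local loopback -> %s\n", enable ? "on" : "off");
	return 0;
}

static pthread_mutex_t lb_mutex = PTHREAD_MUTEX_INITIALIZER;
static int user_td;

/* Take a reference; the hardware is touched only on the 1 -> 2 transition. */
static int enable_lb(void)
{
	int err = 0;

	pthread_mutex_lock(&lb_mutex);
	user_td++;
	if (user_td == 2)
		err = update_local_lb(true);
	pthread_mutex_unlock(&lb_mutex);
	return err;
}

/* Drop a reference; loopback goes off once fewer than two users remain. */
static void disable_lb(void)
{
	pthread_mutex_lock(&lb_mutex);
	user_td--;
	if (user_td < 2)
		update_local_lb(false);
	pthread_mutex_unlock(&lb_mutex);
}

int main(void)
{
	enable_lb();	/* one user: no hardware call */
	enable_lb();	/* two users: loopback on */
	disable_lb();	/* back to one: loopback off */
	disable_lb();
	return 0;
}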
1574static int mlx5_ib_alloc_transport_domain(struct mlx5_ib_dev *dev, u32 *tdn)
1575{
1576 int err;
1577
1578 if (!MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
1579 return 0;
1580
1581 err = mlx5_core_alloc_transport_domain(dev->mdev, tdn);
1582 if (err)
1583 return err;
1584
1585 if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) ||
1586 (!MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) &&
1587 !MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
1588 return err;
1589
1590 mutex_lock(&dev->lb_mutex);
1591 dev->user_td++;
1592
1593 if (dev->user_td == 2)
1594 err = mlx5_nic_vport_update_local_lb(dev->mdev, true);
1595
1596 mutex_unlock(&dev->lb_mutex);
1597 return err;
1616 return mlx5_ib_enable_lb(dev);
1598}
1599
1600static void mlx5_ib_dealloc_transport_domain(struct mlx5_ib_dev *dev, u32 tdn)
1601{
1602 if (!MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
1603 return;
1604
1605 mlx5_core_dealloc_transport_domain(dev->mdev, tdn);
1606
1607 if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) ||
1608 (!MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) &&
1609 !MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
1610 return;
1611
1612 mutex_lock(&dev->lb_mutex);
1613 dev->user_td--;
1614
1615 if (dev->user_td < 2)
1616 mlx5_nic_vport_update_local_lb(dev->mdev, false);
1617
1618 mutex_unlock(&dev->lb_mutex);
1631 mlx5_ib_disable_lb(dev);
1619}
1620
1621static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
1622 struct ib_udata *udata)
1623{
1624 struct mlx5_ib_dev *dev = to_mdev(ibdev);
1625 struct mlx5_ib_alloc_ucontext_req_v2 req = {};
1626 struct mlx5_ib_alloc_ucontext_resp resp = {};

--- 117 unchanged lines hidden ---

1744 }
1745
1746 if (MLX5_CAP_GEN(dev->mdev, dump_fill_mkey)) {
1747 err = mlx5_cmd_dump_fill_mkey(dev->mdev, &dump_fill_mkey);
1748 if (err)
1749 goto out_mdev;
1750 }
1751
1765 INIT_LIST_HEAD(&context->vma_private_list);
1766 mutex_init(&context->vma_private_list_mutex);
1752 INIT_LIST_HEAD(&context->db_page_list);
1753 mutex_init(&context->db_page_mutex);
1754
1755 resp.tot_bfregs = req.total_num_bfregs;
1756 resp.num_ports = dev->num_ports;
1757
1758 if (field_avail(typeof(resp), cqe_version, udata->outlen))
1759 resp.response_length += sizeof(resp.cqe_version);

--- 59 unchanged lines hidden ---

1819 goto out_mdev;
1820
1821 bfregi->ver = ver;
1822 bfregi->num_low_latency_bfregs = req.num_low_latency_bfregs;
1823 context->cqe_version = resp.cqe_version;
1824 context->lib_caps = req.lib_caps;
1825 print_lib_caps(dev, context->lib_caps);
1826
1827 if (mlx5_lag_is_active(dev->mdev)) {
1828 u8 port = mlx5_core_native_port_num(dev->mdev);
1829
1830 atomic_set(&context->tx_port_affinity,
1831 atomic_add_return(
1832 1, &dev->roce[port].tx_port_affinity));
1833 }
1834
1835 return &context->ibucontext;
1836
1837out_mdev:
1838 if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX)
1839 mlx5_ib_devx_destroy(dev, context);
1840out_td:
1841 mlx5_ib_dealloc_transport_domain(dev, context->tdn);
1842

--- 13 unchanged lines hidden ---

1856}
1857
1858static int mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
1859{
1860 struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
1861 struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
1862 struct mlx5_bfreg_info *bfregi;
1863
1864#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
1865 /* All umem's must be destroyed before destroying the ucontext. */
1866 mutex_lock(&ibcontext->per_mm_list_lock);
1867 WARN_ON(!list_empty(&ibcontext->per_mm_list));
1868 mutex_unlock(&ibcontext->per_mm_list_lock);
1869#endif
1870
1871 if (context->devx_uid)
1872 mlx5_ib_devx_destroy(dev, context);
1873
1874 bfregi = &context->bfregi;
1875 mlx5_ib_dealloc_transport_domain(dev, context->tdn);
1876
1877 deallocate_uars(dev, context);
1878 kfree(bfregi->sys_pages);

--- 29 unchanged lines hidden ---

1908}
1909
1910/* Index resides in an extra byte to enable larger values than 255 */
1911static int get_extended_index(unsigned long offset)
1912{
1913 return get_arg(offset) | ((offset >> 16) & 0xff) << 8;
1914}
1915
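get_extended_index() widens the mmap index past the 8-bit argument field. Assuming the usual mlx5 offset layout (argument in the low byte, command in the byte above it, spare index bits at 16..23, per get_arg()/get_command() in mlx5_ib.h), the high byte of a 16-bit index rides above the command byte. A self-contained round-trip demo; CMD_SHIFT and the packing in main() are illustrative:

#include <assert.h>
#include <stdio.h>

#define CMD_SHIFT 8	/* mirrors MLX5_IB_MMAP_CMD_SHIFT */

static int get_arg(unsigned long offset)
{
	return offset & ((1 << CMD_SHIFT) - 1);
}

/* Index resides in an extra byte (bits 16..23) to allow values > 255. */
static int get_extended_index(unsigned long offset)
{
	return get_arg(offset) | ((offset >> 16) & 0xff) << 8;
}

int main(void)
{
	unsigned long cmd = 5;		/* illustrative command value */
	unsigned long idx = 0x1234;	/* would not fit in the arg byte alone */
	unsigned long offset = (idx & 0xff) | (cmd << CMD_SHIFT) |
			       ((idx >> 8) & 0xff) << 16;

	assert(get_extended_index(offset) == 0x1234);
	printf("decoded index: 0x%x\n", get_extended_index(offset));
	return 0;
}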
1916static void mlx5_ib_vma_open(struct vm_area_struct *area)
1917{
1918 /* vma_open is called when a new VMA is created on top of our VMA. This
1919 * is done through either mremap flow or split_vma (usually due to
1920 * mlock, madvise, munmap, etc.) We do not support a clone of the VMA,
1921 * as this VMA is strongly hardware related. Therefore we set the
1922 * vm_ops of the newly created/cloned VMA to NULL, to prevent it from
1923 * calling us again and trying to do incorrect actions. We assume that
1924 * the original VMA size is exactly a single page, and therefore all
1925 * "splitting" operation will not happen to it.
1926 */
1927 area->vm_ops = NULL;
1928}
1916
1929
1930static void mlx5_ib_vma_close(struct vm_area_struct *area)
1931{
1932 struct mlx5_ib_vma_private_data *mlx5_ib_vma_priv_data;
1933
1934 /* It's guaranteed that all VMAs opened on a FD are closed before the
1935 * file itself is closed, therefore no sync is needed with the regular
1936 * closing flow. (e.g. mlx5 ib_dealloc_ucontext)
1937 * However need a sync with accessing the vma as part of
1938 * mlx5_ib_disassociate_ucontext.
1939 * The close operation is usually called under mm->mmap_sem except when
1940 * process is exiting.
1941 * The exiting case is handled explicitly as part of
1942 * mlx5_ib_disassociate_ucontext.
1943 */
1944 mlx5_ib_vma_priv_data = (struct mlx5_ib_vma_private_data *)area->vm_private_data;
1945
1946 /* setting the vma context pointer to null in the mlx5_ib driver's
1947 * private data, to protect a race condition in
1948 * mlx5_ib_disassociate_ucontext().
1949 */
1950 mlx5_ib_vma_priv_data->vma = NULL;
1951 mutex_lock(mlx5_ib_vma_priv_data->vma_private_list_mutex);
1952 list_del(&mlx5_ib_vma_priv_data->list);
1953 mutex_unlock(mlx5_ib_vma_priv_data->vma_private_list_mutex);
1954 kfree(mlx5_ib_vma_priv_data);
1955}
1956
1957static const struct vm_operations_struct mlx5_ib_vm_ops = {
1958 .open = mlx5_ib_vma_open,
1959 .close = mlx5_ib_vma_close
1960};
1961
1962static int mlx5_ib_set_vma_data(struct vm_area_struct *vma,
1963 struct mlx5_ib_ucontext *ctx)
1964{
1965 struct mlx5_ib_vma_private_data *vma_prv;
1966 struct list_head *vma_head = &ctx->vma_private_list;
1967
1968 vma_prv = kzalloc(sizeof(*vma_prv), GFP_KERNEL);
1969 if (!vma_prv)
1970 return -ENOMEM;
1971
1972 vma_prv->vma = vma;
1973 vma_prv->vma_private_list_mutex = &ctx->vma_private_list_mutex;
1974 vma->vm_private_data = vma_prv;
1975 vma->vm_ops = &mlx5_ib_vm_ops;
1976
1977 mutex_lock(&ctx->vma_private_list_mutex);
1978 list_add(&vma_prv->list, vma_head);
1979 mutex_unlock(&ctx->vma_private_list_mutex);
1980
1981 return 0;
1982}
1983
1917static void mlx5_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
1918{
1986 struct vm_area_struct *vma;
1987 struct mlx5_ib_vma_private_data *vma_private, *n;
1988 struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
1989
1990 mutex_lock(&context->vma_private_list_mutex);
1991 list_for_each_entry_safe(vma_private, n, &context->vma_private_list,
1992 list) {
1993 vma = vma_private->vma;
1994 zap_vma_ptes(vma, vma->vm_start, PAGE_SIZE);
1995 /* context going to be destroyed, should
1996 * not access ops any more.
1997 */
1998 vma->vm_flags &= ~(VM_SHARED | VM_MAYSHARE);
1999 vma->vm_ops = NULL;
2000 list_del(&vma_private->list);
2001 kfree(vma_private);
2002 }
2003 mutex_unlock(&context->vma_private_list_mutex);
1919}
1920
1921static inline char *mmap_cmd2str(enum mlx5_ib_mmap_cmd cmd)
1922{
1923 switch (cmd) {
1924 case MLX5_IB_MMAP_WC_PAGE:
1925 return "WC";
1926 case MLX5_IB_MMAP_REGULAR_PAGE:

--- 6 unchanged lines hidden ---

1933 return NULL;
1934 }
1935}
1936
1937static int mlx5_ib_mmap_clock_info_page(struct mlx5_ib_dev *dev,
1938 struct vm_area_struct *vma,
1939 struct mlx5_ib_ucontext *context)
1940{
2026 phys_addr_t pfn;
2027 int err;
2028
1941 if (vma->vm_end - vma->vm_start != PAGE_SIZE)
1942 return -EINVAL;
1943
1944 if (get_index(vma->vm_pgoff) != MLX5_IB_CLOCK_INFO_V1)
1945 return -EOPNOTSUPP;
1946
1947 if (vma->vm_flags & VM_WRITE)
1948 return -EPERM;
1949
1950 if (!dev->mdev->clock_info_page)
1951 return -EOPNOTSUPP;
1952
1953 return rdma_user_mmap_page(&context->ibucontext, vma,
1954 dev->mdev->clock_info_page, PAGE_SIZE);
2041 pfn = page_to_pfn(dev->mdev->clock_info_page);
2042 err = remap_pfn_range(vma, vma->vm_start, pfn, PAGE_SIZE,
2043 vma->vm_page_prot);
2044 if (err)
2045 return err;
2046
2047 return mlx5_ib_set_vma_data(vma, context);
1955}
1956
1957static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
1958 struct vm_area_struct *vma,
1959 struct mlx5_ib_ucontext *context)
1960{
1961 struct mlx5_bfreg_info *bfregi = &context->bfregi;
1962 int err;

--- 73 unchanged lines hidden ---

2036 }
2037 } else {
2038 uar_index = bfregi->sys_pages[idx];
2039 }
2040
2041 pfn = uar_index2pfn(dev, uar_index);
2042 mlx5_ib_dbg(dev, "uar idx 0x%lx, pfn %pa\n", idx, &pfn);
2043
2044 err = rdma_user_mmap_io(&context->ibucontext, vma, pfn, PAGE_SIZE,
2045 prot);
2137 vma->vm_page_prot = prot;
2138 err = io_remap_pfn_range(vma, vma->vm_start, pfn,
2139 PAGE_SIZE, vma->vm_page_prot);
2046 if (err) {
2047 mlx5_ib_err(dev,
2048 "rdma_user_mmap_io failed with error=%d, mmap_cmd=%s\n",
2142 "io_remap_pfn_range failed with error=%d, mmap_cmd=%s\n",
2049 err, mmap_cmd2str(cmd));
2144 err = -EAGAIN;
2050 goto err;
2051 }
2052
2148 err = mlx5_ib_set_vma_data(vma, context);
2149 if (err)
2150 goto err;
2151
2053 if (dyn_uar)
2054 bfregi->sys_pages[idx] = uar_index;
2055 return 0;
2056
2057err:
2058 if (!dyn_uar)
2059 return err;
2060

--- 8 unchanged lines hidden ---

2069static int dm_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
2070{
2071 struct mlx5_ib_ucontext *mctx = to_mucontext(context);
2072 struct mlx5_ib_dev *dev = to_mdev(context->device);
2073 u16 page_idx = get_extended_index(vma->vm_pgoff);
2074 size_t map_size = vma->vm_end - vma->vm_start;
2075 u32 npages = map_size >> PAGE_SHIFT;
2076 phys_addr_t pfn;
2176 pgprot_t prot;
2077
2078 if (find_next_zero_bit(mctx->dm_pages, page_idx + npages, page_idx) !=
2079 page_idx + npages)
2080 return -EINVAL;
2081
2082 pfn = ((pci_resource_start(dev->mdev->pdev, 0) +
2083 MLX5_CAP64_DEV_MEM(dev->mdev, memic_bar_start_addr)) >>
2084 PAGE_SHIFT) +
2085 page_idx;
2086 return rdma_user_mmap_io(context, vma, pfn, map_size,
2087 pgprot_writecombine(vma->vm_page_prot));
2186 prot = pgprot_writecombine(vma->vm_page_prot);
2187 vma->vm_page_prot = prot;
2188
2189 if (io_remap_pfn_range(vma, vma->vm_start, pfn, map_size,
2190 vma->vm_page_prot))
2191 return -EAGAIN;
2192
2193 return mlx5_ib_set_vma_data(vma, mctx);
2088}
2089
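The find_next_zero_bit() test at the top of dm_mmap() is the entire access check: the mapping is honoured only if every page in [page_idx, page_idx + npages) is marked in the context's dm_pages bitmap, i.e. the first zero bit must lie at or beyond page_idx + npages. A userspace restatement of just that predicate (bitmap size and helper names invented for the demo):

#include <stdbool.h>
#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))

/* Stand-in for the context's dm_pages bitmap: bit i set means device
 * memory page i belongs to this context. */
static unsigned long dm_pages[4];

static void set_page_bit(unsigned int bit)
{
	dm_pages[bit / BITS_PER_LONG] |= 1UL << (bit % BITS_PER_LONG);
}

static bool test_page_bit(unsigned int bit)
{
	return dm_pages[bit / BITS_PER_LONG] >> (bit % BITS_PER_LONG) & 1;
}

/* Equivalent of the dm_mmap() check: find_next_zero_bit() returning
 * page_idx + npages means there is no zero bit inside the range. */
static bool range_allocated(unsigned int page_idx, unsigned int npages)
{
	for (unsigned int i = page_idx; i < page_idx + npages; i++)
		if (!test_page_bit(i))
			return false;
	return true;
}

int main(void)
{
	set_page_bit(3);
	set_page_bit(4);
	printf("%d\n", range_allocated(3, 2));	/* 1: pages 3 and 4 present */
	printf("%d\n", range_allocated(3, 3));	/* 0: page 5 was never set  */
	return 0;
}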
2090static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
2091{
2092 struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
2093 struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
2094 unsigned long command;
2095 phys_addr_t pfn;

--- 258 unchanged lines hidden ---

2354/* Field is the last supported field */
2355#define FIELDS_NOT_SUPPORTED(filter, field)\
2356 memchr_inv((void *)&filter.field +\
2357 sizeof(filter.field), 0,\
2358 sizeof(filter) -\
2359 offsetof(typeof(filter), field) -\
2360 sizeof(filter.field))
2361
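FIELDS_NOT_SUPPORTED() is how the parsers below reject specs whose trailing fields they do not understand: it scans every byte after the last supported field and reports non-NULL if any is set. The kernel's memchr_inv() has no libc equivalent, so the sketch supplies one; struct demo_filter and its fields are invented for illustration:

#include <stddef.h>
#include <stdio.h>

/* Userspace analog of the kernel's memchr_inv(): first byte differing
 * from c, or NULL when all n bytes match. */
static const void *memchr_inv_user(const void *p, int c, size_t n)
{
	const unsigned char *s = p;

	for (size_t i = 0; i < n; i++)
		if (s[i] != (unsigned char)c)
			return s + i;
	return NULL;
}

/* Hypothetical filter: this driver version knows fields up to 'mask';
 * anything after it must be zero or the spec is refused. */
struct demo_filter {
	unsigned int val;
	unsigned int mask;
	unsigned int future_field;	/* unknown to this version */
};

#define FIELDS_NOT_SUPPORTED(filter, field)                     \
	memchr_inv_user((const char *)&(filter).field +         \
			sizeof((filter).field), 0,              \
			sizeof(filter) -                        \
			offsetof(typeof(filter), field) -       \
			sizeof((filter).field))

int main(void)
{
	struct demo_filter f = { .val = 1, .mask = 0xff };

	printf("clean: %s\n", FIELDS_NOT_SUPPORTED(f, mask) ? "reject" : "ok");
	f.future_field = 1;	/* userspace set a newer field */
	printf("newer: %s\n", FIELDS_NOT_SUPPORTED(f, mask) ? "reject" : "ok");
	return 0;
}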
2362int parse_flow_flow_action(struct mlx5_ib_flow_action *maction,
2363 bool is_egress,
2364 struct mlx5_flow_act *action)
2468static int parse_flow_flow_action(const union ib_flow_spec *ib_spec,
2469 const struct ib_flow_attr *flow_attr,
2470 struct mlx5_flow_act *action)
2365{
2472 struct mlx5_ib_flow_action *maction = to_mflow_act(ib_spec->action.act);
2366
2367 switch (maction->ib_action.type) {
2368 case IB_FLOW_ACTION_ESP:
2369 if (action->action & (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
2370 MLX5_FLOW_CONTEXT_ACTION_DECRYPT))
2371 return -EINVAL;
2372 /* Currently only AES_GCM keymat is supported by the driver */
2373 action->esp_id = (uintptr_t)maction->esp_aes_gcm.ctx;
2374 action->action |= is_egress ?
2478 action->action |= flow_attr->flags & IB_FLOW_ATTR_FLAGS_EGRESS ?
2375 MLX5_FLOW_CONTEXT_ACTION_ENCRYPT :
2376 MLX5_FLOW_CONTEXT_ACTION_DECRYPT;
2377 return 0;
2378 case IB_FLOW_ACTION_UNSPECIFIED:
2379 if (maction->flow_action_raw.sub_type ==
2380 MLX5_IB_FLOW_ACTION_MODIFY_HEADER) {
2381 if (action->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
2382 return -EINVAL;
2383 action->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
2384 action->modify_id = maction->flow_action_raw.action_id;
2385 return 0;
2386 }
2387 if (maction->flow_action_raw.sub_type ==
2388 MLX5_IB_FLOW_ACTION_DECAP) {
2389 if (action->action & MLX5_FLOW_CONTEXT_ACTION_DECAP)
2390 return -EINVAL;
2391 action->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
2392 return 0;
2393 }
2394 if (maction->flow_action_raw.sub_type ==
2395 MLX5_IB_FLOW_ACTION_PACKET_REFORMAT) {
2396 if (action->action &
2397 MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT)
2398 return -EINVAL;
2399 action->action |=
2400 MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
2401 action->reformat_id =
2402 maction->flow_action_raw.action_id;
2403 return 0;
2404 }
2405 /* fall through */
2406 default:
2407 return -EOPNOTSUPP;
2408 }
2409}
2410
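Every sub-case of IB_FLOW_ACTION_UNSPECIFIED above follows the same idiom: test the action bit before OR-ing it in, so a spec naming the same action twice fails with -EINVAL rather than being silently merged. The idiom in isolation, with invented flag names:

#include <stdio.h>

/* Illustrative action bits, in the spirit of MLX5_FLOW_CONTEXT_ACTION_*. */
#define ACTION_MOD_HDR	(1u << 0)
#define ACTION_DECAP	(1u << 1)

struct flow_act {
	unsigned int action;
};

/* A second request for an already-set action is a malformed spec. */
static int add_action(struct flow_act *act, unsigned int bit)
{
	if (act->action & bit)
		return -1;	/* stands in for -EINVAL */
	act->action |= bit;
	return 0;
}

int main(void)
{
	struct flow_act act = { 0 };

	printf("%d\n", add_action(&act, ACTION_MOD_HDR));	/* 0  */
	printf("%d\n", add_action(&act, ACTION_MOD_HDR));	/* -1 */
	printf("%d\n", add_action(&act, ACTION_DECAP));		/* 0  */
	return 0;
}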
2411static int parse_flow_attr(struct mlx5_core_dev *mdev, u32 *match_c,
2412 u32 *match_v, const union ib_flow_spec *ib_spec,
2413 const struct ib_flow_attr *flow_attr,

--- 320 unchanged lines hidden ---

2734 break;
2735 case IB_FLOW_SPEC_ACTION_DROP:
2736 if (FIELDS_NOT_SUPPORTED(ib_spec->drop,
2737 LAST_DROP_FIELD))
2738 return -EOPNOTSUPP;
2739 action->action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
2740 break;
2741 case IB_FLOW_SPEC_ACTION_HANDLE:
2742 ret = parse_flow_flow_action(to_mflow_act(ib_spec->action.act),
2743 flow_attr->flags & IB_FLOW_ATTR_FLAGS_EGRESS, action);
2818 ret = parse_flow_flow_action(ib_spec, flow_attr, action);
2744 if (ret)
2745 return ret;
2746 break;
2747 case IB_FLOW_SPEC_ACTION_COUNT:
2748 if (FIELDS_NOT_SUPPORTED(ib_spec->flow_count,
2749 LAST_COUNTERS_FIELD))
2750 return -EOPNOTSUPP;
2751

--- 64 unchanged lines hidden ---

2816 bool is_ipsec = mlx5_fs_is_ipsec_flow(match_c);
2817 bool is_drop = flow_act->action & MLX5_FLOW_CONTEXT_ACTION_DROP;
2818
2819 /*
2820 * Currently only crypto is supported in egress, when regular egress
2821 * rules would be supported, always return VALID_SPEC_NA.
2822 */
2823 if (!is_crypto)
2824 return VALID_SPEC_NA;
2899 return egress ? VALID_SPEC_INVALID : VALID_SPEC_NA;
2825
2826 return is_crypto && is_ipsec &&
2827 (!egress || (!is_drop && !flow_act->has_flow_tag)) ?
2828 VALID_SPEC_VALID : VALID_SPEC_INVALID;
2829}
2830
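The egress validity logic visible above boils down to a small decision table: non-crypto specs are out of scope (and, in the right-hand version, rejected outright on egress), while a crypto+IPsec spec is valid on egress only if it neither drops nor carries a flow tag. A table-style restatement; the enclosing helper's name sits in the elided lines, so judge() below is a made-up stand-in:

#include <stdbool.h>
#include <stdio.h>

enum verdict { SPEC_NA, SPEC_VALID, SPEC_INVALID };	/* VALID_SPEC_* */

static enum verdict judge(bool is_crypto, bool is_ipsec, bool egress,
			  bool is_drop, bool has_flow_tag)
{
	if (!is_crypto)
		return egress ? SPEC_INVALID : SPEC_NA;

	return is_ipsec && (!egress || (!is_drop && !has_flow_tag)) ?
	       SPEC_VALID : SPEC_INVALID;
}

int main(void)
{
	printf("%d\n", judge(true, true, true, false, false));	  /* 1 VALID   */
	printf("%d\n", judge(true, true, true, true, false));	  /* 2 INVALID */
	printf("%d\n", judge(false, false, false, false, false)); /* 0 NA      */
	return 0;
}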
2831static bool is_valid_spec(struct mlx5_core_dev *mdev,
2832 const struct mlx5_flow_spec *spec,

--- 126 unchanged lines hidden ---

2959};
2960
2961#define MLX5_FS_MAX_TYPES 6
2962#define MLX5_FS_MAX_ENTRIES BIT(16)
2963
2964static struct mlx5_ib_flow_prio *_get_prio(struct mlx5_flow_namespace *ns,
2965 struct mlx5_ib_flow_prio *prio,
2966 int priority,
2967 int num_entries, int num_groups,
2968 u32 flags)
3042 int num_entries, int num_groups)
2969{
2970 struct mlx5_flow_table *ft;
2971
2972 ft = mlx5_create_auto_grouped_flow_table(ns, priority,
2973 num_entries,
2974 num_groups,
2975 0, flags);
3049 0, 0);
2976 if (IS_ERR(ft))
2977 return ERR_CAST(ft);
2978
2979 prio->flow_table = ft;
2980 prio->refcount = 0;
2981 return prio;
2982}
2983
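_get_prio() reports failure through the pointer itself: mlx5_create_auto_grouped_flow_table() returns either a real table or an errno folded into an out-of-range pointer, and ERR_CAST() merely re-types that error pointer so callers can keep using IS_ERR()/PTR_ERR(). A userspace re-creation of those helpers, roughly as linux/err.h defines them:

#include <stdio.h>

#define MAX_ERRNO 4095

/* Small negative errnos are folded into the top page of the address
 * space, which no valid allocation ever occupies. */
static inline void *ERR_PTR(long error)
{
	return (void *)error;
}

static inline long PTR_ERR(const void *ptr)
{
	return (long)ptr;
}

static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Hypothetical allocator in the role of mlx5_create_auto_grouped_flow_table(). */
static void *create_table(int fail)
{
	static int table;

	return fail ? ERR_PTR(-12 /* ENOMEM */) : (void *)&table;
}

int main(void)
{
	void *ft = create_table(1);

	if (IS_ERR(ft))
		printf("error %ld\n", PTR_ERR(ft));	/* error -12 */
	return 0;
}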
2984static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
2985 struct ib_flow_attr *flow_attr,
2986 enum flow_table_type ft_type)
2987{
2988 bool dont_trap = flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP;
2989 struct mlx5_flow_namespace *ns = NULL;
2990 struct mlx5_ib_flow_prio *prio;
2991 struct mlx5_flow_table *ft;
2992 int max_table_size;
2993 int num_entries;
2994 int num_groups;
2995 u32 flags = 0;
2996 int priority;
2997
2998 max_table_size = BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
2999 log_max_ft_size));
3000 if (flow_attr->type == IB_FLOW_ATTR_NORMAL) {
3001 enum mlx5_flow_namespace_type fn_type;
3002
3003 if (flow_is_multicast_only(flow_attr) &&
3004 !dont_trap)
3074 if (ft_type == MLX5_IB_FT_TX)
3075 priority = 0;
3076 else if (flow_is_multicast_only(flow_attr) &&
3077 !dont_trap)
3005 priority = MLX5_IB_FLOW_MCAST_PRIO;
3006 else
3007 priority = ib_prio_to_core_prio(flow_attr->priority,
3008 dont_trap);
3009 if (ft_type == MLX5_IB_FT_RX) {
3010 fn_type = MLX5_FLOW_NAMESPACE_BYPASS;
3011 prio = &dev->flow_db->prios[priority];
3012 if (!dev->rep &&
3013 MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, decap))
3014 flags |= MLX5_FLOW_TABLE_TUNNEL_EN_DECAP;
3015 if (!dev->rep &&
3016 MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
3017 reformat_l3_tunnel_to_l2))
3018 flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
3019 } else {
3020 max_table_size =
3021 BIT(MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev,
3022 log_max_ft_size));
3023 fn_type = MLX5_FLOW_NAMESPACE_EGRESS;
3024 prio = &dev->flow_db->egress_prios[priority];
3025 if (!dev->rep &&
3026 MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, reformat))
3027 flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
3028 }
3029 ns = mlx5_get_flow_namespace(dev->mdev, fn_type);
3082 ns = mlx5_get_flow_namespace(dev->mdev,
3083 ft_type == MLX5_IB_FT_TX ?
3084 MLX5_FLOW_NAMESPACE_EGRESS :
3085 MLX5_FLOW_NAMESPACE_BYPASS);
3030 num_entries = MLX5_FS_MAX_ENTRIES;
3031 num_groups = MLX5_FS_MAX_TYPES;
3088 prio = &dev->flow_db->prios[priority];
3032 } else if (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
3033 flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT) {
3034 ns = mlx5_get_flow_namespace(dev->mdev,
3035 MLX5_FLOW_NAMESPACE_LEFTOVERS);
3036 build_leftovers_ft_param(&priority,
3037 &num_entries,
3038 &num_groups);
3039 prio = &dev->flow_db->prios[MLX5_IB_FLOW_LEFTOVERS_PRIO];

--- 15 unchanged lines hidden ---

3055 if (!ns)
3056 return ERR_PTR(-ENOTSUPP);
3057
3058 if (num_entries > max_table_size)
3059 return ERR_PTR(-ENOMEM);
3060
3061 ft = prio->flow_table;
3062 if (!ft)
3063 return _get_prio(ns, prio, priority, num_entries, num_groups,
3064 flags);
3120 return _get_prio(ns, prio, priority, num_entries, num_groups);
3065
3066 return prio;
3067}
3068
3069static void set_underlay_qp(struct mlx5_ib_dev *dev,
3070 struct mlx5_flow_spec *spec,
3071 u32 underlay_qpn)
3072{

--- 150 unchanged lines hidden ---

3223 u32 prev_type = 0;
3224 int err = 0;
3225 int dest_num = 0;
3226 bool is_egress = flow_attr->flags & IB_FLOW_ATTR_FLAGS_EGRESS;
3227
3228 if (!is_valid_attr(dev->mdev, flow_attr))
3229 return ERR_PTR(-EINVAL);
3230
3231 if (dev->rep && is_egress)
3232 return ERR_PTR(-EINVAL);
3233
3234 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
3235 handler = kzalloc(sizeof(*handler), GFP_KERNEL);
3236 if (!handler || !spec) {
3237 err = -ENOMEM;
3238 goto free;
3239 }
3240
3241 INIT_LIST_HEAD(&handler->list);

--- 371 unchanged lines hidden ---

3613unlock:
3614 mutex_unlock(&dev->flow_db->lock);
3615 kfree(dst);
3616free_ucmd:
3617 kfree(ucmd);
3618 return ERR_PTR(err);
3619}
3620
3621static struct mlx5_ib_flow_prio *
3622_get_flow_table(struct mlx5_ib_dev *dev,
3623 struct mlx5_ib_flow_matcher *fs_matcher,
3624 bool mcast)
3674static struct mlx5_ib_flow_prio *_get_flow_table(struct mlx5_ib_dev *dev,
3675 int priority, bool mcast)
3625{
3677 int max_table_size;
3626 struct mlx5_flow_namespace *ns = NULL;
3627 struct mlx5_ib_flow_prio *prio;
3628 int max_table_size;
3629 u32 flags = 0;
3630 int priority;
3631
3632 if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_BYPASS) {
3633 max_table_size = BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
3634 log_max_ft_size));
3635 if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, decap))
3636 flags |= MLX5_FLOW_TABLE_TUNNEL_EN_DECAP;
3637 if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
3638 reformat_l3_tunnel_to_l2))
3639 flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
3640 } else { /* Can only be MLX5_FLOW_NAMESPACE_EGRESS */
3641 max_table_size = BIT(MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev,
3642 log_max_ft_size));
3643 if (MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, reformat))
3644 flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
3645 }
3646
3681 max_table_size = BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
3682 log_max_ft_size));
3647 if (max_table_size < MLX5_FS_MAX_ENTRIES)
3648 return ERR_PTR(-ENOMEM);
3649
3650 if (mcast)
3651 priority = MLX5_IB_FLOW_MCAST_PRIO;
3652 else
3653 priority = ib_prio_to_core_prio(fs_matcher->priority, false);
3689 priority = ib_prio_to_core_prio(priority, false);
3654
3655 ns = mlx5_get_flow_namespace(dev->mdev, fs_matcher->ns_type);
3691 ns = mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_BYPASS);
3656 if (!ns)
3657 return ERR_PTR(-ENOTSUPP);
3658
3659 if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_BYPASS)
3660 prio = &dev->flow_db->prios[priority];
3661 else
3662 prio = &dev->flow_db->egress_prios[priority];
3695 prio = &dev->flow_db->prios[priority];
3663
3664 if (prio->flow_table)
3665 return prio;
3666
3667 return _get_prio(ns, prio, priority, MLX5_FS_MAX_ENTRIES,
3668 MLX5_FS_MAX_TYPES, flags);
3701 MLX5_FS_MAX_TYPES);
3669}
3670
3671static struct mlx5_ib_flow_handler *
3672_create_raw_flow_rule(struct mlx5_ib_dev *dev,
3673 struct mlx5_ib_flow_prio *ft_prio,
3674 struct mlx5_flow_destination *dst,
3675 struct mlx5_ib_flow_matcher *fs_matcher,
3676 struct mlx5_flow_act *flow_act,
3677 void *cmd_in, int inlen)
3678{
3679 struct mlx5_ib_flow_handler *handler;
3712 struct mlx5_flow_act flow_act = {.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG};
3680 struct mlx5_flow_spec *spec;
3681 struct mlx5_flow_table *ft = ft_prio->flow_table;
3682 int err = 0;
3683
3684 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
3685 handler = kzalloc(sizeof(*handler), GFP_KERNEL);
3686 if (!handler || !spec) {
3687 err = -ENOMEM;
3688 goto free;
3689 }
3690
3691 INIT_LIST_HEAD(&handler->list);
3692
3693 memcpy(spec->match_value, cmd_in, inlen);
3694 memcpy(spec->match_criteria, fs_matcher->matcher_mask.match_params,
3695 fs_matcher->mask_len);
3696 spec->match_criteria_enable = fs_matcher->match_criteria_enable;
3697
3731 flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
3698 handler->rule = mlx5_add_flow_rules(ft, spec,
3699 flow_act, dst, 1);
3733 &flow_act, dst, 1);
3700
3701 if (IS_ERR(handler->rule)) {
3702 err = PTR_ERR(handler->rule);
3703 goto free;
3704 }
3705
3706 ft_prio->refcount++;
3707 handler->prio = ft_prio;

--- 45 unchanged lines hidden ---

3753 return true;
3754
3755 return false;
3756}
3757
3758struct mlx5_ib_flow_handler *
3759mlx5_ib_raw_fs_rule_add(struct mlx5_ib_dev *dev,
3760 struct mlx5_ib_flow_matcher *fs_matcher,
3761 struct mlx5_flow_act *flow_act,
3762 void *cmd_in, int inlen, int dest_id,
3763 int dest_type)
3764{
3765 struct mlx5_flow_destination *dst;
3766 struct mlx5_ib_flow_prio *ft_prio;
3800 int priority = fs_matcher->priority;
3767 struct mlx5_ib_flow_handler *handler;
3768 bool mcast;
3769 int err;
3770
3771 if (fs_matcher->flow_type != MLX5_IB_FLOW_TYPE_NORMAL)
3772 return ERR_PTR(-EOPNOTSUPP);
3773
3774 if (fs_matcher->priority > MLX5_IB_FLOW_LAST_PRIO)
3775 return ERR_PTR(-ENOMEM);
3776
3777 dst = kzalloc(sizeof(*dst), GFP_KERNEL);
3778 if (!dst)
3779 return ERR_PTR(-ENOMEM);
3780
3781 mcast = raw_fs_is_multicast(fs_matcher, cmd_in);
3782 mutex_lock(&dev->flow_db->lock);
3783
3784 ft_prio = _get_flow_table(dev, fs_matcher, mcast);
3818 ft_prio = _get_flow_table(dev, priority, mcast);
3785 if (IS_ERR(ft_prio)) {
3786 err = PTR_ERR(ft_prio);
3787 goto unlock;
3788 }
3789
3790 if (dest_type == MLX5_FLOW_DESTINATION_TYPE_TIR) {
3791 dst->type = dest_type;
3792 dst->tir_num = dest_id;
3793 flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
3794 } else if (dest_type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) {
3827 } else {
3795 dst->type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM;
3796 dst->ft_num = dest_id;
3797 flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
3798 } else {
3799 dst->type = MLX5_FLOW_DESTINATION_TYPE_PORT;
3800 flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_ALLOW;
3801 }
3802
3803 handler = _create_raw_flow_rule(dev, ft_prio, dst, fs_matcher, flow_act,
3804 cmd_in, inlen);
3832 handler = _create_raw_flow_rule(dev, ft_prio, dst, fs_matcher, cmd_in,
3833 inlen);
3805
3806 if (IS_ERR(handler)) {
3807 err = PTR_ERR(handler);
3808 goto destroy_ft;
3809 }
3810
3811 mutex_unlock(&dev->flow_db->lock);
3812 atomic_inc(&fs_matcher->usecnt);

--- 161 unchanged lines hidden ---

3974 switch (action->type) {
3975 case IB_FLOW_ACTION_ESP:
3976 /*
3977 * We only support aes_gcm by now, so we implicitly know this is
3978 * the underline crypto.
3979 */
3980 mlx5_accel_esp_destroy_xfrm(maction->esp_aes_gcm.ctx);
3981 break;
3982 case IB_FLOW_ACTION_UNSPECIFIED:
3983 mlx5_ib_destroy_flow_action_raw(maction);
3984 break;
3985 default:
3986 WARN_ON(true);
3987 break;
3988 }
3989
3990 kfree(maction);
3991 return 0;
3992}

--- 1869 unchanged lines hidden ---

5862
5863 err = init_node_data(dev);
5864 if (err)
5865 return err;
5866
5867 if ((MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
5868 (MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) ||
5869 MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
5870 mutex_init(&dev->lb_mutex);
5896 mutex_init(&dev->lb.mutex);
5871
5872 return 0;
5873}
5874
5875static int mlx5_ib_stage_non_default_cb(struct mlx5_ib_dev *dev)
5876{
5877 dev->ib_dev.get_port_immutable = mlx5_port_immutable;
5878 dev->ib_dev.query_port = mlx5_ib_query_port;

--- 543 unchanged lines hidden ---