main.c (f4375443b7861ca5d93a10dba5ef5a478d5df96a) main.c (45ec21c971eddfb5e8e953e49a9dbe780f4a4997)
/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:

--- 1751 unchanged lines hidden (view full) ---

1760 if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) ||
1761 (!MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) &&
1762 !MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
1763 return;
1764
1765 mlx5_ib_disable_lb(dev, true, false);
1766}
1767
/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:

--- 1751 unchanged lines hidden (view full) ---

1760 if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) ||
1761 (!MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) &&
1762 !MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
1763 return;
1764
1765 mlx5_ib_disable_lb(dev, true, false);
1766}
1767
1768static int set_ucontext_resp(struct ib_ucontext *uctx,
1769 struct mlx5_ib_alloc_ucontext_resp *resp)
1770{
1771 struct ib_device *ibdev = uctx->device;
1772 struct mlx5_ib_dev *dev = to_mdev(ibdev);
1773 struct mlx5_ib_ucontext *context = to_mucontext(uctx);
1774 struct mlx5_bfreg_info *bfregi = &context->bfregi;
1775 int err;
1776
1777 if (MLX5_CAP_GEN(dev->mdev, dump_fill_mkey)) {
1778 err = mlx5_cmd_dump_fill_mkey(dev->mdev,
1779 &resp->dump_fill_mkey);
1780 if (err)
1781 return err;
1782 resp->comp_mask |=
1783 MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_DUMP_FILL_MKEY;
1784 }
1785
1786 resp->qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp);
1787 if (dev->wc_support)
1788 resp->bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev,
1789 log_bf_reg_size);
1790 resp->cache_line_size = cache_line_size();
1791 resp->max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq);
1792 resp->max_rq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq);
1793 resp->max_send_wqebb = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
1794 resp->max_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
1795 resp->max_srq_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);
1796 resp->cqe_version = context->cqe_version;
1797 resp->log_uar_size = MLX5_CAP_GEN(dev->mdev, uar_4k) ?
1798 MLX5_ADAPTER_PAGE_SHIFT : PAGE_SHIFT;
1799 resp->num_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ?
1800 MLX5_CAP_GEN(dev->mdev,
1801 num_of_uars_per_page) : 1;
1802
1803 if (mlx5_accel_ipsec_device_caps(dev->mdev) &
1804 MLX5_ACCEL_IPSEC_CAP_DEVICE) {
1805 if (mlx5_get_flow_namespace(dev->mdev,
1806 MLX5_FLOW_NAMESPACE_EGRESS))
1807 resp->flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM;
1808 if (mlx5_accel_ipsec_device_caps(dev->mdev) &
1809 MLX5_ACCEL_IPSEC_CAP_REQUIRED_METADATA)
1810 resp->flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_REQ_METADATA;
1811 if (MLX5_CAP_FLOWTABLE(dev->mdev, flow_table_properties_nic_receive.ft_field_support.outer_esp_spi))
1812 resp->flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_SPI_STEERING;
1813 if (mlx5_accel_ipsec_device_caps(dev->mdev) &
1814 MLX5_ACCEL_IPSEC_CAP_TX_IV_IS_ESN)
1815 resp->flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_TX_IV_IS_ESN;
1816 /* MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_FULL_OFFLOAD is currently always 0 */
1817 }
1818
1819 resp->tot_bfregs = bfregi->lib_uar_dyn ? 0 :
1820 bfregi->total_num_bfregs - bfregi->num_dyn_bfregs;
1821 resp->num_ports = dev->num_ports;
1822 resp->cmds_supp_uhw |= MLX5_USER_CMDS_SUPP_UHW_QUERY_DEVICE |
1823 MLX5_USER_CMDS_SUPP_UHW_CREATE_AH;
1824
1825 if (mlx5_ib_port_link_layer(ibdev, 1) == IB_LINK_LAYER_ETHERNET) {
1826 mlx5_query_min_inline(dev->mdev, &resp->eth_min_inline);
1827 resp->eth_min_inline++;
1828 }
1829
1830 if (dev->mdev->clock_info)
1831 resp->clock_info_versions = BIT(MLX5_IB_CLOCK_INFO_V1);
1832
1833 /*
1834 * We don't want to expose information from the PCI bar that is located
1835 * after 4096 bytes, so if the arch only supports larger pages, let's
1836 * pretend we don't support reading the HCA's core clock. This is also
1837 * forced by mmap function.
1838 */
1839 if (PAGE_SIZE <= 4096) {
1840 resp->comp_mask |=
1841 MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET;
1842 resp->hca_core_clock_offset =
1843 offsetof(struct mlx5_init_seg,
1844 internal_timer_h) % PAGE_SIZE;
1845 }
1846
1847 if (MLX5_CAP_GEN(dev->mdev, ece_support))
1848 resp->comp_mask |= MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_ECE;
1849
1850 resp->num_dyn_bfregs = bfregi->num_dyn_bfregs;
1851 return 0;
1852}
1853
1768static int mlx5_ib_alloc_ucontext(struct ib_ucontext *uctx,
1769 struct ib_udata *udata)
1770{
1771 struct ib_device *ibdev = uctx->device;
1772 struct mlx5_ib_dev *dev = to_mdev(ibdev);
1773 struct mlx5_ib_alloc_ucontext_req_v2 req = {};
1774 struct mlx5_ib_alloc_ucontext_resp resp = {};
1854static int mlx5_ib_alloc_ucontext(struct ib_ucontext *uctx,
1855 struct ib_udata *udata)
1856{
1857 struct ib_device *ibdev = uctx->device;
1858 struct mlx5_ib_dev *dev = to_mdev(ibdev);
1859 struct mlx5_ib_alloc_ucontext_req_v2 req = {};
1860 struct mlx5_ib_alloc_ucontext_resp resp = {};
1775 struct mlx5_core_dev *mdev = dev->mdev;
1776 struct mlx5_ib_ucontext *context = to_mucontext(uctx);
1777 struct mlx5_bfreg_info *bfregi;
1778 int ver;
1779 int err;
1780 size_t min_req_v2 = offsetof(struct mlx5_ib_alloc_ucontext_req_v2,
1781 max_cqe_version);
1861 struct mlx5_ib_ucontext *context = to_mucontext(uctx);
1862 struct mlx5_bfreg_info *bfregi;
1863 int ver;
1864 int err;
1865 size_t min_req_v2 = offsetof(struct mlx5_ib_alloc_ucontext_req_v2,
1866 max_cqe_version);
1782 u32 dump_fill_mkey;
1783 bool lib_uar_4k;
1784 bool lib_uar_dyn;
1785
1786 if (!dev->ib_active)
1787 return -EAGAIN;
1788
1789 if (udata->inlen == sizeof(struct mlx5_ib_alloc_ucontext_req))
1790 ver = 0;

--- 12 unchanged lines hidden (view full) ---

1803 if (req.comp_mask || req.reserved0 || req.reserved1 || req.reserved2)
1804 return -EOPNOTSUPP;
1805
1806 req.total_num_bfregs = ALIGN(req.total_num_bfregs,
1807 MLX5_NON_FP_BFREGS_PER_UAR);
1808 if (req.num_low_latency_bfregs > req.total_num_bfregs - 1)
1809 return -EINVAL;
1810
1867 bool lib_uar_4k;
1868 bool lib_uar_dyn;
1869
1870 if (!dev->ib_active)
1871 return -EAGAIN;
1872
1873 if (udata->inlen == sizeof(struct mlx5_ib_alloc_ucontext_req))
1874 ver = 0;

--- 12 unchanged lines hidden (view full) ---

1887 if (req.comp_mask || req.reserved0 || req.reserved1 || req.reserved2)
1888 return -EOPNOTSUPP;
1889
1890 req.total_num_bfregs = ALIGN(req.total_num_bfregs,
1891 MLX5_NON_FP_BFREGS_PER_UAR);
1892 if (req.num_low_latency_bfregs > req.total_num_bfregs - 1)
1893 return -EINVAL;
1894
1811 resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp);
1812 if (dev->wc_support)
1813 resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size);
1814 resp.cache_line_size = cache_line_size();
1815 resp.max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq);
1816 resp.max_rq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq);
1817 resp.max_send_wqebb = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
1818 resp.max_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
1819 resp.max_srq_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);
1820 resp.cqe_version = min_t(__u8,
1821 (__u8)MLX5_CAP_GEN(dev->mdev, cqe_version),
1822 req.max_cqe_version);
1823 resp.log_uar_size = MLX5_CAP_GEN(dev->mdev, uar_4k) ?
1824 MLX5_ADAPTER_PAGE_SHIFT : PAGE_SHIFT;
1825 resp.num_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ?
1826 MLX5_CAP_GEN(dev->mdev, num_of_uars_per_page) : 1;
1827 resp.response_length = min(offsetof(typeof(resp), response_length) +
1828 sizeof(resp.response_length), udata->outlen);
1829
1830 if (mlx5_accel_ipsec_device_caps(dev->mdev) & MLX5_ACCEL_IPSEC_CAP_DEVICE) {
1831 if (mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_EGRESS))
1832 resp.flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM;
1833 if (mlx5_accel_ipsec_device_caps(dev->mdev) & MLX5_ACCEL_IPSEC_CAP_REQUIRED_METADATA)
1834 resp.flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_REQ_METADATA;
1835 if (MLX5_CAP_FLOWTABLE(dev->mdev, flow_table_properties_nic_receive.ft_field_support.outer_esp_spi))
1836 resp.flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_SPI_STEERING;
1837 if (mlx5_accel_ipsec_device_caps(dev->mdev) & MLX5_ACCEL_IPSEC_CAP_TX_IV_IS_ESN)
1838 resp.flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_TX_IV_IS_ESN;
1839 /* MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_FULL_OFFLOAD is currently always 0 */
1840 }
1841
1842 lib_uar_4k = req.lib_caps & MLX5_LIB_CAP_4K_UAR;
1843 lib_uar_dyn = req.lib_caps & MLX5_LIB_CAP_DYN_UAR;
1844 bfregi = &context->bfregi;
1845
1846 if (lib_uar_dyn) {
1847 bfregi->lib_uar_dyn = lib_uar_dyn;
1848 goto uar_done;
1849 }

--- 32 unchanged lines hidden (view full) ---

1882 context->devx_uid = err;
1883 }
1884
1885 err = mlx5_ib_alloc_transport_domain(dev, &context->tdn,
1886 context->devx_uid);
1887 if (err)
1888 goto out_devx;
1889
1895 lib_uar_4k = req.lib_caps & MLX5_LIB_CAP_4K_UAR;
1896 lib_uar_dyn = req.lib_caps & MLX5_LIB_CAP_DYN_UAR;
1897 bfregi = &context->bfregi;
1898
1899 if (lib_uar_dyn) {
1900 bfregi->lib_uar_dyn = lib_uar_dyn;
1901 goto uar_done;
1902 }

--- 32 unchanged lines hidden (view full) ---

1935 context->devx_uid = err;
1936 }
1937
1938 err = mlx5_ib_alloc_transport_domain(dev, &context->tdn,
1939 context->devx_uid);
1940 if (err)
1941 goto out_devx;
1942
1890 if (MLX5_CAP_GEN(dev->mdev, dump_fill_mkey)) {
1891 err = mlx5_cmd_dump_fill_mkey(dev->mdev, &dump_fill_mkey);
1892 if (err)
1893 goto out_mdev;
1894 }
1895
1896 INIT_LIST_HEAD(&context->db_page_list);
1897 mutex_init(&context->db_page_mutex);
1898
1943 INIT_LIST_HEAD(&context->db_page_list);
1944 mutex_init(&context->db_page_mutex);
1945
1899 resp.tot_bfregs = lib_uar_dyn ? 0 : req.total_num_bfregs;
1900 resp.num_ports = dev->num_ports;
1946 context->cqe_version = min_t(__u8,
1947 (__u8)MLX5_CAP_GEN(dev->mdev, cqe_version),
1948 req.max_cqe_version);
1901
1949
1902 if (offsetofend(typeof(resp), cqe_version) <= udata->outlen)
1903 resp.response_length += sizeof(resp.cqe_version);
1950 err = set_ucontext_resp(uctx, &resp);
1951 if (err)
1952 goto out_mdev;
1904
1953
1905 if (offsetofend(typeof(resp), cmds_supp_uhw) <= udata->outlen) {
1906 resp.cmds_supp_uhw |= MLX5_USER_CMDS_SUPP_UHW_QUERY_DEVICE |
1907 MLX5_USER_CMDS_SUPP_UHW_CREATE_AH;
1908 resp.response_length += sizeof(resp.cmds_supp_uhw);
1909 }
1910
1911 if (offsetofend(typeof(resp), eth_min_inline) <= udata->outlen) {
1912 if (mlx5_ib_port_link_layer(ibdev, 1) == IB_LINK_LAYER_ETHERNET) {
1913 mlx5_query_min_inline(dev->mdev, &resp.eth_min_inline);
1914 resp.eth_min_inline++;
1915 }
1916 resp.response_length += sizeof(resp.eth_min_inline);
1917 }
1918
1919 if (offsetofend(typeof(resp), clock_info_versions) <= udata->outlen) {
1920 if (mdev->clock_info)
1921 resp.clock_info_versions = BIT(MLX5_IB_CLOCK_INFO_V1);
1922 resp.response_length += sizeof(resp.clock_info_versions);
1923 }
1924
1925 /*
1926 * We don't want to expose information from the PCI bar that is located
1927 * after 4096 bytes, so if the arch only supports larger pages, let's
1928 * pretend we don't support reading the HCA's core clock. This is also
1929 * forced by mmap function.
1930 */
1931 if (offsetofend(typeof(resp), hca_core_clock_offset) <= udata->outlen) {
1932 if (PAGE_SIZE <= 4096) {
1933 resp.comp_mask |=
1934 MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET;
1935 resp.hca_core_clock_offset =
1936 offsetof(struct mlx5_init_seg, internal_timer_h) % PAGE_SIZE;
1937 }
1938 resp.response_length += sizeof(resp.hca_core_clock_offset);
1939 }
1940
1941 if (offsetofend(typeof(resp), log_uar_size) <= udata->outlen)
1942 resp.response_length += sizeof(resp.log_uar_size);
1943
1944 if (offsetofend(typeof(resp), num_uars_per_page) <= udata->outlen)
1945 resp.response_length += sizeof(resp.num_uars_per_page);
1946
1947 if (offsetofend(typeof(resp), num_dyn_bfregs) <= udata->outlen) {
1948 resp.num_dyn_bfregs = bfregi->num_dyn_bfregs;
1949 resp.response_length += sizeof(resp.num_dyn_bfregs);
1950 }
1951
1952 if (offsetofend(typeof(resp), dump_fill_mkey) <= udata->outlen) {
1953 if (MLX5_CAP_GEN(dev->mdev, dump_fill_mkey)) {
1954 resp.dump_fill_mkey = dump_fill_mkey;
1955 resp.comp_mask |=
1956 MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_DUMP_FILL_MKEY;
1957 }
1958 resp.response_length += sizeof(resp.dump_fill_mkey);
1959 }
1960
1961 if (MLX5_CAP_GEN(dev->mdev, ece_support))
1962 resp.comp_mask |= MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_ECE;
1963
1954 resp.response_length = min(udata->outlen, sizeof(resp));
1964 err = ib_copy_to_udata(udata, &resp, resp.response_length);
1965 if (err)
1966 goto out_mdev;
1967
1968 bfregi->ver = ver;
1969 bfregi->num_low_latency_bfregs = req.num_low_latency_bfregs;
1955 err = ib_copy_to_udata(udata, &resp, resp.response_length);
1956 if (err)
1957 goto out_mdev;
1958
1959 bfregi->ver = ver;
1960 bfregi->num_low_latency_bfregs = req.num_low_latency_bfregs;
1970 context->cqe_version = resp.cqe_version;
1971 context->lib_caps = req.lib_caps;
1972 print_lib_caps(dev, context->lib_caps);
1973
1974 if (mlx5_ib_lag_should_assign_affinity(dev)) {
1975 u8 port = mlx5_core_native_port_num(dev->mdev) - 1;
1976
1977 atomic_set(&context->tx_port_affinity,
1978 atomic_add_return(

--- 5425 unchanged lines hidden ---
1961 context->lib_caps = req.lib_caps;
1962 print_lib_caps(dev, context->lib_caps);
1963
1964 if (mlx5_ib_lag_should_assign_affinity(dev)) {
1965 u8 port = mlx5_core_native_port_num(dev->mdev) - 1;
1966
1967 atomic_set(&context->tx_port_affinity,
1968 atomic_add_return(

--- 5425 unchanged lines hidden ---