main.c (bf3347c4d15e26ab17fce3aa4041345198f4280c) vs. main.c (a762d460a06abc8d462ac513ba57dc3c31dd8c73)
1 /*
2 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:

--- 884 unchanged lines hidden (view full) ---

893 props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM;
894 props->raw_packet_caps |= IB_RAW_PACKET_CAP_IP_CSUM;
895 }
896
897 if (MLX5_CAP_ETH(dev->mdev, vlan_cap))
898 props->raw_packet_caps |=
899 IB_RAW_PACKET_CAP_CVLAN_STRIPPING;
900
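The checks above and throughout this diff key off MLX5_CAP_GEN()/MLX5_CAP_ETH()/MLX5_CAP_QOS(), accessors that read capability bits the firmware reported for the mlx5_core_dev; each hunk gates an advertised feature on one such bit. A minimal sketch of that shape, mirroring the vlan_cap test at line 897; example_report_cvlan_strip() is a hypothetical helper, not code from either revision:

/* Sketch: a firmware capability bit simply gates a reported feature flag. */
static void example_report_cvlan_strip(struct mlx5_ib_dev *dev,
				       struct ib_device_attr *props)
{
	if (MLX5_CAP_ETH(dev->mdev, vlan_cap))
		props->raw_packet_caps |= IB_RAW_PACKET_CAP_CVLAN_STRIPPING;
}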
901 if (field_avail(typeof(resp), tso_caps, uhw_outlen)) {
901 if (offsetofend(typeof(resp), tso_caps) <= uhw_outlen) {
902 max_tso = MLX5_CAP_ETH(mdev, max_lso_cap);
903 if (max_tso) {
904 resp.tso_caps.max_tso = 1 << max_tso;
905 resp.tso_caps.supported_qpts |=
906 1 << IB_QPT_RAW_PACKET;
907 resp.response_length += sizeof(resp.tso_caps);
908 }
909 }
910
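The recurring change in this function is mechanical: the driver-local field_avail() test is respelled as a plain offsetofend() comparison against uhw_outlen, the length of the output buffer userspace supplied. A minimal sketch of the idiom, assuming field_avail() was defined the usual way in mlx5_ib.h; struct example_resp and example_fill_tso() are hypothetical and appear in neither revision:

#include <linux/stddef.h>	/* offsetofend() */
#include <linux/types.h>

/* Hypothetical, trimmed-down response; the real code fills
 * struct mlx5_ib_query_device_resp from the mlx5 uapi header. */
struct example_resp {
	__u32 response_length;
	__u32 tso_caps;
};

/*
 * field_avail() was (roughly) defined as
 *	offsetof(typ, fld) + sizeof(((typ *)0)->fld) <= (sz)
 * which is the same test as offsetofend(typ, fld) <= (sz) used on the
 * other side of each pair above.
 */
static void example_fill_tso(struct example_resp *resp, size_t uhw_outlen)
{
	/* Only fill (and count) a field the caller's buffer can hold. */
	if (offsetofend(struct example_resp, tso_caps) <= uhw_outlen) {
		resp->tso_caps = 0;
		resp->response_length += sizeof(resp->tso_caps);
	}
	/* The real function later copies back only response_length bytes
	 * via ib_copy_to_udata() further down, so older userspace never
	 * sees fields it did not leave room for. */
}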
911 if (field_avail(typeof(resp), rss_caps, uhw_outlen)) {
911 if (offsetofend(typeof(resp), rss_caps) <= uhw_outlen) {
912 resp.rss_caps.rx_hash_function =
913 MLX5_RX_HASH_FUNC_TOEPLITZ;
914 resp.rss_caps.rx_hash_fields_mask =
915 MLX5_RX_HASH_SRC_IPV4 |
916 MLX5_RX_HASH_DST_IPV4 |
917 MLX5_RX_HASH_SRC_IPV6 |
918 MLX5_RX_HASH_DST_IPV6 |
919 MLX5_RX_HASH_SRC_PORT_TCP |
920 MLX5_RX_HASH_DST_PORT_TCP |
921 MLX5_RX_HASH_SRC_PORT_UDP |
922 MLX5_RX_HASH_DST_PORT_UDP |
923 MLX5_RX_HASH_INNER;
924 if (mlx5_accel_ipsec_device_caps(dev->mdev) &
925 MLX5_ACCEL_IPSEC_CAP_DEVICE)
926 resp.rss_caps.rx_hash_fields_mask |=
927 MLX5_RX_HASH_IPSEC_SPI;
928 resp.response_length += sizeof(resp.rss_caps);
929 }
930 } else {
931 if (field_avail(typeof(resp), tso_caps, uhw_outlen))
931 if (offsetofend(typeof(resp), tso_caps) <= uhw_outlen)
932 resp.response_length += sizeof(resp.tso_caps);
933 if (field_avail(typeof(resp), rss_caps, uhw_outlen))
933 if (offsetofend(typeof(resp), rss_caps) <= uhw_outlen)
934 resp.response_length += sizeof(resp.rss_caps);
935 }
936
937 if (MLX5_CAP_GEN(mdev, ipoib_basic_offloads)) {
938 props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
939 props->device_cap_flags |= IB_DEVICE_UD_TSO;
940 }
941

--- 125 unchanged lines hidden (view full) ---

1067
1068 if (MLX5_CAP_GEN(dev->mdev, cq_moderation)) {
1069 props->cq_caps.max_cq_moderation_count =
1070 MLX5_MAX_CQ_COUNT;
1071 props->cq_caps.max_cq_moderation_period =
1072 MLX5_MAX_CQ_PERIOD;
1073 }
1074
1075 if (field_avail(typeof(resp), cqe_comp_caps, uhw_outlen)) {
1075 if (offsetofend(typeof(resp), cqe_comp_caps) <= uhw_outlen) {
1076 resp.response_length += sizeof(resp.cqe_comp_caps);
1077
1078 if (MLX5_CAP_GEN(dev->mdev, cqe_compression)) {
1079 resp.cqe_comp_caps.max_num =
1080 MLX5_CAP_GEN(dev->mdev,
1081 cqe_compression_max_num);
1082
1083 resp.cqe_comp_caps.supported_format =
1084 MLX5_IB_CQE_RES_FORMAT_HASH |
1085 MLX5_IB_CQE_RES_FORMAT_CSUM;
1086
1087 if (MLX5_CAP_GEN(dev->mdev, mini_cqe_resp_stride_index))
1088 resp.cqe_comp_caps.supported_format |=
1089 MLX5_IB_CQE_RES_FORMAT_CSUM_STRIDX;
1090 }
1091 }
1092
1093 if (field_avail(typeof(resp), packet_pacing_caps, uhw_outlen) &&
1093 if (offsetofend(typeof(resp), packet_pacing_caps) <= uhw_outlen &&
1094 raw_support) {
1095 if (MLX5_CAP_QOS(mdev, packet_pacing) &&
1096 MLX5_CAP_GEN(mdev, qos)) {
1097 resp.packet_pacing_caps.qp_rate_limit_max =
1098 MLX5_CAP_QOS(mdev, packet_pacing_max_rate);
1099 resp.packet_pacing_caps.qp_rate_limit_min =
1100 MLX5_CAP_QOS(mdev, packet_pacing_min_rate);
1101 resp.packet_pacing_caps.supported_qpts |=
1102 1 << IB_QPT_RAW_PACKET;
1103 if (MLX5_CAP_QOS(mdev, packet_pacing_burst_bound) &&
1104 MLX5_CAP_QOS(mdev, packet_pacing_typical_size))
1105 resp.packet_pacing_caps.cap_flags |=
1106 MLX5_IB_PP_SUPPORT_BURST;
1107 }
1108 resp.response_length += sizeof(resp.packet_pacing_caps);
1109 }
1110
1111 if (field_avail(typeof(resp), mlx5_ib_support_multi_pkt_send_wqes,
1112 uhw_outlen)) {
1111 if (offsetofend(typeof(resp), mlx5_ib_support_multi_pkt_send_wqes) <=
1112 uhw_outlen) {
1113 if (MLX5_CAP_ETH(mdev, multi_pkt_send_wqe))
1114 resp.mlx5_ib_support_multi_pkt_send_wqes =
1115 MLX5_IB_ALLOW_MPW;
1116
1117 if (MLX5_CAP_ETH(mdev, enhanced_multi_pkt_send_wqe))
1118 resp.mlx5_ib_support_multi_pkt_send_wqes |=
1119 MLX5_IB_SUPPORT_EMPW;
1120
1121 resp.response_length +=
1122 sizeof(resp.mlx5_ib_support_multi_pkt_send_wqes);
1123 }
1124
1125 if (field_avail(typeof(resp), flags, uhw_outlen)) {
1125 if (offsetofend(typeof(resp), flags) <= uhw_outlen) {
1126 resp.response_length += sizeof(resp.flags);
1127
1128 if (MLX5_CAP_GEN(mdev, cqe_compression_128))
1129 resp.flags |=
1130 MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_COMP;
1131
1132 if (MLX5_CAP_GEN(mdev, cqe_128_always))
1133 resp.flags |= MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_PAD;
1134 if (MLX5_CAP_GEN(mdev, qp_packet_based))
1135 resp.flags |=
1136 MLX5_IB_QUERY_DEV_RESP_PACKET_BASED_CREDIT_MODE;
1137
1138 resp.flags |= MLX5_IB_QUERY_DEV_RESP_FLAGS_SCAT2CQE_DCT;
1139 }
1140
1141 if (field_avail(typeof(resp), sw_parsing_caps, uhw_outlen)) {
1141 if (offsetofend(typeof(resp), sw_parsing_caps) <= uhw_outlen) {
1142 resp.response_length += sizeof(resp.sw_parsing_caps);
1143 if (MLX5_CAP_ETH(mdev, swp)) {
1144 resp.sw_parsing_caps.sw_parsing_offloads |=
1145 MLX5_IB_SW_PARSING;
1146
1147 if (MLX5_CAP_ETH(mdev, swp_csum))
1148 resp.sw_parsing_caps.sw_parsing_offloads |=
1149 MLX5_IB_SW_PARSING_CSUM;
1150
1151 if (MLX5_CAP_ETH(mdev, swp_lso))
1152 resp.sw_parsing_caps.sw_parsing_offloads |=
1153 MLX5_IB_SW_PARSING_LSO;
1154
1155 if (resp.sw_parsing_caps.sw_parsing_offloads)
1156 resp.sw_parsing_caps.supported_qpts =
1157 BIT(IB_QPT_RAW_PACKET);
1158 }
1159 }
1160
1161 if (field_avail(typeof(resp), striding_rq_caps, uhw_outlen) &&
1161 if (offsetofend(typeof(resp), striding_rq_caps) <= uhw_outlen &&
1162 raw_support) {
1163 resp.response_length += sizeof(resp.striding_rq_caps);
1164 if (MLX5_CAP_GEN(mdev, striding_rq)) {
1165 resp.striding_rq_caps.min_single_stride_log_num_of_bytes =
1166 MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES;
1167 resp.striding_rq_caps.max_single_stride_log_num_of_bytes =
1168 MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES;
1169 if (MLX5_CAP_GEN(dev->mdev, ext_stride_num_range))

--- 6 unchanged lines hidden (view full) ---

1176 MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
1177 resp.striding_rq_caps.max_single_wqe_log_num_of_strides =
1178 MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES;
1179 resp.striding_rq_caps.supported_qpts =
1180 BIT(IB_QPT_RAW_PACKET);
1181 }
1182 }
1183
1184 if (field_avail(typeof(resp), tunnel_offloads_caps, uhw_outlen)) {
1184 if (offsetofend(typeof(resp), tunnel_offloads_caps) <= uhw_outlen) {
1185 resp.response_length += sizeof(resp.tunnel_offloads_caps);
1186 if (MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan))
1187 resp.tunnel_offloads_caps |=
1188 MLX5_IB_TUNNELED_OFFLOADS_VXLAN;
1189 if (MLX5_CAP_ETH(mdev, tunnel_stateless_geneve_rx))
1190 resp.tunnel_offloads_caps |=
1191 MLX5_IB_TUNNELED_OFFLOADS_GENEVE;
1192 if (MLX5_CAP_ETH(mdev, tunnel_stateless_gre))
1193 resp.tunnel_offloads_caps |=
1194 MLX5_IB_TUNNELED_OFFLOADS_GRE;
1195 if (MLX5_CAP_GEN(mdev, flex_parser_protocols) &
1196 MLX5_FLEX_PROTO_CW_MPLS_GRE)
1197 resp.tunnel_offloads_caps |=
1198 MLX5_IB_TUNNELED_OFFLOADS_MPLS_GRE;
1199 if (MLX5_CAP_GEN(mdev, flex_parser_protocols) &
1200 MLX5_FLEX_PROTO_CW_MPLS_UDP)
1201 resp.tunnel_offloads_caps |=
1202 MLX5_IB_TUNNELED_OFFLOADS_MPLS_UDP;
1195 if (MLX5_CAP_ETH(mdev, tunnel_stateless_mpls_over_gre))
1196 resp.tunnel_offloads_caps |=
1197 MLX5_IB_TUNNELED_OFFLOADS_MPLS_GRE;
1198 if (MLX5_CAP_ETH(mdev, tunnel_stateless_mpls_over_udp))
1199 resp.tunnel_offloads_caps |=
1200 MLX5_IB_TUNNELED_OFFLOADS_MPLS_UDP;
1203 }
1204
1205 if (uhw_outlen) {
1206 err = ib_copy_to_udata(uhw, &resp, resp.response_length);
1207
1208 if (err)

--- 687 unchanged lines hidden (view full) ---

1896 }
1897
1898 INIT_LIST_HEAD(&context->db_page_list);
1899 mutex_init(&context->db_page_mutex);
1900
1901 resp.tot_bfregs = req.total_num_bfregs;
1902 resp.num_ports = dev->num_ports;
1903
1904 if (field_avail(typeof(resp), cqe_version, udata->outlen))
1902 if (offsetofend(typeof(resp), cqe_version) <= udata->outlen)
1905 resp.response_length += sizeof(resp.cqe_version);
1906
1907 if (field_avail(typeof(resp), cmds_supp_uhw, udata->outlen)) {
1905 if (offsetofend(typeof(resp), cmds_supp_uhw) <= udata->outlen) {
1908 resp.cmds_supp_uhw |= MLX5_USER_CMDS_SUPP_UHW_QUERY_DEVICE |
1909 MLX5_USER_CMDS_SUPP_UHW_CREATE_AH;
1910 resp.response_length += sizeof(resp.cmds_supp_uhw);
1911 }
1912
1913 if (field_avail(typeof(resp), eth_min_inline, udata->outlen)) {
1911 if (offsetofend(typeof(resp), eth_min_inline) <= udata->outlen) {
1914 if (mlx5_ib_port_link_layer(ibdev, 1) == IB_LINK_LAYER_ETHERNET) {
1915 mlx5_query_min_inline(dev->mdev, &resp.eth_min_inline);
1916 resp.eth_min_inline++;
1917 }
1918 resp.response_length += sizeof(resp.eth_min_inline);
1919 }
1920
1921 if (field_avail(typeof(resp), clock_info_versions, udata->outlen)) {
1919 if (offsetofend(typeof(resp), clock_info_versions) <= udata->outlen) {
1922 if (mdev->clock_info)
1923 resp.clock_info_versions = BIT(MLX5_IB_CLOCK_INFO_V1);
1924 resp.response_length += sizeof(resp.clock_info_versions);
1925 }
1926
1927 /*
1928 * We don't want to expose information from the PCI bar that is located
1929 * after 4096 bytes, so if the arch only supports larger pages, let's
1930 * pretend we don't support reading the HCA's core clock. This is also
1931 * forced by mmap function.
1932 */
1933 if (field_avail(typeof(resp), hca_core_clock_offset, udata->outlen)) {
1931 if (offsetofend(typeof(resp), hca_core_clock_offset) <= udata->outlen) {
1934 if (PAGE_SIZE <= 4096) {
1935 resp.comp_mask |=
1936 MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET;
1937 resp.hca_core_clock_offset =
1938 offsetof(struct mlx5_init_seg, internal_timer_h) % PAGE_SIZE;
1939 }
1940 resp.response_length += sizeof(resp.hca_core_clock_offset);
1941 }
1942
1943 if (field_avail(typeof(resp), log_uar_size, udata->outlen))
1941 if (offsetofend(typeof(resp), log_uar_size) <= udata->outlen)
1944 resp.response_length += sizeof(resp.log_uar_size);
1945
1946 if (field_avail(typeof(resp), num_uars_per_page, udata->outlen))
1944 if (offsetofend(typeof(resp), num_uars_per_page) <= udata->outlen)
1947 resp.response_length += sizeof(resp.num_uars_per_page);
1948
1949 if (field_avail(typeof(resp), num_dyn_bfregs, udata->outlen)) {
1947 if (offsetofend(typeof(resp), num_dyn_bfregs) <= udata->outlen) {
1950 resp.num_dyn_bfregs = bfregi->num_dyn_bfregs;
1951 resp.response_length += sizeof(resp.num_dyn_bfregs);
1952 }
1953
1954 if (field_avail(typeof(resp), dump_fill_mkey, udata->outlen)) {
1952 if (offsetofend(typeof(resp), dump_fill_mkey) <= udata->outlen) {
1955 if (MLX5_CAP_GEN(dev->mdev, dump_fill_mkey)) {
1956 resp.dump_fill_mkey = dump_fill_mkey;
1957 resp.comp_mask |=
1958 MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_DUMP_FILL_MKEY;
1959 }
1960 resp.response_length += sizeof(resp.dump_fill_mkey);
1961 }
1962

--- 1602 unchanged lines hidden (view full) ---

3565 misc_parameters_2);
3566
3567 MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
3568 mlx5_eswitch_get_vport_metadata_for_match(esw,
3569 rep->vport));
3570 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
3571 misc_parameters_2);
3572
3573 MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
3574 mlx5_eswitch_get_vport_metadata_mask());
3571 MLX5_SET_TO_ONES(fte_match_set_misc2, misc, metadata_reg_c_0);
3575 } else {
3576 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
3577 misc_parameters);
3578
3579 MLX5_SET(fte_match_set_misc, misc, source_port, rep->vport);
3580
3581 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
3582 misc_parameters);

--- 2664 unchanged lines hidden (view full) ---
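The metadata_reg_c_0 hunk above differs only in how the match-criteria mask is written: one revision marks the whole field significant with MLX5_SET_TO_ONES(), the other writes the explicit mask returned by mlx5_eswitch_get_vport_metadata_mask(), so only the reg_c_0 bits that carry the vport metadata take part in the match. A minimal sketch of the value/criteria pairing behind both variants; example_match_reg_c0() is a hypothetical helper, not code from either revision:

#include <linux/mlx5/fs.h>	/* struct mlx5_flow_spec; pulls in MLX5_SET()/MLX5_ADDR_OF() */

/* Sketch: match_value holds the values to compare, match_criteria the
 * mask saying which bits of those values are significant. */
static void example_match_reg_c0(struct mlx5_flow_spec *spec,
				 u32 value, u32 mask)
{
	void *misc;

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    misc_parameters_2);
	MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0, value);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			    misc_parameters_2);
	if (mask)
		/* explicit-mask variant: match only the masked bits */
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0, mask);
	else
		/* MLX5_SET_TO_ONES() variant: match the full field */
		MLX5_SET_TO_ONES(fte_match_set_misc2, misc, metadata_reg_c_0);
}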

6247 UVERBS_OBJECT_FLOW_ACTION,
6248 UVERBS_METHOD_FLOW_ACTION_ESP_CREATE,
6249 UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_CREATE_FLOW_ACTION_FLAGS,
6250 enum mlx5_ib_uapi_flow_action_flags));
6251
6252 static const struct uapi_definition mlx5_ib_defs[] = {
6253 UAPI_DEF_CHAIN(mlx5_ib_devx_defs),
6254 UAPI_DEF_CHAIN(mlx5_ib_flow_defs),
6252 UAPI_DEF_CHAIN(mlx5_ib_qos_defs),
6255
6256 UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_FLOW_ACTION,
6257 &mlx5_ib_flow_action),
6258 UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_DM, &mlx5_ib_dm),
6259 UAPI_DEF_CHAIN_OBJ_TREE_NAMED(MLX5_IB_OBJECT_VAR,
6260 UAPI_DEF_IS_OBJ_SUPPORTED(var_is_supported)),
6261 {}
6262 };

--- 278 unchanged lines hidden (view full) ---

6541 log_doorbell_bar_size);
6542 log_doorbell_stride = MLX5_CAP_DEV_VDPA_EMULATION(mdev,
6543 log_doorbell_stride);
6544 var_table->hw_start_addr = dev->mdev->bar_addr +
6545 MLX5_CAP64_DEV_VDPA_EMULATION(mdev,
6546 doorbell_bar_offset);
6547 bar_size = (1ULL << log_doorbell_bar_size) * 4096;
6548 var_table->stride_size = 1ULL << log_doorbell_stride;
6549 var_table->num_var_hw_entries = div64_u64(bar_size, var_table->stride_size);
6547 var_table->num_var_hw_entries = div_u64(bar_size,
6548 var_table->stride_size);
6550 mutex_init(&var_table->bitmap_lock);
6551 var_table->bitmap = bitmap_zalloc(var_table->num_var_hw_entries,
6552 GFP_KERNEL);
6553 return (var_table->bitmap) ? 0 : -ENOMEM;
6554 }
6555
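The two revisions compute num_var_hw_entries with different <linux/math64.h> helpers: div_u64() takes a u32 divisor, while div64_u64() divides two full u64 values. Since stride_size is built as 1ULL << log_doorbell_stride it can in principle be wider than 32 bits, which is presumably what the 64-bit-divisor variant guards against. A small sketch of the distinction; example_count_entries() is hypothetical:

#include <linux/kernel.h>	/* U32_MAX */
#include <linux/math64.h>

/* Sketch, not from either revision: passing a u64 divisor to div_u64()
 * truncates it to 32 bits at the call site, so a stride wider than
 * U32_MAX needs div64_u64(). */
static u64 example_count_entries(u64 bar_size, u64 stride_size)
{
	if (stride_size > U32_MAX)
		return div64_u64(bar_size, stride_size);
	return div_u64(bar_size, stride_size);
}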
6556 static void mlx5_ib_stage_caps_cleanup(struct mlx5_ib_dev *dev)
6557 {

--- 515 unchanged lines hidden (view full) ---

7073 mlx5_ib_stage_dev_res_init,
7074 mlx5_ib_stage_dev_res_cleanup),
7075 STAGE_CREATE(MLX5_IB_STAGE_DEVICE_NOTIFIER,
7076 mlx5_ib_stage_dev_notifier_init,
7077 mlx5_ib_stage_dev_notifier_cleanup),
7078 STAGE_CREATE(MLX5_IB_STAGE_COUNTERS,
7079 mlx5_ib_stage_counters_init,
7080 mlx5_ib_stage_counters_cleanup),
7080 STAGE_CREATE(MLX5_IB_STAGE_CONG_DEBUGFS,
7081 mlx5_ib_stage_cong_debugfs_init,
7082 mlx5_ib_stage_cong_debugfs_cleanup),
7081 STAGE_CREATE(MLX5_IB_STAGE_UAR,
7082 mlx5_ib_stage_uar_init,
7083 mlx5_ib_stage_uar_cleanup),
7084 STAGE_CREATE(MLX5_IB_STAGE_BFREG,
7085 mlx5_ib_stage_bfrag_init,
7086 mlx5_ib_stage_bfrag_cleanup),
7087 STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR,
7088 NULL,

--- 174 unchanged lines hidden ---
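The final hunk differs only in whether the CONG_DEBUGFS stage is wired into this particular device profile. Each STAGE_CREATE() line is a designated array initializer pairing an init and a cleanup callback for one stage; a rough, self-contained sketch of that pattern with hypothetical types (struct example_stage, EXAMPLE_STAGE_CREATE), since the driver's own profile structures are not shown in this diff:

/* Sketch with hypothetical types, not the driver's own structs: a
 * STAGE_CREATE()-style macro fills one slot of a profile array with an
 * init/cleanup pair, so the extra CONG_DEBUGFS lines in one revision
 * simply populate one more slot of that profile. */
struct mlx5_ib_dev;

struct example_stage {
	int (*init)(struct mlx5_ib_dev *dev);
	void (*cleanup)(struct mlx5_ib_dev *dev);
};

#define EXAMPLE_STAGE_CREATE(_stage, _init, _cleanup) \
	[_stage] = { .init = _init, .cleanup = _cleanup }

static int example_stage_init(struct mlx5_ib_dev *dev) { return 0; }
static void example_stage_cleanup(struct mlx5_ib_dev *dev) { }

static const struct example_stage example_profile[] = {
	EXAMPLE_STAGE_CREATE(0, example_stage_init, example_stage_cleanup),
};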