Lines Matching refs:mdev

326 if (!MLX5_CAP_GEN(dev->mdev, qpc_extension) ||  in mlx5_ib_qp_err_syndrome()
327 !MLX5_CAP_GEN(dev->mdev, qp_error_syndrome)) in mlx5_ib_qp_err_syndrome()
440 if (cap->max_recv_wr > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) in set_rq_size()
475 if (wqe_size > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq)) { in set_rq_size()
478 MLX5_CAP_GEN(dev->mdev, in set_rq_size()
605 if (wqe_size > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)) { in calc_sq_size()
607 wqe_size, MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)); in calc_sq_size()
617 if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) { in calc_sq_size()
621 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz)); in calc_sq_size()
644 if (desc_sz > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)) { in set_user_buf_size()
646 desc_sz, MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)); in set_user_buf_size()
658 if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) { in set_user_buf_size()
661 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz)); in set_user_buf_size()
1094 mlx5_db_free(dev->mdev, &qp->db); in destroy_qp()
1096 mlx5_frag_buf_free(dev->mdev, &qp->buf); in destroy_qp()
1118 qp->bf.buf_size = (1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size)) / 2; in _create_kernel_qp()
1131 err = mlx5_frag_buf_alloc_node(dev->mdev, base->ubuffer.buf_size, in _create_kernel_qp()
1132 &qp->buf, dev->mdev->priv.numa_node); in _create_kernel_qp()
1164 MLX5_SET(qpc, qpc, ts_format, mlx5_get_qp_default_ts(dev->mdev)); in _create_kernel_qp()
1178 err = mlx5_db_alloc(dev->mdev, &qp->db); in _create_kernel_qp()
1209 mlx5_db_free(dev->mdev, &qp->db); in _create_kernel_qp()
1215 mlx5_frag_buf_free(dev->mdev, &qp->buf); in _create_kernel_qp()
1241 mlx5_lag_is_lacp_owner(dev->mdev)) in create_raw_packet_qp_tis()
1246 return mlx5_core_create_tis(dev->mdev, in, &sq->tisn); in create_raw_packet_qp_tis()
1252 mlx5_cmd_destroy_tis(dev->mdev, sq->tisn, to_mpd(pd)->uid); in destroy_raw_packet_qp_tis()
1293 u8 ts_cap = MLX5_CAP_GEN(dev->mdev, rq_ts_format); in get_rq_ts_format()
1301 u8 ts_cap = MLX5_CAP_GEN(dev->mdev, sq_ts_format); in get_sq_ts_format()
1310 u8 ts_cap = MLX5_CAP_ROCE(dev->mdev, qp_ts_format); in get_qp_ts_format()
1382 if (MLX5_CAP_ETH(dev->mdev, multi_pkt_send_wqe)) in create_raw_packet_qp_sq()
1390 if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) && in create_raw_packet_qp_sq()
1391 MLX5_CAP_ETH(dev->mdev, swp)) in create_raw_packet_qp_sq()
1516 mlx5_cmd_destroy_tir(dev->mdev, rq->tirn, to_mpd(pd)->uid); in destroy_raw_packet_qp_tir()
1556 err = mlx5_cmd_exec_inout(dev->mdev, create_tir, in, out); in create_raw_packet_qp_tir()
1630 if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, sw_owner) || in create_raw_packet_qp()
1631 MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, sw_owner_v2)) { in create_raw_packet_qp()
1699 mlx5_cmd_destroy_tir(dev->mdev, qp->rss_qp.tirn, in destroy_rss_raw_qp_tir()
1876 err = mlx5_cmd_exec_inout(dev->mdev, create_tir, in, out); in create_rss_raw_qp_tir()
1883 mlx5_cmd_destroy_tir(dev->mdev, qp->rss_qp.tirn, in create_rss_raw_qp_tir()
1893 if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, sw_owner) || in create_rss_raw_qp_tir()
1894 MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, sw_owner_v2)) { in create_rss_raw_qp_tir()
1941 MLX5_CAP_GEN(dev->mdev, dc_req_scat_data_cqe)) in configure_requester_scat_cqe()
1967 u8 atomic_operations = MLX5_CAP_ATOMIC(dev->mdev, atomic_operations); in get_atomic_mode()
1968 u8 atomic = MLX5_CAP_GEN(dev->mdev, atomic); in get_atomic_mode()
1976 atomic_size_mask = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_dc); in get_atomic_mode()
1978 atomic_size_mask = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_qp); in get_atomic_mode()
2000 struct mlx5_core_dev *mdev = dev->mdev; in create_xrc_tgt_qp() local
2029 MLX5_SET(qpc, qpc, ts_format, mlx5_get_qp_default_ts(dev->mdev)); in create_xrc_tgt_qp()
2039 if (MLX5_CAP_GEN(mdev, cqe_version) == MLX5_CQE_VERSION_V1) in create_xrc_tgt_qp()
2057 if (MLX5_CAP_GEN(mdev, ece_support)) in create_xrc_tgt_qp()
2079 struct mlx5_core_dev *mdev = dev->mdev; in create_dci() local
2113 if (ucmd->sq_wqe_count > (1 << MLX5_CAP_GEN(mdev, log_max_qp_sz))) in create_dci()
2127 if (MLX5_CAP_GEN(mdev, ece_support)) in create_dci()
2184 if (MLX5_CAP_GEN(mdev, cqe_version) == MLX5_CQE_VERSION_V1) in create_dci()
2202 if (MLX5_CAP_GEN(mdev, ece_support)) in create_dci()
2240 struct mlx5_core_dev *mdev = dev->mdev; in create_user_qp() local
2280 if (ucmd->sq_wqe_count > (1 << MLX5_CAP_GEN(mdev, log_max_qp_sz))) in create_user_qp()
2298 if (MLX5_CAP_GEN(mdev, ece_support)) in create_user_qp()
2379 if (MLX5_CAP_GEN(mdev, cqe_version) == MLX5_CQE_VERSION_V1) in create_user_qp()
2405 if (MLX5_CAP_GEN(mdev, ece_support)) in create_user_qp()
2441 struct mlx5_core_dev *mdev = dev->mdev; in create_kernel_qp() local
2522 if (MLX5_CAP_GEN(mdev, cqe_version) == MLX5_CQE_VERSION_V1) in create_kernel_qp()
2530 MLX5_CAP_GEN(mdev, go_back_n)) in create_kernel_qp()
2738 if (mlx5_lag_is_active(dev->mdev) && !MLX5_CAP_GEN(dev->mdev, lag_dct)) in create_dct()
2752 if (MLX5_CAP_GEN(dev->mdev, ece_support)) in create_dct()
2769 if (attr->qp_type == IB_QPT_DRIVER && !MLX5_CAP_GEN(dev->mdev, dct)) in check_qp_type()
2775 if (!MLX5_CAP_GEN(dev->mdev, xrc)) in check_qp_type()
2875 struct mlx5_core_dev *mdev = dev->mdev; in process_vendor_flags() local
2904 MLX5_CAP_GEN(mdev, log_max_dci_stream_channels), in process_vendor_flags()
2909 MLX5_CAP_GEN(mdev, sctr_data_cqe), qp); in process_vendor_flags()
2911 MLX5_CAP_GEN(mdev, sctr_data_cqe), qp); in process_vendor_flags()
2914 cond = MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan) || in process_vendor_flags()
2915 MLX5_CAP_ETH(mdev, tunnel_stateless_gre) || in process_vendor_flags()
2916 MLX5_CAP_ETH(mdev, tunnel_stateless_geneve_rx); in process_vendor_flags()
2930 MLX5_CAP_GEN(mdev, qp_packet_based), qp); in process_vendor_flags()
2977 struct mlx5_core_dev *mdev = dev->mdev; in process_create_flags() local
2988 mlx5_get_flow_namespace(dev->mdev, in process_create_flags()
2993 MLX5_CAP_GEN(mdev, sho), qp); in process_create_flags()
2996 MLX5_CAP_GEN(mdev, block_lb_mc), qp); in process_create_flags()
2998 MLX5_CAP_GEN(mdev, cd), qp); in process_create_flags()
3000 MLX5_CAP_GEN(mdev, cd), qp); in process_create_flags()
3002 MLX5_CAP_GEN(mdev, cd), qp); in process_create_flags()
3007 MLX5_CAP_GEN(mdev, ipoib_basic_offloads), in process_create_flags()
3009 cond = MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_IB; in process_create_flags()
3015 cond = MLX5_CAP_GEN(mdev, eth_net_offloads) && in process_create_flags()
3016 MLX5_CAP_ETH(mdev, scatter_fcs); in process_create_flags()
3020 cond = MLX5_CAP_GEN(mdev, eth_net_offloads) && in process_create_flags()
3021 MLX5_CAP_ETH(mdev, vlan_cap); in process_create_flags()
3028 MLX5_CAP_GEN(mdev, end_pad), qp); in process_create_flags()
3442 stat_rate_support = MLX5_CAP_GEN(dev->mdev, stat_rate_support); in ib_rate_to_mlx5()
3592 return modify_raw_packet_eth_prio(dev->mdev, in mlx5_set_path()
3801 if (MLX5_CAP_GEN(dev->mdev, modify_rq_counter_set_id)) { in modify_raw_packet_qp_rq()
3811 err = mlx5_core_modify_rq(dev->mdev, rq->base.mqp.qpn, in); in modify_raw_packet_qp_rq()
3954 err = modify_raw_packet_tx_affinity(dev->mdev, sq, in modify_raw_packet_qp()
3966 err = modify_raw_packet_qp_sq(dev->mdev, sq, sq_state, in modify_raw_packet_qp()
3990 u8 port_num = mlx5_core_native_port_num(dev->mdev) - 1; in get_tx_affinity_rr()
3999 (dev->lag_active ? dev->lag_ports : MLX5_CAP_GEN(dev->mdev, num_lag_ports)) + 1; in get_tx_affinity_rr()
4034 mlx5_lag_get_slave_port(dev->mdev, attr->xmit_slave); in get_tx_affinity()
4049 struct mlx5_core_dev *mdev) in __mlx5_ib_qp_set_raw_qp_counter() argument
4069 return mlx5_core_modify_rq(mdev, rq->base.mqp.qpn, in); in __mlx5_ib_qp_set_raw_qp_counter()
4088 return __mlx5_ib_qp_set_raw_qp_counter(mqp, set_id, dev->mdev); in __mlx5_ib_qp_set_counter()
4099 return mlx5_cmd_exec_in(dev->mdev, rts2rts_qp, in); in __mlx5_ib_qp_set_counter()
4195 MLX5_CAP_GEN(dev->mdev, init2_lag_tx_port_affinity)) in __mlx5_ib_modify_qp()
4215 MLX5_CAP_GEN(dev->mdev, log_max_msg)); in __mlx5_ib_modify_qp()
4354 MLX5_CAP_QOS(dev->mdev, packet_pacing_burst_bound)) { in __mlx5_ib_modify_qp()
4365 MLX5_CAP_QOS(dev->mdev, packet_pacing_typical_size)) { in __mlx5_ib_modify_qp()
4382 MLX5_CAP_GEN(dev->mdev, ece_support) ? in __mlx5_ib_modify_qp()
4509 if (MLX5_CAP_GEN(dev->mdev, ece_support) && ucmd->ece_options) in mlx5_ib_modify_dct()
4546 if (mlx5_lag_is_active(dev->mdev)) in mlx5_ib_modify_dct()
4584 err = mlx5_cmd_check(dev->mdev, err, qp->dct.in, out); in mlx5_ib_modify_dct()
4588 if (MLX5_CAP_GEN(dev->mdev, ece_support)) in mlx5_ib_modify_dct()
4627 log_max_ra_res = 1 << MLX5_CAP_GEN(dev->mdev, in validate_rd_atomic()
4629 log_max_ra_req = 1 << MLX5_CAP_GEN(dev->mdev, in validate_rd_atomic()
4632 log_max_ra_res = 1 << MLX5_CAP_GEN(dev->mdev, in validate_rd_atomic()
4634 log_max_ra_req = 1 << MLX5_CAP_GEN(dev->mdev, in validate_rd_atomic()
4825 err = mlx5_core_query_sq_state(dev->mdev, sq->base.mqp.qpn, sq_state); in query_raw_packet_qp_sq_state()
4848 err = mlx5_core_query_rq(dev->mdev, rq->base.mqp.qpn, out); in query_raw_packet_qp_rq_state()
5137 if (!MLX5_CAP_GEN(dev->mdev, xrc)) in mlx5_ib_alloc_xrcd()
5140 return mlx5_cmd_xrcd_alloc(dev->mdev, &xrcd->xrcdn, 0); in mlx5_ib_alloc_xrcd()
5148 return mlx5_cmd_xrcd_dealloc(dev->mdev, xrcdn, 0); in mlx5_ib_dealloc_xrcd()
5232 if (!MLX5_CAP_GEN(dev->mdev, end_pad)) { in create_rq()
5264 has_net_offloads = MLX5_CAP_GEN(dev->mdev, eth_net_offloads); in create_rq()
5266 if (!(has_net_offloads && MLX5_CAP_ETH(dev->mdev, vlan_cap))) { in create_rq()
5275 if (!(has_net_offloads && MLX5_CAP_ETH(dev->mdev, scatter_fcs))) { in create_rq()
5315 if (wq_init_attr->max_wr > (1 << MLX5_CAP_GEN(dev->mdev, log_max_wq_sz))) in set_user_rq_size()
5337 if (!MLX5_CAP_GEN(dev->mdev, ext_stride_num_range) && in log_of_strides_valid()
5377 if (!MLX5_CAP_GEN(dev->mdev, striding_rq)) { in prepare_user_rq()
5397 MLX5_CAP_GEN(dev->mdev, ext_stride_num_range) ? in prepare_user_rq()
5525 MLX5_CAP_GEN(dev->mdev, log_max_rqt_size)) { in mlx5_ib_create_rwq_ind_table()
5528 MLX5_CAP_GEN(dev->mdev, log_max_rqt_size)); in mlx5_ib_create_rwq_ind_table()
5553 err = mlx5_core_create_rqt(dev->mdev, in, inlen, &rwq_ind_tbl->rqtn); in mlx5_ib_create_rwq_ind_table()
5571 mlx5_cmd_destroy_rqt(dev->mdev, rwq_ind_tbl->rqtn, rwq_ind_tbl->uid); in mlx5_ib_create_rwq_ind_table()
5580 return mlx5_cmd_destroy_rqt(dev->mdev, rwq_ind_tbl->rqtn, rwq_ind_tbl->uid); in mlx5_ib_destroy_rwq_ind_table()
5631 if (!(MLX5_CAP_GEN(dev->mdev, eth_net_offloads) && in mlx5_ib_modify_wq()
5632 MLX5_CAP_ETH(dev->mdev, vlan_cap))) { in mlx5_ib_modify_wq()
5654 if (MLX5_CAP_GEN(dev->mdev, modify_rq_counter_set_id)) { in mlx5_ib_modify_wq()
5664 err = mlx5_core_modify_rq(dev->mdev, rwq->core_qp.qpn, in); in mlx5_ib_modify_wq()
5692 struct mlx5_core_dev *mdev = dev->mdev; in handle_drain_completion() local
5700 if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) { in handle_drain_completion()
5752 struct mlx5_core_dev *mdev = dev->mdev; in mlx5_ib_drain_sq() local
5755 if (ret && mdev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR) { in mlx5_ib_drain_sq()
5781 struct mlx5_core_dev *mdev = dev->mdev; in mlx5_ib_drain_rq() local
5784 if (ret && mdev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR) { in mlx5_ib_drain_rq()
5818 if (!MLX5_CAP_GEN(dev->mdev, rts2rts_qp_counters_set_id)) { in mlx5_ib_qp_set_counter()
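
Nearly every reference above is either a firmware-capability check read through dev->mdev (MLX5_CAP_GEN, MLX5_CAP_ETH, MLX5_CAP_ATOMIC, ...) or a command issued on the core device (mlx5_cmd_exec_inout, mlx5_core_modify_rq, ...). As a minimal sketch of the capability-gating pattern, assuming the standard MLX5_CAP_GEN() accessor and a struct mlx5_ib_dev whose mdev member points at the mlx5 core device; the helper itself is hypothetical, not taken from the driver source:

	/*
	 * Illustrative sketch only: gate a caller-requested work-queue size
	 * against the HCA capability cached on the core device, the same
	 * check shape seen at lines 440, 617 and 658 above.
	 */
	static int example_check_wqe_count(struct mlx5_ib_dev *dev, u32 wqe_cnt)
	{
		/* log_max_qp_sz is a general HCA capability queried from firmware */
		if (wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz)))
			return -EINVAL;	/* request exceeds what the device supports */

		return 0;
	}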