// SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
// Copyright (c) 2019 Hisilicon Limited.

#include <rdma/rdma_cm.h>
#include <rdma/restrack.h>
#include <uapi/rdma/rdma_netlink.h>
#include "hnae3.h"
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_hw_v2.h"

#define MAX_ENTRY_NUM 256

/* Fill driver-specific CQ attributes for the restrack netlink interface. */
int hns_roce_fill_res_cq_entry(struct sk_buff *msg, struct ib_cq *ib_cq)
{
	struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
	struct nlattr *table_attr;

	table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
	if (!table_attr)
		return -EMSGSIZE;

	if (rdma_nl_put_driver_u32(msg, "cq_depth", hr_cq->cq_depth))
		goto err;

	if (rdma_nl_put_driver_u32(msg, "cons_index", hr_cq->cons_index))
		goto err;

	if (rdma_nl_put_driver_u32(msg, "cqe_size", hr_cq->cqe_size))
		goto err;

	if (rdma_nl_put_driver_u32(msg, "arm_sn", hr_cq->arm_sn))
		goto err;

	nla_nest_end(msg, table_attr);

	return 0;

err:
	nla_nest_cancel(msg, table_attr);

	return -EMSGSIZE;
}

/* Dump selected CQ context (CQC) fields from hardware as a raw u32 array. */
int hns_roce_fill_res_cq_entry_raw(struct sk_buff *msg, struct ib_cq *ib_cq)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
	struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
	struct hns_roce_v2_cq_context context;
	u32 data[MAX_ENTRY_NUM] = {};
	int offset = 0;
	int ret;

	if (!hr_dev->hw->query_cqc)
		return -EINVAL;

	ret = hr_dev->hw->query_cqc(hr_dev, hr_cq->cqn, &context);
	if (ret)
		return -EINVAL;

	data[offset++] = hr_reg_read(&context, CQC_CQ_ST);
	data[offset++] = hr_reg_read(&context, CQC_SHIFT);
	data[offset++] = hr_reg_read(&context, CQC_CQE_SIZE);
	data[offset++] = hr_reg_read(&context, CQC_CQE_CNT);
	data[offset++] = hr_reg_read(&context, CQC_CQ_PRODUCER_IDX);
	data[offset++] = hr_reg_read(&context, CQC_CQ_CONSUMER_IDX);
	data[offset++] = hr_reg_read(&context, CQC_DB_RECORD_EN);
	data[offset++] = hr_reg_read(&context, CQC_ARM_ST);
	data[offset++] = hr_reg_read(&context, CQC_CMD_SN);
	data[offset++] = hr_reg_read(&context, CQC_CEQN);
	data[offset++] = hr_reg_read(&context, CQC_CQ_MAX_CNT);
	data[offset++] = hr_reg_read(&context, CQC_CQ_PERIOD);
	data[offset++] = hr_reg_read(&context, CQC_CQE_HOP_NUM);
	data[offset++] = hr_reg_read(&context, CQC_CQE_BAR_PG_SZ);
	data[offset++] = hr_reg_read(&context, CQC_CQE_BUF_PG_SZ);

	ret = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, offset * sizeof(u32), data);

	return ret;
}

/* Fill driver-specific QP attributes for the restrack netlink interface. */
int hns_roce_fill_res_qp_entry(struct sk_buff *msg, struct ib_qp *ib_qp)
{
	struct hns_roce_qp *hr_qp = to_hr_qp(ib_qp);
	struct nlattr *table_attr;

	table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
	if (!table_attr)
		return -EMSGSIZE;

	if (rdma_nl_put_driver_u32_hex(msg, "sq_wqe_cnt", hr_qp->sq.wqe_cnt))
		goto err;

	if (rdma_nl_put_driver_u32_hex(msg, "sq_max_gs", hr_qp->sq.max_gs))
		goto err;

	if (rdma_nl_put_driver_u32_hex(msg, "rq_wqe_cnt", hr_qp->rq.wqe_cnt))
		goto err;

	if (rdma_nl_put_driver_u32_hex(msg, "rq_max_gs", hr_qp->rq.max_gs))
		goto err;

	if (rdma_nl_put_driver_u32_hex(msg, "ext_sge_sge_cnt", hr_qp->sge.sge_cnt))
		goto err;

	nla_nest_end(msg, table_attr);

	return 0;

err:
	nla_nest_cancel(msg, table_attr);

	return -EMSGSIZE;
}

/* Dump selected QP context (QPC) fields from hardware as a raw u32 array. */
int hns_roce_fill_res_qp_entry_raw(struct sk_buff *msg, struct ib_qp *ib_qp)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_qp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ib_qp);
	struct hns_roce_v2_qp_context context;
	u32 data[MAX_ENTRY_NUM] = {};
	int offset = 0;
	int ret;

	if (!hr_dev->hw->query_qpc)
		return -EINVAL;

	ret = hr_dev->hw->query_qpc(hr_dev, hr_qp->qpn, &context);
	if (ret)
		return -EINVAL;

	data[offset++] = hr_reg_read(&context, QPC_QP_ST);
	data[offset++] = hr_reg_read(&context, QPC_ERR_TYPE);
	data[offset++] = hr_reg_read(&context, QPC_CHECK_FLG);
	data[offset++] = hr_reg_read(&context, QPC_SRQ_EN);
	data[offset++] = hr_reg_read(&context, QPC_SRQN);
	data[offset++] = hr_reg_read(&context, QPC_QKEY_XRCD);
	data[offset++] = hr_reg_read(&context, QPC_TX_CQN);
	data[offset++] = hr_reg_read(&context, QPC_RX_CQN);
	data[offset++] = hr_reg_read(&context, QPC_SQ_PRODUCER_IDX);
	data[offset++] = hr_reg_read(&context, QPC_SQ_CONSUMER_IDX);
	data[offset++] = hr_reg_read(&context, QPC_RQ_RECORD_EN);
	data[offset++] = hr_reg_read(&context, QPC_RQ_PRODUCER_IDX);
	data[offset++] = hr_reg_read(&context, QPC_RQ_CONSUMER_IDX);
	data[offset++] = hr_reg_read(&context, QPC_SQ_SHIFT);
	data[offset++] = hr_reg_read(&context, QPC_RQWS);
	data[offset++] = hr_reg_read(&context, QPC_RQ_SHIFT);
	data[offset++] = hr_reg_read(&context, QPC_SGE_SHIFT);
	data[offset++] = hr_reg_read(&context, QPC_SQ_HOP_NUM);
	data[offset++] = hr_reg_read(&context, QPC_RQ_HOP_NUM);
	data[offset++] = hr_reg_read(&context, QPC_SGE_HOP_NUM);
	data[offset++] = hr_reg_read(&context, QPC_WQE_SGE_BA_PG_SZ);
	data[offset++] = hr_reg_read(&context, QPC_WQE_SGE_BUF_PG_SZ);
	data[offset++] = hr_reg_read(&context, QPC_RETRY_NUM_INIT);
	data[offset++] = hr_reg_read(&context, QPC_RETRY_CNT);
	data[offset++] = hr_reg_read(&context, QPC_SQ_CUR_PSN);
	data[offset++] = hr_reg_read(&context, QPC_SQ_MAX_PSN);
	data[offset++] = hr_reg_read(&context, QPC_SQ_FLUSH_IDX);
	data[offset++] = hr_reg_read(&context, QPC_SQ_MAX_IDX);
	data[offset++] = hr_reg_read(&context, QPC_SQ_TX_ERR);
	data[offset++] = hr_reg_read(&context, QPC_SQ_RX_ERR);
	data[offset++] = hr_reg_read(&context, QPC_RQ_RX_ERR);
	data[offset++] = hr_reg_read(&context, QPC_RQ_TX_ERR);
	data[offset++] = hr_reg_read(&context, QPC_RQ_CQE_IDX);
	data[offset++] = hr_reg_read(&context, QPC_RQ_RTY_TX_ERR);

	ret = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, offset * sizeof(u32), data);

	return ret;
}

/* Fill driver-specific MR attributes for the restrack netlink interface. */
int hns_roce_fill_res_mr_entry(struct sk_buff *msg, struct ib_mr *ib_mr)
{
	struct hns_roce_mr *hr_mr = to_hr_mr(ib_mr);
	struct nlattr *table_attr;

	table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
	if (!table_attr)
		return -EMSGSIZE;

	if (rdma_nl_put_driver_u32_hex(msg, "pbl_hop_num", hr_mr->pbl_hop_num))
		goto err;

	if (rdma_nl_put_driver_u32_hex(msg, "ba_pg_shift",
				       hr_mr->pbl_mtr.hem_cfg.ba_pg_shift))
		goto err;

	if (rdma_nl_put_driver_u32_hex(msg, "buf_pg_shift",
				       hr_mr->pbl_mtr.hem_cfg.buf_pg_shift))
		goto err;

	nla_nest_end(msg, table_attr);

	return 0;

err:
	nla_nest_cancel(msg, table_attr);

	return -EMSGSIZE;
}

/* Dump selected MPT entry fields from hardware as a raw u32 array. */
int hns_roce_fill_res_mr_entry_raw(struct sk_buff *msg, struct ib_mr *ib_mr)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_mr->device);
	struct hns_roce_mr *hr_mr = to_hr_mr(ib_mr);
	struct hns_roce_v2_mpt_entry context;
	u32 data[MAX_ENTRY_NUM] = {};
	int offset = 0;
	int ret;

	if (!hr_dev->hw->query_mpt)
		return -EINVAL;

	ret = hr_dev->hw->query_mpt(hr_dev, hr_mr->key, &context);
	if (ret)
		return -EINVAL;

	data[offset++] = hr_reg_read(&context, MPT_ST);
	data[offset++] = hr_reg_read(&context, MPT_PD);
	data[offset++] = hr_reg_read(&context, MPT_LKEY);
	data[offset++] = hr_reg_read(&context, MPT_LEN_L);
	data[offset++] = hr_reg_read(&context, MPT_LEN_H);
	data[offset++] = hr_reg_read(&context, MPT_PBL_SIZE);
	data[offset++] = hr_reg_read(&context, MPT_PBL_HOP_NUM);
	data[offset++] = hr_reg_read(&context, MPT_PBL_BA_PG_SZ);
	data[offset++] = hr_reg_read(&context, MPT_PBL_BUF_PG_SZ);

	ret = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, offset * sizeof(u32), data);

	return ret;
}
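/*
 * Note: the callbacks above are not invoked from this file. As a rough
 * sketch only (the actual registration lives elsewhere in the driver and is
 * an assumption here, including the table name), they would be wired into an
 * ib_device_ops table and installed with ib_set_device_ops():
 *
 *	static const struct ib_device_ops hns_roce_dev_restrack_ops = {
 *		.fill_res_cq_entry	= hns_roce_fill_res_cq_entry,
 *		.fill_res_cq_entry_raw	= hns_roce_fill_res_cq_entry_raw,
 *		.fill_res_qp_entry	= hns_roce_fill_res_qp_entry,
 *		.fill_res_qp_entry_raw	= hns_roce_fill_res_qp_entry_raw,
 *		.fill_res_mr_entry	= hns_roce_fill_res_mr_entry,
 *		.fill_res_mr_entry_raw	= hns_roce_fill_res_mr_entry_raw,
 *	};
 *
 *	ib_set_device_ops(&hr_dev->ib_dev, &hns_roce_dev_restrack_ops);
 *
 * The rdma_netlink restrack core then calls these when userspace queries
 * resources, with the *_raw variants feeding RDMA_NLDEV_ATTR_RES_RAW.
 */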