/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>
#include "mlx5_ib.h"
#include <linux/jiffies.h>

/*
 * Fill in a physical address list. ib_umem_num_dma_blocks() entries will be
 * filled in the pas array.
 */
void mlx5_ib_populate_pas(struct ib_umem *umem, size_t page_size, __be64 *pas,
			  u64 access_flags)
{
	struct ib_block_iter biter;

	rdma_umem_for_each_dma_block (umem, &biter, page_size) {
		*pas = cpu_to_be64(rdma_block_iter_dma_address(&biter) |
				   access_flags);
		pas++;
	}
}

/*
 * Compute the page shift and page_offset for mailboxes that use a quantized
 * page_offset. The granularity of the page offset scales according to page
 * size.
 */
unsigned long __mlx5_umem_find_best_quantized_pgoff(
	struct ib_umem *umem, unsigned long pgsz_bitmap,
	unsigned int page_offset_bits, u64 pgoff_bitmask, unsigned int scale,
	unsigned int *page_offset_quantized)
{
	const u64 page_offset_mask = (1UL << page_offset_bits) - 1;
	unsigned long page_size;
	u64 page_offset;

	page_size = ib_umem_find_best_pgoff(umem, pgsz_bitmap, pgoff_bitmask);
	if (!page_size)
		return 0;

	/*
	 * page_size is the largest possible page size.
	 *
	 * Reduce the page_size, and thus the page_offset and quanta, until the
	 * page_offset fits into the mailbox field. Once page_size < scale this
	 * loop is guaranteed to terminate.
	 */
	page_offset = ib_umem_dma_offset(umem, page_size);
	while (page_offset & ~(u64)(page_offset_mask * (page_size / scale))) {
		page_size /= 2;
		page_offset = ib_umem_dma_offset(umem, page_size);
	}

	/*
	 * The address is not aligned, or otherwise cannot be represented by
	 * the page_offset.
	 */
	if (!(pgsz_bitmap & page_size))
		return 0;

	*page_offset_quantized =
		(unsigned long)page_offset / (page_size / scale);
	if (WARN_ON(*page_offset_quantized > page_offset_mask))
		return 0;
	return page_size;
}
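
/*
 * Worked example of the quantization above (illustrative numbers only, not
 * tied to a particular mailbox): assume scale = 64 and page_offset_bits = 6,
 * so page_offset_mask = 63 and the offset quantum is page_size / 64.
 *
 *  - If the best page_size is 4096 and the umem starts 128 bytes into that
 *    page, the quantum is 4096 / 64 = 64 bytes; 128 & ~(63 * 64) == 0, so
 *    *page_offset_quantized = 128 / 64 = 2, which fits in the 6-bit field.
 *
 *  - If the umem instead starts at byte 96, then 96 & ~(63 * 64) != 0
 *    because 96 is not a multiple of the 64-byte quantum, so the loop
 *    halves page_size. At page_size = 2048 (assuming 2048 is set in
 *    pgsz_bitmap) the quantum shrinks to 32 bytes, the offset is
 *    representable, and *page_offset_quantized = 96 / 32 = 3.
 */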

#define WR_ID_BF 0xBF
#define WR_ID_END 0xBAD
#define TEST_WC_NUM_WQES 255
#define TEST_WC_POLLING_MAX_TIME_JIFFIES msecs_to_jiffies(100)

static int post_send_nop(struct mlx5_ib_dev *dev, struct ib_qp *ibqp, u64 wr_id,
			 bool signaled)
{
	struct mlx5_ib_qp *qp = to_mqp(ibqp);
	struct mlx5_wqe_ctrl_seg *ctrl;
	struct mlx5_bf *bf = &qp->bf;
	__be32 mmio_wqe[16] = {};
	unsigned long flags;
	unsigned int idx;
	int i;

	if (unlikely(dev->mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR))
		return -EIO;

	spin_lock_irqsave(&qp->sq.lock, flags);

	idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1);
	ctrl = mlx5_frag_buf_get_wqe(&qp->sq.fbc, idx);

	memset(ctrl, 0, sizeof(struct mlx5_wqe_ctrl_seg));
	ctrl->fm_ce_se = signaled ? MLX5_WQE_CTRL_CQ_UPDATE : 0;
	ctrl->opmod_idx_opcode =
		cpu_to_be32(((u32)(qp->sq.cur_post) << 8) | MLX5_OPCODE_NOP);
	ctrl->qpn_ds = cpu_to_be32((sizeof(struct mlx5_wqe_ctrl_seg) / 16) |
				   (qp->trans_qp.base.mqp.qpn << 8));

	qp->sq.wrid[idx] = wr_id;
	qp->sq.w_list[idx].opcode = MLX5_OPCODE_NOP;
	qp->sq.wqe_head[idx] = qp->sq.head + 1;
	qp->sq.cur_post += DIV_ROUND_UP(sizeof(struct mlx5_wqe_ctrl_seg),
					MLX5_SEND_WQE_BB);
	qp->sq.w_list[idx].next = qp->sq.cur_post;
	qp->sq.head++;

	/*
	 * The copy pushed through the BlueFlame register always requests a
	 * CQE; the in-memory WQE requests one only when the post is signaled.
	 */
	memcpy(mmio_wqe, ctrl, sizeof(*ctrl));
	((struct mlx5_wqe_ctrl_seg *)&mmio_wqe)->fm_ce_se |=
		MLX5_WQE_CTRL_CQ_UPDATE;

	/* Make sure that descriptors are written before
	 * updating doorbell record and ringing the doorbell
	 */
	wmb();

	qp->db.db[MLX5_SND_DBR] = cpu_to_be32(qp->sq.cur_post);

	/* Make sure doorbell record is visible to the HCA before
	 * we hit doorbell
	 */
	wmb();
	for (i = 0; i < 8; i++)
		mlx5_write64(&mmio_wqe[i * 2],
			     bf->bfreg->map + bf->offset + i * 8);

	bf->offset ^= bf->buf_size;

	spin_unlock_irqrestore(&qp->sq.lock, flags);

	return 0;
}

static int test_wc_poll_cq_result(struct mlx5_ib_dev *dev, struct ib_cq *cq)
{
	int ret;
	struct ib_wc wc = {};
	unsigned long end = jiffies + TEST_WC_POLLING_MAX_TIME_JIFFIES;

	do {
		ret = ib_poll_cq(cq, 1, &wc);
		if (ret < 0 || wc.status)
			return ret < 0 ? ret : -EINVAL;
		if (ret)
			break;
	} while (!time_after(jiffies, end));

	if (!ret)
		return -ETIMEDOUT;

	if (wc.wr_id != WR_ID_BF)
		ret = 0;

	return ret;
}
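
/*
 * How the probe decides (a reading of the flow above, not a hardware spec):
 * every NOP is pushed through the BlueFlame register with CQ_UPDATE forced
 * in the MMIO copy, while the in-memory WQE requests a CQE only for the
 * final, signaled post. If the mapping really combines writes, the HCA
 * consumes the 64-byte BlueFlame burst and generates completions carrying
 * WR_ID_BF. If it does not, the fragmented MMIO writes act only as a
 * doorbell, the HCA fetches the unsignaled WQEs from memory, and the first
 * completion seen is the signaled WR_ID_END one. test_wc_poll_cq_result()
 * therefore returns 1 (write-combining works) only when a WR_ID_BF
 * completion arrives first.
 */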

static int test_wc_do_send(struct mlx5_ib_dev *dev, struct ib_qp *qp)
{
	int err, i;

	for (i = 0; i < TEST_WC_NUM_WQES; i++) {
		err = post_send_nop(dev, qp, WR_ID_BF, false);
		if (err)
			return err;
	}

	return post_send_nop(dev, qp, WR_ID_END, true);
}

int mlx5_ib_test_wc(struct mlx5_ib_dev *dev)
{
	struct ib_cq_init_attr cq_attr = { .cqe = TEST_WC_NUM_WQES + 1 };
	int port_type_cap = MLX5_CAP_GEN(dev->mdev, port_type);
	struct ib_qp_init_attr qp_init_attr = {
		.cap = { .max_send_wr = TEST_WC_NUM_WQES },
		.qp_type = IB_QPT_UD,
		.sq_sig_type = IB_SIGNAL_REQ_WR,
		.create_flags = MLX5_IB_QP_CREATE_WC_TEST,
	};
	struct ib_qp_attr qp_attr = { .port_num = 1 };
	struct ib_device *ibdev = &dev->ib_dev;
	struct ib_qp *qp;
	struct ib_cq *cq;
	struct ib_pd *pd;
	int ret;

	if (!MLX5_CAP_GEN(dev->mdev, bf))
		return 0;

	if (!dev->mdev->roce.roce_en &&
	    port_type_cap == MLX5_CAP_PORT_TYPE_ETH) {
		if (mlx5_core_is_pf(dev->mdev))
			dev->wc_support = arch_can_pci_mmap_wc();
		return 0;
	}

	ret = mlx5_alloc_bfreg(dev->mdev, &dev->wc_bfreg, true, false);
	if (ret)
		goto print_err;

	if (!dev->wc_bfreg.wc)
		goto out1;

	pd = ib_alloc_pd(ibdev, 0);
	if (IS_ERR(pd)) {
		ret = PTR_ERR(pd);
		goto out1;
	}

	cq = ib_create_cq(ibdev, NULL, NULL, NULL, &cq_attr);
	if (IS_ERR(cq)) {
		ret = PTR_ERR(cq);
		goto out2;
	}

	qp_init_attr.recv_cq = cq;
	qp_init_attr.send_cq = cq;
	qp = ib_create_qp(pd, &qp_init_attr);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto out3;
	}

	qp_attr.qp_state = IB_QPS_INIT;
	ret = ib_modify_qp(qp, &qp_attr,
			   IB_QP_STATE | IB_QP_PORT | IB_QP_PKEY_INDEX |
				   IB_QP_QKEY);
	if (ret)
		goto out4;

	qp_attr.qp_state = IB_QPS_RTR;
	ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
	if (ret)
		goto out4;

	qp_attr.qp_state = IB_QPS_RTS;
	ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_SQ_PSN);
	if (ret)
		goto out4;

	ret = test_wc_do_send(dev, qp);
	if (ret < 0)
		goto out4;

	ret = test_wc_poll_cq_result(dev, cq);
	if (ret > 0) {
		dev->wc_support = true;
		ret = 0;
	}

out4:
	ib_destroy_qp(qp);
out3:
	ib_destroy_cq(cq);
out2:
	ib_dealloc_pd(pd);
out1:
	mlx5_free_bfreg(dev->mdev, &dev->wc_bfreg);
print_err:
	if (ret)
		mlx5_ib_err(
			dev,
			"Error %d while trying to test write-combining support\n",
			ret);
	return ret;
}
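
/*
 * Usage sketch (hypothetical call site, for illustration only): the probe is
 * meant to run once during device initialization, before any write-combining
 * UAR pages are handed out, e.g.:
 *
 *	err = mlx5_ib_test_wc(dev);
 *	if (err)
 *		return err;
 *	mlx5_ib_dbg(dev, "wc_support=%d\n", dev->wc_support);
 *
 * A negative probe leaves dev->wc_support false, so later requests to map
 * write-combining doorbell pages can be refused rather than silently slow.
 */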