/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>
#include "mlx5_ib.h"
#include <linux/jiffies.h>

/*
 * Fill in a physical address list. ib_umem_num_dma_blocks() entries will be
 * filled in the pas array.
 */
void mlx5_ib_populate_pas(struct ib_umem *umem, size_t page_size, __be64 *pas,
			  u64 access_flags)
{
	struct ib_block_iter biter;

	rdma_umem_for_each_dma_block (umem, &biter, page_size) {
		*pas = cpu_to_be64(rdma_block_iter_dma_address(&biter) |
				   access_flags);
		pas++;
	}
}

/*
 * Compute the page shift and page_offset for mailboxes that use a quantized
 * page_offset. The granularity of the page offset scales according to page
 * size.
 */
unsigned long __mlx5_umem_find_best_quantized_pgoff(
	struct ib_umem *umem, unsigned long pgsz_bitmap,
	unsigned int page_offset_bits, u64 pgoff_bitmask, unsigned int scale,
	unsigned int *page_offset_quantized)
{
	const u64 page_offset_mask = (1UL << page_offset_bits) - 1;
	unsigned long page_size;
	u64 page_offset;

	page_size = ib_umem_find_best_pgoff(umem, pgsz_bitmap, pgoff_bitmask);
	if (!page_size)
		return 0;

	/*
	 * page size is the largest possible page size.
	 *
	 * Reduce the page_size, and thus the page_offset and quanta, until the
	 * page_offset fits into the mailbox field. Once page_size < scale this
	 * loop is guaranteed to terminate.
	 */
	page_offset = ib_umem_dma_offset(umem, page_size);
	while (page_offset & ~(u64)(page_offset_mask * (page_size / scale))) {
		page_size /= 2;
		page_offset = ib_umem_dma_offset(umem, page_size);
	}

	/*
	 * The address is not aligned, or otherwise cannot be represented by
	 * the page_offset.
	 */
	if (!(pgsz_bitmap & page_size))
		return 0;

	*page_offset_quantized =
		(unsigned long)page_offset / (page_size / scale);
	if (WARN_ON(*page_offset_quantized > page_offset_mask))
		return 0;
	return page_size;
}
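/*
 * Illustrative usage sketch (comment only, not compiled): callers normally
 * reach __mlx5_umem_find_best_quantized_pgoff() through the
 * mlx5_umem_find_best_quantized_pgoff() wrapper in mlx5_ib.h, which derives
 * pgsz_bitmap, page_offset_bits and pgoff_bitmask from the mailbox field
 * layout. For a CQ, whose page_offset field is quantized in units of 64
 * bytes, a call is shaped roughly like:
 *
 *	unsigned int page_offset_quantized;
 *	unsigned long page_size;
 *
 *	page_size = mlx5_umem_find_best_quantized_pgoff(
 *		umem, cqc, log_page_size, MLX5_ADAPTER_PAGE_SHIFT,
 *		page_offset, 64, &page_offset_quantized);
 *	if (!page_size)
 *		return -EINVAL;
 *
 * A return of 0 means no supported page size can represent the umem's start
 * address within the quantized page_offset field.
 */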
#define WR_ID_BF 0xBF
#define WR_ID_END 0xBAD
#define TEST_WC_NUM_WQES 255
#define TEST_WC_POLLING_MAX_TIME_JIFFIES msecs_to_jiffies(100)
static int post_send_nop(struct mlx5_ib_dev *dev, struct ib_qp *ibqp, u64 wr_id,
			 bool signaled)
{
	struct mlx5_ib_qp *qp = to_mqp(ibqp);
	struct mlx5_wqe_ctrl_seg *ctrl;
	struct mlx5_bf *bf = &qp->bf;
	__be32 mmio_wqe[16] = {};
	unsigned long flags;
	unsigned int idx;
	int i;

	if (unlikely(dev->mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR))
		return -EIO;

	spin_lock_irqsave(&qp->sq.lock, flags);

	idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1);
	ctrl = mlx5_frag_buf_get_wqe(&qp->sq.fbc, idx);

	memset(ctrl, 0, sizeof(struct mlx5_wqe_ctrl_seg));
	ctrl->fm_ce_se = signaled ? MLX5_WQE_CTRL_CQ_UPDATE : 0;
	ctrl->opmod_idx_opcode =
		cpu_to_be32(((u32)(qp->sq.cur_post) << 8) | MLX5_OPCODE_NOP);
	ctrl->qpn_ds = cpu_to_be32((sizeof(struct mlx5_wqe_ctrl_seg) / 16) |
				   (qp->trans_qp.base.mqp.qpn << 8));

	qp->sq.wrid[idx] = wr_id;
	qp->sq.w_list[idx].opcode = MLX5_OPCODE_NOP;
	qp->sq.wqe_head[idx] = qp->sq.head + 1;
	qp->sq.cur_post += DIV_ROUND_UP(sizeof(struct mlx5_wqe_ctrl_seg),
					MLX5_SEND_WQE_BB);
	qp->sq.w_list[idx].next = qp->sq.cur_post;
	qp->sq.head++;

	memcpy(mmio_wqe, ctrl, sizeof(*ctrl));
	((struct mlx5_wqe_ctrl_seg *)&mmio_wqe)->fm_ce_se |=
		MLX5_WQE_CTRL_CQ_UPDATE;

	/* Make sure that descriptors are written before
	 * updating doorbell record and ringing the doorbell
	 */
	wmb();

	qp->db.db[MLX5_SND_DBR] = cpu_to_be32(qp->sq.cur_post);

	/* Make sure doorbell record is visible to the HCA before
	 * we hit doorbell
	 */
	wmb();
	for (i = 0; i < 8; i++)
		mlx5_write64(&mmio_wqe[i * 2],
			     bf->bfreg->map + bf->offset + i * 8);

	bf->offset ^= bf->buf_size;

	spin_unlock_irqrestore(&qp->sq.lock, flags);

	return 0;
}
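/*
 * Note on the probe mechanics (descriptive comment, derived from
 * post_send_nop() above): for unsignaled posts the WQE in the send queue has
 * fm_ce_se == 0, while the 64-byte mmio_wqe copy pushed through the BlueFlame
 * register always has MLX5_WQE_CTRL_CQ_UPDATE set. If the BlueFlame mapping
 * really is write-combining, the HCA consumes the doorbell payload as a
 * single 64-byte write and generates completions for the WR_ID_BF NOPs. If
 * the write was split up, the HCA falls back to fetching the WQE from host
 * memory, which requests no completion, so only the final signaled WR_ID_END
 * NOP completes. The wr_id of the first polled completion therefore tells us
 * whether write-combining worked.
 */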
static int test_wc_poll_cq_result(struct mlx5_ib_dev *dev, struct ib_cq *cq)
{
	int ret;
	struct ib_wc wc = {};
	unsigned long end = jiffies + TEST_WC_POLLING_MAX_TIME_JIFFIES;

	do {
		ret = ib_poll_cq(cq, 1, &wc);
		if (ret < 0 || wc.status)
			return ret < 0 ? ret : -EINVAL;
		if (ret)
			break;
	} while (!time_after(jiffies, end));

	if (!ret)
		return -ETIMEDOUT;

	if (wc.wr_id != WR_ID_BF)
		ret = 0;

	return ret;
}

static int test_wc_do_send(struct mlx5_ib_dev *dev, struct ib_qp *qp)
{
	int err, i;

	for (i = 0; i < TEST_WC_NUM_WQES; i++) {
		err = post_send_nop(dev, qp, WR_ID_BF, false);
		if (err)
			return err;
	}

	return post_send_nop(dev, qp, WR_ID_END, true);
}

int mlx5_ib_test_wc(struct mlx5_ib_dev *dev)
{
	struct ib_cq_init_attr cq_attr = { .cqe = TEST_WC_NUM_WQES + 1 };
	int port_type_cap = MLX5_CAP_GEN(dev->mdev, port_type);
	struct ib_qp_init_attr qp_init_attr = {
		.cap = { .max_send_wr = TEST_WC_NUM_WQES },
		.qp_type = IB_QPT_UD,
		.sq_sig_type = IB_SIGNAL_REQ_WR,
		.create_flags = MLX5_IB_QP_CREATE_WC_TEST,
	};
	struct ib_qp_attr qp_attr = { .port_num = 1 };
	struct ib_device *ibdev = &dev->ib_dev;
	struct ib_qp *qp;
	struct ib_cq *cq;
	struct ib_pd *pd;
	int ret;

	if (!MLX5_CAP_GEN(dev->mdev, bf))
		return 0;

	if (!dev->mdev->roce.roce_en &&
	    port_type_cap == MLX5_CAP_PORT_TYPE_ETH) {
		if (mlx5_core_is_pf(dev->mdev))
			dev->wc_support = arch_can_pci_mmap_wc();
		return 0;
	}

	ret = mlx5_alloc_bfreg(dev->mdev, &dev->wc_bfreg, true, false);
	if (ret)
		goto print_err;

	if (!dev->wc_bfreg.wc)
		goto out1;

	pd = ib_alloc_pd(ibdev, 0);
	if (IS_ERR(pd)) {
		ret = PTR_ERR(pd);
		goto out1;
	}

	cq = ib_create_cq(ibdev, NULL, NULL, NULL, &cq_attr);
	if (IS_ERR(cq)) {
		ret = PTR_ERR(cq);
		goto out2;
	}

	qp_init_attr.recv_cq = cq;
	qp_init_attr.send_cq = cq;
	qp = ib_create_qp(pd, &qp_init_attr);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto out3;
	}

	qp_attr.qp_state = IB_QPS_INIT;
	ret = ib_modify_qp(qp, &qp_attr,
			   IB_QP_STATE | IB_QP_PORT | IB_QP_PKEY_INDEX |
				   IB_QP_QKEY);
	if (ret)
		goto out4;

	qp_attr.qp_state = IB_QPS_RTR;
	ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
	if (ret)
		goto out4;

	qp_attr.qp_state = IB_QPS_RTS;
	ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_SQ_PSN);
	if (ret)
		goto out4;

	ret = test_wc_do_send(dev, qp);
	if (ret < 0)
		goto out4;

	ret = test_wc_poll_cq_result(dev, cq);
	if (ret > 0) {
		dev->wc_support = true;
		ret = 0;
	}

out4:
	ib_destroy_qp(qp);
out3:
	ib_destroy_cq(cq);
out2:
	ib_dealloc_pd(pd);
out1:
	mlx5_free_bfreg(dev->mdev, &dev->wc_bfreg);
print_err:
	if (ret)
		mlx5_ib_err(
			dev,
			"Error %d while trying to test write-combining support\n",
			ret);
	return ret;
}
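/*
 * Usage sketch (illustrative; everything except mlx5_ib_test_wc() and
 * dev->wc_support is hypothetical): the probe is meant to run once during
 * device initialization, before the device is exposed to users, with the
 * verdict read back from dev->wc_support afterwards:
 *
 *	static int example_enable_driver(struct mlx5_ib_dev *dev)
 *	{
 *		int err = mlx5_ib_test_wc(dev);
 *
 *		if (err)
 *			return err;
 *		if (!dev->wc_support)
 *			mlx5_ib_dbg(dev, "avoiding write-combining\n");
 *		return 0;
 *	}
 *
 * A nonzero return means the probe itself could not run; "write-combining
 * unsupported" is reported as ret == 0 with dev->wc_support left false.
 */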