/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/io.h>
#include <rdma/ib_umem_odp.h>
#include "mlx5_ib.h"
#include <linux/jiffies.h>

/*
 * Fill in a physical address list. ib_umem_num_dma_blocks() entries will be
 * filled in the pas array.
 */
void mlx5_ib_populate_pas(struct ib_umem *umem, size_t page_size, __be64 *pas,
			  u64 access_flags)
{
	struct ib_block_iter biter;

	rdma_umem_for_each_dma_block (umem, &biter, page_size) {
		*pas = cpu_to_be64(rdma_block_iter_dma_address(&biter) |
				   access_flags);
		pas++;
	}
}
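
/*
 * Hedged usage sketch (not taken from this file; variable names are
 * illustrative): a caller is expected to size the pas array from
 * ib_umem_num_dma_blocks() for the same page_size, e.g.:
 *
 *	size_t npas = ib_umem_num_dma_blocks(umem, page_size);
 *	__be64 *pas = kvcalloc(npas, sizeof(*pas), GFP_KERNEL);
 *
 *	if (pas)
 *		mlx5_ib_populate_pas(umem, page_size, pas, access_flags);
 */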

/*
 * Compute the page shift and page_offset for mailboxes that use a quantized
 * page_offset. The granularity of the page offset scales according to page
 * size.
 */
unsigned long __mlx5_umem_find_best_quantized_pgoff(
	struct ib_umem *umem, unsigned long pgsz_bitmap,
	unsigned int page_offset_bits, u64 pgoff_bitmask, unsigned int scale,
	unsigned int *page_offset_quantized)
{
	const u64 page_offset_mask = (1UL << page_offset_bits) - 1;
	unsigned long page_size;
	u64 page_offset;

	page_size = ib_umem_find_best_pgoff(umem, pgsz_bitmap, pgoff_bitmask);
	if (!page_size)
		return 0;

	/*
	 * page size is the largest possible page size.
	 *
	 * Reduce the page_size, and thus the page_offset and quanta, until the
	 * page_offset fits into the mailbox field. Once page_size < scale this
	 * loop is guaranteed to terminate. The quantum is page_size / scale;
	 * e.g. a 4k page_size with a scale of 64 expresses the page_offset in
	 * units of 64 bytes.
	 */
	page_offset = ib_umem_dma_offset(umem, page_size);
	while (page_offset & ~(u64)(page_offset_mask * (page_size / scale))) {
		page_size /= 2;
		page_offset = ib_umem_dma_offset(umem, page_size);
	}

	/*
	 * The address is not aligned, or otherwise cannot be represented by the
	 * page_offset.
	 */
	if (!(pgsz_bitmap & page_size))
		return 0;

	*page_offset_quantized =
		(unsigned long)page_offset / (page_size / scale);
	if (WARN_ON(*page_offset_quantized > page_offset_mask))
		return 0;
	return page_size;
}
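
/*
 * Hedged usage sketch (illustrative; the mailbox type and field names below
 * are assumptions, not verified against this tree): callers typically reach
 * this through the mlx5_umem_find_best_quantized_pgoff() wrapper macro in
 * mlx5_ib.h, which derives pgsz_bitmap and page_offset_bits from the mailbox
 * layout:
 *
 *	unsigned int page_offset_quantized;
 *	unsigned long page_size;
 *
 *	page_size = mlx5_umem_find_best_quantized_pgoff(
 *		umem, srqc, log_page_size, MLX5_ADAPTER_PAGE_SHIFT,
 *		page_offset, 64, &page_offset_quantized);
 *	if (!page_size)
 *		return -EINVAL;
 */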

#define WR_ID_BF 0xBF
#define WR_ID_END 0xBAD
#define TEST_WC_NUM_WQES 255
#define TEST_WC_POLLING_MAX_TIME_JIFFIES msecs_to_jiffies(100)
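
/*
 * Write-combining self test: post_send_nop() posts NOP WQEs whose doorbell
 * copy is written through the BlueFlame register. Only that copy requests a
 * CQE, so a completion for WR_ID_BF can only be generated if the
 * write-combined doorbell write actually reached the HCA.
 */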
static int post_send_nop(struct mlx5_ib_dev *dev, struct ib_qp *ibqp, u64 wr_id,
			 bool signaled)
{
	struct mlx5_ib_qp *qp = to_mqp(ibqp);
	struct mlx5_wqe_ctrl_seg *ctrl;
	struct mlx5_bf *bf = &qp->bf;
	__be32 mmio_wqe[16] = {};
	unsigned long flags;
	unsigned int idx;

	if (unlikely(dev->mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR))
		return -EIO;

	spin_lock_irqsave(&qp->sq.lock, flags);

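	/* cur_post is free-running; wqe_cnt is a power-of-two ring size */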
	idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1);
	ctrl = mlx5_frag_buf_get_wqe(&qp->sq.fbc, idx);

	memset(ctrl, 0, sizeof(struct mlx5_wqe_ctrl_seg));
	ctrl->fm_ce_se = signaled ? MLX5_WQE_CTRL_CQ_UPDATE : 0;
	ctrl->opmod_idx_opcode =
		cpu_to_be32(((u32)(qp->sq.cur_post) << 8) | MLX5_OPCODE_NOP);
	ctrl->qpn_ds = cpu_to_be32((sizeof(struct mlx5_wqe_ctrl_seg) / 16) |
				   (qp->trans_qp.base.mqp.qpn << 8));

	qp->sq.wrid[idx] = wr_id;
	qp->sq.w_list[idx].opcode = MLX5_OPCODE_NOP;
	qp->sq.wqe_head[idx] = qp->sq.head + 1;
	qp->sq.cur_post += DIV_ROUND_UP(sizeof(struct mlx5_wqe_ctrl_seg),
					MLX5_SEND_WQE_BB);
	qp->sq.w_list[idx].next = qp->sq.cur_post;
	qp->sq.head++;

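	/*
	 * The copy pushed through BlueFlame always requests a CQE, unlike
	 * the in-memory WQE built above.
	 */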
	memcpy(mmio_wqe, ctrl, sizeof(*ctrl));
	((struct mlx5_wqe_ctrl_seg *)&mmio_wqe)->fm_ce_se |=
		MLX5_WQE_CTRL_CQ_UPDATE;

	/* Make sure that descriptors are written before
	 * updating doorbell record and ringing the doorbell
	 */
	wmb();

	qp->db.db[MLX5_SND_DBR] = cpu_to_be32(qp->sq.cur_post);

	/* Make sure doorbell record is visible to the HCA before
	 * we hit doorbell
	 */
	wmb();
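	/* post the WQE copy through the BlueFlame register */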
	__iowrite64_copy(bf->bfreg->map + bf->offset, mmio_wqe,
			 sizeof(mmio_wqe) / 8);

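	/* alternate between the two halves of the BlueFlame register */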
	bf->offset ^= bf->buf_size;

	spin_unlock_irqrestore(&qp->sq.lock, flags);

	return 0;
}

static int test_wc_poll_cq_result(struct mlx5_ib_dev *dev, struct ib_cq *cq)
{
	int ret;
	struct ib_wc wc = {};
	unsigned long end = jiffies + TEST_WC_POLLING_MAX_TIME_JIFFIES;

	do {
		ret = ib_poll_cq(cq, 1, &wc);
		if (ret < 0 || wc.status)
			return ret < 0 ? ret : -EINVAL;
		if (ret)
			break;
	} while (!time_after(jiffies, end));

	if (!ret)
		return -ETIMEDOUT;

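	/*
	 * Seeing the signaled WR_ID_END completion first means none of the
	 * BlueFlame NOPs generated a CQE, i.e. write-combining did not work.
	 */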
	if (wc.wr_id != WR_ID_BF)
		ret = 0;

	return ret;
}

static int test_wc_do_send(struct mlx5_ib_dev *dev, struct ib_qp *qp)
{
	int err, i;

	for (i = 0; i < TEST_WC_NUM_WQES; i++) {
		err = post_send_nop(dev, qp, WR_ID_BF, false);
		if (err)
			return err;
	}

	return post_send_nop(dev, qp, WR_ID_END, true);
}

int mlx5_ib_test_wc(struct mlx5_ib_dev *dev)
{
	struct ib_cq_init_attr cq_attr = { .cqe = TEST_WC_NUM_WQES + 1 };
	int port_type_cap = MLX5_CAP_GEN(dev->mdev, port_type);
	struct ib_qp_init_attr qp_init_attr = {
		.cap = { .max_send_wr = TEST_WC_NUM_WQES },
		.qp_type = IB_QPT_UD,
		.sq_sig_type = IB_SIGNAL_REQ_WR,
		.create_flags = MLX5_IB_QP_CREATE_WC_TEST,
	};
	struct ib_qp_attr qp_attr = { .port_num = 1 };
	struct ib_device *ibdev = &dev->ib_dev;
	struct ib_qp *qp;
	struct ib_cq *cq;
	struct ib_pd *pd;
	int ret;

	if (!MLX5_CAP_GEN(dev->mdev, bf))
		return 0;

	if (!dev->mdev->roce.roce_en &&
	    port_type_cap == MLX5_CAP_PORT_TYPE_ETH) {
		if (mlx5_core_is_pf(dev->mdev))
			dev->wc_support = arch_can_pci_mmap_wc();
		return 0;
	}

	ret = mlx5_alloc_bfreg(dev->mdev, &dev->wc_bfreg, true, false);
	if (ret)
		goto print_err;

	if (!dev->wc_bfreg.wc)
		goto out1;

	pd = ib_alloc_pd(ibdev, 0);
	if (IS_ERR(pd)) {
		ret = PTR_ERR(pd);
		goto out1;
	}

	cq = ib_create_cq(ibdev, NULL, NULL, NULL, &cq_attr);
	if (IS_ERR(cq)) {
		ret = PTR_ERR(cq);
		goto out2;
	}

	qp_init_attr.recv_cq = cq;
	qp_init_attr.send_cq = cq;
	qp = ib_create_qp(pd, &qp_init_attr);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto out3;
	}

	qp_attr.qp_state = IB_QPS_INIT;
	ret = ib_modify_qp(qp, &qp_attr,
			   IB_QP_STATE | IB_QP_PORT | IB_QP_PKEY_INDEX |
				   IB_QP_QKEY);
	if (ret)
		goto out4;

	qp_attr.qp_state = IB_QPS_RTR;
	ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
	if (ret)
		goto out4;

	qp_attr.qp_state = IB_QPS_RTS;
	ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_SQ_PSN);
	if (ret)
		goto out4;

	ret = test_wc_do_send(dev, qp);
	if (ret < 0)
		goto out4;

	ret = test_wc_poll_cq_result(dev, cq);
	if (ret > 0) {
		dev->wc_support = true;
		ret = 0;
	}

out4:
	ib_destroy_qp(qp);
out3:
	ib_destroy_cq(cq);
out2:
	ib_dealloc_pd(pd);
out1:
	mlx5_free_bfreg(dev->mdev, &dev->wc_bfreg);
print_err:
	if (ret)
		mlx5_ib_err(
			dev,
			"Error %d while trying to test write-combining support\n",
			ret);
	return ret;
}