/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_umem_odp.h>
#include "mlx5_ib.h"
#include <linux/jiffies.h>

/*
 * Fill in a physical address list: ib_umem_num_dma_blocks() entries will be
 * written to the pas array.
 */
void mlx5_ib_populate_pas(struct ib_umem *umem, size_t page_size, __be64 *pas,
			  u64 access_flags)
{
	struct ib_block_iter biter;

	rdma_umem_for_each_dma_block (umem, &biter, page_size) {
		*pas = cpu_to_be64(rdma_block_iter_dma_address(&biter) |
				   access_flags);
		pas++;
	}
}

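/*
 * Hedged usage sketch (not taken from this file): a caller would typically
 * size the pas array with ib_umem_num_dma_blocks() before populating it.
 * The kvcalloc()/MLX5_IB_MTT_PRESENT pairing is illustrative only.
 *
 *	size_t npas = ib_umem_num_dma_blocks(umem, page_size);
 *	__be64 *pas = kvcalloc(npas, sizeof(*pas), GFP_KERNEL);
 *
 *	if (pas)
 *		mlx5_ib_populate_pas(umem, page_size, pas,
 *				     MLX5_IB_MTT_PRESENT);
 */
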
/*
 * Compute the page shift and page_offset for mailboxes that use a quantized
 * page_offset. The granularity of the page offset scales according to page
 * size.
 */
unsigned long __mlx5_umem_find_best_quantized_pgoff(
	struct ib_umem *umem, unsigned long pgsz_bitmap,
	unsigned int page_offset_bits, u64 pgoff_bitmask, unsigned int scale,
	unsigned int *page_offset_quantized)
{
	const u64 page_offset_mask = (1UL << page_offset_bits) - 1;
	unsigned long page_size;
	u64 page_offset;

	page_size = ib_umem_find_best_pgoff(umem, pgsz_bitmap, pgoff_bitmask);
	if (!page_size)
		return 0;

	/*
	 * page_size is the largest possible page size at this point.
	 *
	 * Reduce the page_size, and thus the page_offset and quanta, until the
	 * page_offset fits into the mailbox field. Once page_size < scale this
	 * loop is guaranteed to terminate.
	 */
	page_offset = ib_umem_dma_offset(umem, page_size);
	while (page_offset & ~(u64)(page_offset_mask * (page_size / scale))) {
		page_size /= 2;
		page_offset = ib_umem_dma_offset(umem, page_size);
	}

	/*
	 * The address is not aligned, or otherwise cannot be represented by the
	 * page_offset.
	 */
	if (!(pgsz_bitmap & page_size))
		return 0;

	*page_offset_quantized =
		(unsigned long)page_offset / (page_size / scale);
	if (WARN_ON(*page_offset_quantized > page_offset_mask))
		return 0;
	return page_size;
}

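/*
 * Worked example (illustrative numbers, not from this file): with
 * page_size = 4096 and scale = 64, the quantum is page_size / scale = 64
 * bytes. A umem that starts 1088 bytes into the page yields
 * page_offset = 1088 and *page_offset_quantized = 1088 / 64 = 17, which
 * fits a 5-bit mailbox field since 17 <= (1 << 5) - 1. The hardware can
 * recover the byte offset as 17 * (4096 / 64) = 1088.
 */
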
#define WR_ID_BF 0xBF
#define WR_ID_END 0xBAD
#define TEST_WC_NUM_WQES 255
#define TEST_WC_POLLING_MAX_TIME_JIFFIES msecs_to_jiffies(100)
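/*
 * Post a NOP WQE on the send queue and write a copy of it through the
 * BlueFlame register. Only the BlueFlame copy unconditionally requests a
 * completion; the in-memory WQE does so only when "signaled" is set.
 */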
static int post_send_nop(struct mlx5_ib_dev *dev, struct ib_qp *ibqp, u64 wr_id,
			 bool signaled)
{
	struct mlx5_ib_qp *qp = to_mqp(ibqp);
	struct mlx5_wqe_ctrl_seg *ctrl;
	struct mlx5_bf *bf = &qp->bf;
	__be32 mmio_wqe[16] = {};
	unsigned long flags;
	unsigned int idx;
	int i;

	if (unlikely(dev->mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR))
		return -EIO;

	spin_lock_irqsave(&qp->sq.lock, flags);

	idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1);
	ctrl = mlx5_frag_buf_get_wqe(&qp->sq.fbc, idx);

	memset(ctrl, 0, sizeof(struct mlx5_wqe_ctrl_seg));
	ctrl->fm_ce_se = signaled ? MLX5_WQE_CTRL_CQ_UPDATE : 0;
	ctrl->opmod_idx_opcode =
		cpu_to_be32(((u32)(qp->sq.cur_post) << 8) | MLX5_OPCODE_NOP);
	ctrl->qpn_ds = cpu_to_be32((sizeof(struct mlx5_wqe_ctrl_seg) / 16) |
				   (qp->trans_qp.base.mqp.qpn << 8));

	qp->sq.wrid[idx] = wr_id;
	qp->sq.w_list[idx].opcode = MLX5_OPCODE_NOP;
	qp->sq.wqe_head[idx] = qp->sq.head + 1;
	qp->sq.cur_post += DIV_ROUND_UP(sizeof(struct mlx5_wqe_ctrl_seg),
					MLX5_SEND_WQE_BB);
	qp->sq.w_list[idx].next = qp->sq.cur_post;
	qp->sq.head++;

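	/*
	 * The BlueFlame copy always requests a completion while the
	 * in-memory WQE of an unsignaled post does not, so a WR_ID_BF
	 * completion proves the HCA consumed the copy written through the
	 * write-combining mapping rather than the one in host memory.
	 */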
	memcpy(mmio_wqe, ctrl, sizeof(*ctrl));
	((struct mlx5_wqe_ctrl_seg *)&mmio_wqe)->fm_ce_se |=
		MLX5_WQE_CTRL_CQ_UPDATE;

	/* Make sure that descriptors are written before
	 * updating doorbell record and ringing the doorbell
	 */
	wmb();

	qp->db.db[MLX5_SND_DBR] = cpu_to_be32(qp->sq.cur_post);

	/* Make sure doorbell record is visible to the HCA before
	 * we hit doorbell
	 */
	wmb();
	for (i = 0; i < 8; i++)
		mlx5_write64(&mmio_wqe[i * 2],
			     bf->bfreg->map + bf->offset + i * 8);
	io_stop_wc();

	bf->offset ^= bf->buf_size;

	spin_unlock_irqrestore(&qp->sq.lock, flags);

	return 0;
}

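/*
 * Poll the CQ until a completion arrives or the polling window expires.
 * Returns a positive value if the completion came from one of the
 * BlueFlame NOPs (wr_id == WR_ID_BF), 0 if only the signaled sentinel
 * completed, and a negative errno on error or timeout.
 */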
static int test_wc_poll_cq_result(struct mlx5_ib_dev *dev, struct ib_cq *cq)
{
	int ret;
	struct ib_wc wc = {};
	unsigned long end = jiffies + TEST_WC_POLLING_MAX_TIME_JIFFIES;

	do {
		ret = ib_poll_cq(cq, 1, &wc);
		if (ret < 0 || wc.status)
			return ret < 0 ? ret : -EINVAL;
		if (ret)
			break;
	} while (!time_after(jiffies, end));

	if (!ret)
		return -ETIMEDOUT;

	if (wc.wr_id != WR_ID_BF)
		ret = 0;

	return ret;
}

static int test_wc_do_send(struct mlx5_ib_dev *dev, struct ib_qp *qp)
{
	int err, i;

	for (i = 0; i < TEST_WC_NUM_WQES; i++) {
		err = post_send_nop(dev, qp, WR_ID_BF, false);
		if (err)
			return err;
	}

	return post_send_nop(dev, qp, WR_ID_END, true);
}

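/*
 * Probe whether the BlueFlame register is effectively mapped
 * write-combining: post TEST_WC_NUM_WQES unsignaled NOPs through
 * BlueFlame plus one signaled sentinel, then check whether any WR_ID_BF
 * completion surfaced. Since only the BlueFlame copies request a
 * completion for those WQEs, seeing one indicates the 64-byte doorbell
 * writes reached the HCA intact, i.e. write-combining works.
 */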
int mlx5_ib_test_wc(struct mlx5_ib_dev *dev)
{
	struct ib_cq_init_attr cq_attr = { .cqe = TEST_WC_NUM_WQES + 1 };
	int port_type_cap = MLX5_CAP_GEN(dev->mdev, port_type);
	struct ib_qp_init_attr qp_init_attr = {
		.cap = { .max_send_wr = TEST_WC_NUM_WQES },
		.qp_type = IB_QPT_UD,
		.sq_sig_type = IB_SIGNAL_REQ_WR,
		.create_flags = MLX5_IB_QP_CREATE_WC_TEST,
	};
	struct ib_qp_attr qp_attr = { .port_num = 1 };
	struct ib_device *ibdev = &dev->ib_dev;
	struct ib_qp *qp;
	struct ib_cq *cq;
	struct ib_pd *pd;
	int ret;

	if (!MLX5_CAP_GEN(dev->mdev, bf))
		return 0;

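	/*
	 * When RoCE is disabled on an Ethernet port, the UD test QP below
	 * cannot be brought up, so skip the dynamic test and rely on the
	 * architecture's static answer for physical functions.
	 */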
	if (!dev->mdev->roce.roce_en &&
	    port_type_cap == MLX5_CAP_PORT_TYPE_ETH) {
		if (mlx5_core_is_pf(dev->mdev))
			dev->wc_support = arch_can_pci_mmap_wc();
		return 0;
	}

	ret = mlx5_alloc_bfreg(dev->mdev, &dev->wc_bfreg, true, false);
	if (ret)
		goto print_err;

	if (!dev->wc_bfreg.wc)
		goto out1;

	pd = ib_alloc_pd(ibdev, 0);
	if (IS_ERR(pd)) {
		ret = PTR_ERR(pd);
		goto out1;
	}

	cq = ib_create_cq(ibdev, NULL, NULL, NULL, &cq_attr);
	if (IS_ERR(cq)) {
		ret = PTR_ERR(cq);
		goto out2;
	}

	qp_init_attr.recv_cq = cq;
	qp_init_attr.send_cq = cq;
	qp = ib_create_qp(pd, &qp_init_attr);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto out3;
	}

	qp_attr.qp_state = IB_QPS_INIT;
	ret = ib_modify_qp(qp, &qp_attr,
			   IB_QP_STATE | IB_QP_PORT | IB_QP_PKEY_INDEX |
				   IB_QP_QKEY);
	if (ret)
		goto out4;

	qp_attr.qp_state = IB_QPS_RTR;
	ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
	if (ret)
		goto out4;

	qp_attr.qp_state = IB_QPS_RTS;
	ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_SQ_PSN);
	if (ret)
		goto out4;

	ret = test_wc_do_send(dev, qp);
	if (ret < 0)
		goto out4;

	ret = test_wc_poll_cq_result(dev, cq);
	if (ret > 0) {
		dev->wc_support = true;
		ret = 0;
	}

out4:
	ib_destroy_qp(qp);
out3:
	ib_destroy_cq(cq);
out2:
	ib_dealloc_pd(pd);
out1:
	mlx5_free_bfreg(dev->mdev, &dev->wc_bfreg);
print_err:
	if (ret)
		mlx5_ib_err(
			dev,
			"Error %d while trying to test write-combining support\n",
			ret);
	return ret;
}