/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/io.h>
#include <rdma/ib_umem_odp.h>
#include "mlx5_ib.h"
#include <linux/jiffies.h>

/*
 * Fill in a physical address list. ib_umem_num_dma_blocks() entries will be
 * filled in the pas array.
 */
void mlx5_ib_populate_pas(struct ib_umem *umem, size_t page_size, __be64 *pas,
			  u64 access_flags)
{
	struct ib_block_iter biter;

	rdma_umem_for_each_dma_block (umem, &biter, page_size) {
		*pas = cpu_to_be64(rdma_block_iter_dma_address(&biter) |
				   access_flags);
		pas++;
	}
}

/*
 * Compute the page shift and page_offset for mailboxes that use a quantized
 * page_offset. The granularity of the page offset scales according to page
 * size.
 */
unsigned long __mlx5_umem_find_best_quantized_pgoff(
	struct ib_umem *umem, unsigned long pgsz_bitmap,
	unsigned int page_offset_bits, u64 pgoff_bitmask, unsigned int scale,
	unsigned int *page_offset_quantized)
{
	const u64 page_offset_mask = (1UL << page_offset_bits) - 1;
	unsigned long page_size;
	u64 page_offset;

	page_size = ib_umem_find_best_pgoff(umem, pgsz_bitmap, pgoff_bitmask);
	if (!page_size)
		return 0;

	/*
	 * page size is the largest possible page size.
	 *
	 * Reduce the page_size, and thus the page_offset and quanta, until the
	 * page_offset fits into the mailbox field. Once page_size < scale this
	 * loop is guaranteed to terminate.
	 */
	page_offset = ib_umem_dma_offset(umem, page_size);
	while (page_offset & ~(u64)(page_offset_mask * (page_size / scale))) {
		page_size /= 2;
		page_offset = ib_umem_dma_offset(umem, page_size);
	}

	/*
	 * The address is not aligned, or otherwise cannot be represented by the
	 * page_offset.
	 */
	if (!(pgsz_bitmap & page_size))
		return 0;

	*page_offset_quantized =
		(unsigned long)page_offset / (page_size / scale);
	if (WARN_ON(*page_offset_quantized > page_offset_mask))
		return 0;
	return page_size;
}

#define WR_ID_BF 0xBF
#define WR_ID_END 0xBAD
#define TEST_WC_NUM_WQES 255
#define TEST_WC_POLLING_MAX_TIME_JIFFIES msecs_to_jiffies(100)
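
/*
 * Post a single NOP WQE on the test QP and ring the doorbell by writing the
 * WQE through the BlueFlame register. The copy of the WQE left in the send
 * queue honours 'signaled', while the copy pushed through BlueFlame always
 * requests a completion (see below), which is what the WC test relies on.
 */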
static int post_send_nop(struct mlx5_ib_dev *dev, struct ib_qp *ibqp, u64 wr_id,
			 bool signaled)
{
	struct mlx5_ib_qp *qp = to_mqp(ibqp);
	struct mlx5_wqe_ctrl_seg *ctrl;
	struct mlx5_bf *bf = &qp->bf;
	__be32 mmio_wqe[16] = {};
	unsigned long flags;
	unsigned int idx;

	if (unlikely(dev->mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR))
		return -EIO;

	spin_lock_irqsave(&qp->sq.lock, flags);

	idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1);
	ctrl = mlx5_frag_buf_get_wqe(&qp->sq.fbc, idx);

	memset(ctrl, 0, sizeof(struct mlx5_wqe_ctrl_seg));
	ctrl->fm_ce_se = signaled ? MLX5_WQE_CTRL_CQ_UPDATE : 0;
	ctrl->opmod_idx_opcode =
		cpu_to_be32(((u32)(qp->sq.cur_post) << 8) | MLX5_OPCODE_NOP);
	ctrl->qpn_ds = cpu_to_be32((sizeof(struct mlx5_wqe_ctrl_seg) / 16) |
				   (qp->trans_qp.base.mqp.qpn << 8));

	qp->sq.wrid[idx] = wr_id;
	qp->sq.w_list[idx].opcode = MLX5_OPCODE_NOP;
	qp->sq.wqe_head[idx] = qp->sq.head + 1;
	qp->sq.cur_post += DIV_ROUND_UP(sizeof(struct mlx5_wqe_ctrl_seg),
					MLX5_SEND_WQE_BB);
	qp->sq.w_list[idx].next = qp->sq.cur_post;
	qp->sq.head++;

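	/*
	 * The copy written through the BlueFlame register forces CQ_UPDATE
	 * regardless of 'signaled'. An unsignaled NOP therefore only produces
	 * a CQE if the HCA actually consumed the write-combined copy.
	 */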
	memcpy(mmio_wqe, ctrl, sizeof(*ctrl));
	((struct mlx5_wqe_ctrl_seg *)&mmio_wqe)->fm_ce_se |=
		MLX5_WQE_CTRL_CQ_UPDATE;

	/* Make sure that descriptors are written before
	 * updating doorbell record and ringing the doorbell
	 */
	wmb();

	qp->db.db[MLX5_SND_DBR] = cpu_to_be32(qp->sq.cur_post);

	/* Make sure doorbell record is visible to the HCA before
	 * we hit doorbell
	 */
	wmb();

	__iowrite64_copy(bf->bfreg->map + bf->offset, mmio_wqe,
			 sizeof(mmio_wqe) / 8);

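	/* Alternate between the two halves of the BlueFlame buffer */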
	bf->offset ^= bf->buf_size;

	spin_unlock_irqrestore(&qp->sq.lock, flags);

	return 0;
}

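/*
 * Poll the test CQ for up to TEST_WC_POLLING_MAX_TIME_JIFFIES. Returns a
 * positive value if the first completion belongs to a BlueFlame NOP
 * (wr_id == WR_ID_BF), 0 if some other completion arrived first, and a
 * negative errno on error or timeout.
 */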
static int test_wc_poll_cq_result(struct mlx5_ib_dev *dev, struct ib_cq *cq)
{
	int ret;
	struct ib_wc wc = {};
	unsigned long end = jiffies + TEST_WC_POLLING_MAX_TIME_JIFFIES;

	do {
		ret = ib_poll_cq(cq, 1, &wc);
		if (ret < 0 || wc.status)
			return ret < 0 ? ret : -EINVAL;
		if (ret)
			break;
	} while (!time_after(jiffies, end));

	if (!ret)
		return -ETIMEDOUT;

	if (wc.wr_id != WR_ID_BF)
		ret = 0;

	return ret;
}

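/*
 * Post TEST_WC_NUM_WQES unsignaled NOPs (WR_ID_BF) followed by one signaled
 * NOP (WR_ID_END), so the CQ is guaranteed at least one completion even if
 * write-combining does not work.
 */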
static int test_wc_do_send(struct mlx5_ib_dev *dev, struct ib_qp *qp)
{
	int err, i;

	for (i = 0; i < TEST_WC_NUM_WQES; i++) {
		err = post_send_nop(dev, qp, WR_ID_BF, false);
		if (err)
			return err;
	}

	return post_send_nop(dev, qp, WR_ID_END, true);
}

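/*
 * Probe whether a write-combining mapping of the BlueFlame register works on
 * this system: create a UD QP on a WC-mapped bfreg, push NOP WQEs through the
 * BlueFlame doorbell and poll for their completions. If the first completion
 * carries WR_ID_BF, the WC writes reached the HCA and dev->wc_support is set.
 */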
int mlx5_ib_test_wc(struct mlx5_ib_dev *dev)
{
	struct ib_cq_init_attr cq_attr = { .cqe = TEST_WC_NUM_WQES + 1 };
	int port_type_cap = MLX5_CAP_GEN(dev->mdev, port_type);
	struct ib_qp_init_attr qp_init_attr = {
		.cap = { .max_send_wr = TEST_WC_NUM_WQES },
		.qp_type = IB_QPT_UD,
		.sq_sig_type = IB_SIGNAL_REQ_WR,
		.create_flags = MLX5_IB_QP_CREATE_WC_TEST,
	};
	struct ib_qp_attr qp_attr = { .port_num = 1 };
	struct ib_device *ibdev = &dev->ib_dev;
	struct ib_qp *qp;
	struct ib_cq *cq;
	struct ib_pd *pd;
	int ret;

	if (!MLX5_CAP_GEN(dev->mdev, bf))
		return 0;

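	/*
	 * With RoCE disabled on an Ethernet port the test QP cannot be brought
	 * up, so fall back to the architecture's ability to map PCI memory as
	 * write-combining (PF only).
	 */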
	if (!dev->mdev->roce.roce_en &&
	    port_type_cap == MLX5_CAP_PORT_TYPE_ETH) {
		if (mlx5_core_is_pf(dev->mdev))
			dev->wc_support = arch_can_pci_mmap_wc();
		return 0;
	}

	ret = mlx5_alloc_bfreg(dev->mdev, &dev->wc_bfreg, true, false);
	if (ret)
		goto print_err;

	if (!dev->wc_bfreg.wc)
		goto out1;

	pd = ib_alloc_pd(ibdev, 0);
	if (IS_ERR(pd)) {
		ret = PTR_ERR(pd);
		goto out1;
	}

	cq = ib_create_cq(ibdev, NULL, NULL, NULL, &cq_attr);
	if (IS_ERR(cq)) {
		ret = PTR_ERR(cq);
		goto out2;
	}

	qp_init_attr.recv_cq = cq;
	qp_init_attr.send_cq = cq;
	qp = ib_create_qp(pd, &qp_init_attr);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto out3;
	}

	qp_attr.qp_state = IB_QPS_INIT;
	ret = ib_modify_qp(qp, &qp_attr,
			   IB_QP_STATE | IB_QP_PORT | IB_QP_PKEY_INDEX |
				   IB_QP_QKEY);
	if (ret)
		goto out4;

	qp_attr.qp_state = IB_QPS_RTR;
	ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
	if (ret)
		goto out4;

	qp_attr.qp_state = IB_QPS_RTS;
	ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_SQ_PSN);
	if (ret)
		goto out4;

	ret = test_wc_do_send(dev, qp);
	if (ret < 0)
		goto out4;

	ret = test_wc_poll_cq_result(dev, cq);
	if (ret > 0) {
		dev->wc_support = true;
		ret = 0;
	}

out4:
	ib_destroy_qp(qp);
out3:
	ib_destroy_cq(cq);
out2:
	ib_dealloc_pd(pd);
out1:
	mlx5_free_bfreg(dev->mdev, &dev->wc_bfreg);
print_err:
	if (ret)
		mlx5_ib_err(
			dev,
			"Error %d while trying to test write-combining support\n",
			ret);
	return ret;
}