xref: /openbmc/linux/drivers/infiniband/hw/mlx5/odp.c (revision 6aec21f6a8322fa8d43df3ea7f051dfd8967f1b9)
/*
 * Copyright (c) 2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "mlx5_ib.h"

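/* Workqueue on which ODP page-fault work items are executed. */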
struct workqueue_struct *mlx5_ib_page_fault_wq;

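/*
 * Copy a single ODP capability bit from the hardware capability layout
 * (big-endian register) into the matching ib_odp_caps field, but only if
 * the hardware reports support for it. Illustrative (hypothetical) use,
 * once a capability is actually supported:
 *	COPY_ODP_BIT_MLX_TO_IB(hw_caps, caps, some_odp_caps_field, SEND);
 */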
#define COPY_ODP_BIT_MLX_TO_IB(reg, ib_caps, field_name, bit_name) do {	\
	if (be32_to_cpu(reg.field_name) & MLX5_ODP_SUPPORT_##bit_name)	\
		ib_caps->field_name |= IB_ODP_SUPPORT_##bit_name;	\
} while (0)

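/*
 * Query the device's on-demand paging capabilities and cache them in
 * dev->odp_caps. If the device does not advertise ODP support at all, the
 * caps struct is left zeroed and 0 is returned.
 */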
int mlx5_ib_internal_query_odp_caps(struct mlx5_ib_dev *dev)
{
	int err;
	struct mlx5_odp_caps hw_caps;
	struct ib_odp_caps *caps = &dev->odp_caps;

	memset(caps, 0, sizeof(*caps));

	if (!(dev->mdev->caps.gen.flags & MLX5_DEV_CAP_FLAG_ON_DMND_PG))
		return 0;

	err = mlx5_query_odp_caps(dev->mdev, &hw_caps);
	if (err)
		goto out;

	/*
	 * At this point we would copy the capability bits that the driver
	 * supports from the hw_caps struct to the caps struct. However, no
	 * such capabilities are supported so far.
	 */
out:
	return err;
}

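/*
 * Look up the MR that owns the given memory key. Returns NULL if no MR is
 * registered under the base key, or if the full key does not match (e.g. a
 * stale key).
 */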
static struct mlx5_ib_mr *mlx5_ib_odp_find_mr_lkey(struct mlx5_ib_dev *dev,
						   u32 key)
{
	u32 base_key = mlx5_base_mkey(key);
	struct mlx5_core_mr *mmr = __mlx5_mr_lookup(dev->mdev, base_key);

	if (!mmr || mmr->key != key)
		return NULL;

	return container_of(mmr, struct mlx5_ib_mr, mmr);
}

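/*
 * Report to the device that the page fault on this QP has been handled
 * (or, when error is set, that it could not be handled), allowing the QP
 * to resume execution.
 */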
static void mlx5_ib_page_fault_resume(struct mlx5_ib_qp *qp,
				      struct mlx5_ib_pfault *pfault,
				      int error)
{
	struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.pd->device);
	int ret = mlx5_core_page_fault_resume(dev->mdev, qp->mqp.qpn,
					      pfault->mpfault.flags,
					      error);

	if (ret)
		pr_err("Failed to resolve the page fault on QP 0x%x\n",
		       qp->mqp.qpn);
}

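/*
 * Dispatch a page-fault event according to its subtype. No subtypes are
 * handled yet, so every fault is reported and resumed with the error flag
 * set.
 */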
void mlx5_ib_mr_pfault_handler(struct mlx5_ib_qp *qp,
			       struct mlx5_ib_pfault *pfault)
{
	u8 event_subtype = pfault->mpfault.event_subtype;

	switch (event_subtype) {
	default:
		pr_warn("Invalid page fault event subtype: 0x%x\n",
			event_subtype);
		mlx5_ib_page_fault_resume(qp, pfault, 1);
		break;
	}
}

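/*
 * Work-queue callback: recover the QP and page-fault context from the work
 * item and handle the fault in process context.
 */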
static void mlx5_ib_qp_pfault_action(struct work_struct *work)
{
	struct mlx5_ib_pfault *pfault = container_of(work,
						     struct mlx5_ib_pfault,
						     work);
	enum mlx5_ib_pagefault_context context =
		mlx5_ib_get_pagefault_context(&pfault->mpfault);
	struct mlx5_ib_qp *qp = container_of(pfault, struct mlx5_ib_qp,
					     pagefaults[context]);

	mlx5_ib_mr_pfault_handler(qp, pfault);
}

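/*
 * Prevent new page-fault work items from being queued for this QP and
 * drain any that are already queued.
 */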
void mlx5_ib_qp_disable_pagefaults(struct mlx5_ib_qp *qp)
{
	unsigned long flags;

	spin_lock_irqsave(&qp->disable_page_faults_lock, flags);
	qp->disable_page_faults = 1;
	spin_unlock_irqrestore(&qp->disable_page_faults_lock, flags);

	/*
	 * Note that at this point, we are guaranteed that no more
	 * work queue elements will be posted to the work queue with
	 * the QP we are closing.
	 */
	flush_workqueue(mlx5_ib_page_fault_wq);
}

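/* Allow page-fault work items to be queued for this QP again. */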
void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp)
{
	unsigned long flags;

	spin_lock_irqsave(&qp->disable_page_faults_lock, flags);
	qp->disable_page_faults = 0;
	spin_unlock_irqrestore(&qp->disable_page_faults_lock, flags);
}

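/*
 * Page-fault callback installed on the core QP. It runs in interrupt
 * context: it copies the fault information into the per-context slot and
 * queues the corresponding work item, unless page faults are currently
 * disabled for this QP.
 */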
static void mlx5_ib_pfault_handler(struct mlx5_core_qp *qp,
				   struct mlx5_pagefault *pfault)
{
	/*
	 * Note that we will only get one fault event per QP per context
	 * (responder/initiator, read/write), until we resolve the page fault
	 * with the mlx5_ib_page_fault_resume command. Since this function is
	 * called from within the work element, there is no risk of missing
	 * events.
	 */
	struct mlx5_ib_qp *mibqp = to_mibqp(qp);
	enum mlx5_ib_pagefault_context context =
		mlx5_ib_get_pagefault_context(pfault);
	struct mlx5_ib_pfault *qp_pfault = &mibqp->pagefaults[context];

	qp_pfault->mpfault = *pfault;

	/* No need to stop interrupts here since we are in an interrupt */
	spin_lock(&mibqp->disable_page_faults_lock);
	if (!mibqp->disable_page_faults)
		queue_work(mlx5_ib_page_fault_wq, &qp_pfault->work);
	spin_unlock(&mibqp->disable_page_faults_lock);
}

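/*
 * Per-QP ODP setup: page faults start out disabled, the lock protecting
 * the disable flag is initialized, the fault handler is installed on the
 * core QP, and one work item per page-fault context is prepared.
 */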
void mlx5_ib_odp_create_qp(struct mlx5_ib_qp *qp)
{
	int i;

	qp->disable_page_faults = 1;
	spin_lock_init(&qp->disable_page_faults_lock);

	qp->mqp.pfault_handler	= mlx5_ib_pfault_handler;

	for (i = 0; i < MLX5_IB_PAGEFAULT_CONTEXTS; ++i)
		INIT_WORK(&qp->pagefaults[i].work, mlx5_ib_qp_pfault_action);
}

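/* Per-device ODP setup: initialize the ibdev->mr_srcu SRCU instance. */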
int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev)
{
	int ret;

	ret = init_srcu_struct(&ibdev->mr_srcu);
	if (ret)
		return ret;

	return 0;
}

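/*
 * Per-device ODP teardown: release the SRCU instance set up in
 * mlx5_ib_odp_init_one().
 */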
void mlx5_ib_odp_remove_one(struct mlx5_ib_dev *ibdev)
{
	cleanup_srcu_struct(&ibdev->mr_srcu);
}

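/*
 * Module-wide ODP setup: create the single-threaded workqueue that runs
 * the page-fault work items.
 */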
int __init mlx5_ib_odp_init(void)
{
	mlx5_ib_page_fault_wq =
		create_singlethread_workqueue("mlx5_ib_page_faults");
	if (!mlx5_ib_page_fault_wq)
		return -ENOMEM;

	return 0;
}

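/* Module-wide ODP teardown: destroy the page-fault workqueue. */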
void mlx5_ib_odp_cleanup(void)
{
	destroy_workqueue(mlx5_ib_page_fault_wq);
}