odp.c: diff of commit 8cdd312cfed706b067d7ea952603e28cc33c40cc against 6aec21f6a8322fa8d43df3ea7f051dfd8967f1b9
/*
 * Copyright (c) 2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:

--- 18 unchanged lines hidden ---

 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "mlx5_ib.h"

struct workqueue_struct *mlx5_ib_page_fault_wq;

#define COPY_ODP_BIT_MLX_TO_IB(reg, ib_caps, field_name, bit_name) do {	\
	if (be32_to_cpu(reg.field_name) & MLX5_ODP_SUPPORT_##bit_name)		\
		ib_caps->field_name |= IB_ODP_SUPPORT_##bit_name;		\
} while (0)
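
/*
 * Illustrative use of the macro above (hypothetical; the field and
 * bit names are assumptions, since no capability bit is copied yet
 * in this version of the file):
 *
 *	COPY_ODP_BIT_MLX_TO_IB(hw_caps, caps, ud_odp_caps, SEND);
 *
 * This would test the big-endian hardware field for
 * MLX5_ODP_SUPPORT_SEND and, when set, mirror it into the
 * verbs-level IB_ODP_SUPPORT_SEND bit.
 */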

int mlx5_ib_internal_query_odp_caps(struct mlx5_ib_dev *dev)
{
	int err;

--- 10 unchanged lines hidden ---

		goto out;

	/* At this point we would copy the capability bits that the driver
	 * supports from the hw_caps struct to the caps struct. However, no
	 * such capabilities are supported so far. */
out:
	return err;
}

static struct mlx5_ib_mr *mlx5_ib_odp_find_mr_lkey(struct mlx5_ib_dev *dev,
						   u32 key)
{
	u32 base_key = mlx5_base_mkey(key);
	struct mlx5_core_mr *mmr = __mlx5_mr_lookup(dev->mdev, base_key);

	if (!mmr || mmr->key != key)
		return NULL;

	return container_of(mmr, struct mlx5_ib_mr, mmr);
}
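
/*
 * Why two keys: mlx5_base_mkey() masks off the low-order bits of the
 * key, which change on each (re)registration, so the device's MKey
 * table is indexed by the base key alone. Comparing the full key
 * afterwards rejects lookups through a stale lkey whose MR has since
 * been freed and its base key reused.
 */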

static void mlx5_ib_page_fault_resume(struct mlx5_ib_qp *qp,
				      struct mlx5_ib_pfault *pfault,
				      int error)
{
	struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.pd->device);
	int ret = mlx5_core_page_fault_resume(dev->mdev, qp->mqp.qpn,
					      pfault->mpfault.flags,
					      error);
	if (ret)
		pr_err("Failed to resolve the page fault on QP 0x%x\n",
		       qp->mqp.qpn);
}
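
/*
 * mlx5_core_page_fault_resume() sends the device a resume command
 * for this QP and fault context (identified by the fault flags).
 * With error set, the device aborts the faulting request instead of
 * retrying it once pages are present.
 */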

void mlx5_ib_mr_pfault_handler(struct mlx5_ib_qp *qp,
			       struct mlx5_ib_pfault *pfault)
{
	u8 event_subtype = pfault->mpfault.event_subtype;

	switch (event_subtype) {
	default:
		pr_warn("Invalid page fault event subtype: 0x%x\n",
			event_subtype);
		mlx5_ib_page_fault_resume(qp, pfault, 1);
		break;
	}
}
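
/*
 * Only the default arm exists so far: no fault subtype is handled
 * yet, mirroring the empty capability copy above, so every event is
 * resolved with error=1 rather than leaving the QP stalled on a
 * fault that will never complete. Subtype-specific cases (e.g. WQE
 * and RDMA faults) would slot in above the default.
 */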

static void mlx5_ib_qp_pfault_action(struct work_struct *work)
{
	struct mlx5_ib_pfault *pfault = container_of(work,
						     struct mlx5_ib_pfault,
						     work);
	enum mlx5_ib_pagefault_context context =
		mlx5_ib_get_pagefault_context(&pfault->mpfault);
	struct mlx5_ib_qp *qp = container_of(pfault, struct mlx5_ib_qp,
					     pagefaults[context]);
	mlx5_ib_mr_pfault_handler(qp, pfault);
}
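
/*
 * Work handler: two container_of() steps recover the QP from the
 * work item, since each QP embeds one mlx5_ib_pfault per pagefault
 * context and the context index selects which slot within
 * qp->pagefaults this work item lives in.
 */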

void mlx5_ib_qp_disable_pagefaults(struct mlx5_ib_qp *qp)
{
	unsigned long flags;

	spin_lock_irqsave(&qp->disable_page_faults_lock, flags);
	qp->disable_page_faults = 1;
	spin_unlock_irqrestore(&qp->disable_page_faults_lock, flags);

	/*
	 * Note that at this point, we are guaranteed that no more
	 * work queue elements will be posted to the work queue with
	 * the QP we are closing.
	 */
	flush_workqueue(mlx5_ib_page_fault_wq);
}
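
/*
 * Quiesce handshake: taking disable_page_faults_lock orders the flag
 * update against the check in mlx5_ib_pfault_handler(), so no new
 * work can be queued for this QP afterwards, and flush_workqueue()
 * drains whatever was queued before. A sketch of the expected caller
 * pattern (hypothetical, not from this file):
 *
 *	mlx5_ib_qp_disable_pagefaults(qp);
 *	... modify or tear down the QP ...
 *	mlx5_ib_qp_enable_pagefaults(qp);
 */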

void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp)
{
	unsigned long flags;

	spin_lock_irqsave(&qp->disable_page_faults_lock, flags);
	qp->disable_page_faults = 0;
	spin_unlock_irqrestore(&qp->disable_page_faults_lock, flags);
}

static void mlx5_ib_pfault_handler(struct mlx5_core_qp *qp,
				   struct mlx5_pagefault *pfault)
{
	/*
	 * Note that we will only get one fault event per QP per context
	 * (responder/initiator, read/write), until we resolve the page fault
	 * with the mlx5_ib_page_fault_resume command. Since this function is
	 * called from within the work element, there is no risk of missing
	 * events.
	 */
	struct mlx5_ib_qp *mibqp = to_mibqp(qp);
	enum mlx5_ib_pagefault_context context =
		mlx5_ib_get_pagefault_context(pfault);
	struct mlx5_ib_pfault *qp_pfault = &mibqp->pagefaults[context];

	qp_pfault->mpfault = *pfault;

	/* No need to disable interrupts here since we are already in
	 * interrupt context */
	spin_lock(&mibqp->disable_page_faults_lock);
	if (!mibqp->disable_page_faults)
		queue_work(mlx5_ib_page_fault_wq, &qp_pfault->work);
	spin_unlock(&mibqp->disable_page_faults_lock);
}
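
/*
 * Interrupt-context entry point, installed as qp->mqp.pfault_handler
 * in mlx5_ib_odp_create_qp() below. Copying *pfault into the per-QP,
 * per-context slot needs no extra locking because the device raises
 * at most one fault per context until that fault is resumed.
 */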

void mlx5_ib_odp_create_qp(struct mlx5_ib_qp *qp)
{
	int i;

	qp->disable_page_faults = 1;
	spin_lock_init(&qp->disable_page_faults_lock);

	qp->mqp.pfault_handler = mlx5_ib_pfault_handler;

	for (i = 0; i < MLX5_IB_PAGEFAULT_CONTEXTS; ++i)
		INIT_WORK(&qp->pagefaults[i].work, mlx5_ib_qp_pfault_action);
}
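
/*
 * Called at QP creation: page faults start disabled so that no fault
 * work runs while the QP is still being set up; pairing this with
 * mlx5_ib_qp_enable_pagefaults() once the QP is ready is left to the
 * caller.
 */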

int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev)
{
	int ret;

	ret = init_srcu_struct(&ibdev->mr_srcu);
	if (ret)
		return ret;

	return 0;
}

void mlx5_ib_odp_remove_one(struct mlx5_ib_dev *ibdev)
{
	cleanup_srcu_struct(&ibdev->mr_srcu);
}

int __init mlx5_ib_odp_init(void)
{
	mlx5_ib_page_fault_wq =
		create_singlethread_workqueue("mlx5_ib_page_faults");
	if (!mlx5_ib_page_fault_wq)
		return -ENOMEM;

	return 0;
}
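
/*
 * The single-threaded workqueue serializes all page-fault work
 * items, across QPs and devices, which keeps the fault handlers free
 * of cross-fault locking. Assumed (not shown here) ordering:
 * mlx5_ib_odp_init() runs from the driver's module init before any
 * device is registered, and mlx5_ib_odp_cleanup() after the last
 * device is removed, so the workqueue outlives every queued fault.
 */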

void mlx5_ib_odp_cleanup(void)
{
	destroy_workqueue(mlx5_ib_page_fault_wq);
}