// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022, Microsoft Corporation. All rights reserved.
 */

#include "mana_ib.h"

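/* Verbs access flags accepted when registering a user memory region. */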
#define VALID_MR_FLAGS \
	(IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ)

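/*
 * Translate InfiniBand verbs access flags into the equivalent GDMA
 * access flags. Local read access is always implied.
 */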
static enum gdma_mr_access_flags
mana_ib_verbs_to_gdma_access_flags(int access_flags)
{
	enum gdma_mr_access_flags flags = GDMA_ACCESS_FLAG_LOCAL_READ;

	if (access_flags & IB_ACCESS_LOCAL_WRITE)
		flags |= GDMA_ACCESS_FLAG_LOCAL_WRITE;

	if (access_flags & IB_ACCESS_REMOTE_WRITE)
		flags |= GDMA_ACCESS_FLAG_REMOTE_WRITE;

	if (access_flags & IB_ACCESS_REMOTE_READ)
		flags |= GDMA_ACCESS_FLAG_REMOTE_READ;

	return flags;
}

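/*
 * Issue a GDMA_CREATE_MR request to the device and, on success, save the
 * returned lkey, rkey and MR handle in @mr.
 */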
static int mana_ib_gd_create_mr(struct mana_ib_dev *dev, struct mana_ib_mr *mr,
				struct gdma_create_mr_params *mr_params)
{
	struct gdma_create_mr_response resp = {};
	struct gdma_create_mr_request req = {};
	struct gdma_dev *mdev = dev->gdma_dev;
	struct gdma_context *gc;
	int err;

	gc = mdev->gdma_context;

	mana_gd_init_req_hdr(&req.hdr, GDMA_CREATE_MR, sizeof(req),
			     sizeof(resp));
	req.pd_handle = mr_params->pd_handle;
	req.mr_type = mr_params->mr_type;

	switch (mr_params->mr_type) {
	case GDMA_MR_TYPE_GVA:
		req.gva.dma_region_handle = mr_params->gva.dma_region_handle;
		req.gva.virtual_address = mr_params->gva.virtual_address;
		req.gva.access_flags = mr_params->gva.access_flags;
		break;

	default:
		ibdev_dbg(&dev->ib_dev,
			  "invalid param (GDMA_MR_TYPE) passed, type %d\n",
			  req.mr_type);
		return -EINVAL;
	}

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);

	if (err || resp.hdr.status) {
		ibdev_dbg(&dev->ib_dev, "Failed to create mr %d, %u\n", err,
			  resp.hdr.status);
		if (!err)
			err = -EPROTO;

		return err;
	}

	mr->ibmr.lkey = resp.lkey;
	mr->ibmr.rkey = resp.rkey;
	mr->mr_handle = resp.mr_handle;

	return 0;
}

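/* Issue a GDMA_DESTROY_MR request for the given MR handle. */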
static int mana_ib_gd_destroy_mr(struct mana_ib_dev *dev, u64 mr_handle)
{
	struct gdma_destroy_mr_response resp = {};
	struct gdma_destroy_mr_request req = {};
	struct gdma_dev *mdev = dev->gdma_dev;
	struct gdma_context *gc;
	int err;

	gc = mdev->gdma_context;

	mana_gd_init_req_hdr(&req.hdr, GDMA_DESTROY_MR, sizeof(req),
			     sizeof(resp));

	req.mr_handle = mr_handle;

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err || resp.hdr.status) {
		dev_err(gc->dev, "Failed to destroy MR: %d, 0x%x\n", err,
			resp.hdr.status);
		if (!err)
			err = -EPROTO;
		return err;
	}

	return 0;
}

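/*
 * Register a user memory region: pin the user pages with ib_umem_get(),
 * create a GDMA DMA region covering them, then create a hardware MR that
 * maps @iova onto that region.
 */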
struct ib_mr *mana_ib_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 length,
				  u64 iova, int access_flags,
				  struct ib_udata *udata)
{
	struct mana_ib_pd *pd = container_of(ibpd, struct mana_ib_pd, ibpd);
	struct gdma_create_mr_params mr_params = {};
	struct ib_device *ibdev = ibpd->device;
	struct mana_ib_dev *dev;
	struct mana_ib_mr *mr;
	u64 dma_region_handle;
	int err;

	dev = container_of(ibdev, struct mana_ib_dev, ib_dev);

	ibdev_dbg(ibdev,
		  "start 0x%llx, iova 0x%llx length 0x%llx access_flags 0x%x\n",
		  start, iova, length, access_flags);

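	/*
	 * Optional access flags are advisory; mask them off before checking
	 * against the set this device supports.
	 */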
	access_flags &= ~IB_ACCESS_OPTIONAL;
	if (access_flags & ~VALID_MR_FLAGS)
		return ERR_PTR(-EINVAL);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->umem = ib_umem_get(ibdev, start, length, access_flags);
	if (IS_ERR(mr->umem)) {
		err = PTR_ERR(mr->umem);
		ibdev_dbg(ibdev,
			  "Failed to get umem for register user-mr, %d\n", err);
		goto err_free;
	}

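	/* Create a GDMA DMA region backed by the pinned umem pages. */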
	err = mana_ib_gd_create_dma_region(dev, mr->umem, &dma_region_handle);
	if (err) {
		ibdev_dbg(ibdev, "Failed to create dma region for user-mr, %d\n",
			  err);
		goto err_umem;
	}

	ibdev_dbg(ibdev,
		  "mana_ib_gd_create_dma_region ret %d gdma_region %llx\n", err,
		  dma_region_handle);

	mr_params.pd_handle = pd->pd_handle;
	mr_params.mr_type = GDMA_MR_TYPE_GVA;
	mr_params.gva.dma_region_handle = dma_region_handle;
	mr_params.gva.virtual_address = iova;
	mr_params.gva.access_flags =
		mana_ib_verbs_to_gdma_access_flags(access_flags);

	err = mana_ib_gd_create_mr(dev, mr, &mr_params);
	if (err)
		goto err_dma_region;

	/*
	 * There is no need to keep track of dma_region_handle after MR is
	 * successfully created. The dma_region_handle is tracked in the PF
	 * as part of the lifecycle of this MR.
	 */

	return &mr->ibmr;

err_dma_region:
	mana_gd_destroy_dma_region(dev->gdma_dev->gdma_context,
				   dma_region_handle);

err_umem:
	ib_umem_release(mr->umem);

err_free:
	kfree(mr);
	return ERR_PTR(err);
}

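/* Destroy the hardware MR, release the pinned user memory and free @mr. */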
int mana_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
	struct mana_ib_mr *mr = container_of(ibmr, struct mana_ib_mr, ibmr);
	struct ib_device *ibdev = ibmr->device;
	struct mana_ib_dev *dev;
	int err;

	dev = container_of(ibdev, struct mana_ib_dev, ib_dev);

	err = mana_ib_gd_destroy_mr(dev, mr->mr_handle);
	if (err)
		return err;

	if (mr->umem)
		ib_umem_release(mr->umem);

	kfree(mr);

	return 0;
}