/*
 * QEMU Cryptodev backend for QEMU cipher APIs
 *
 * Copyright (c) 2016 HUAWEI TECHNOLOGIES CO., LTD.
 *
 * Authors:
 *    Gonglei <arei.gonglei@huawei.com>
 *    Jay Zhou <jianjay.zhou@huawei.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 *
 */

#include "qemu/osdep.h"
#include "hw/virtio/virtio-bus.h"
#include "sysemu/cryptodev-vhost.h"

#ifdef CONFIG_VHOST_CRYPTO
#include "qapi/error.h"
#include "qapi/qmp/qerror.h"
#include "qemu/error-report.h"
#include "hw/virtio/virtio-crypto.h"
#include "sysemu/cryptodev-vhost-user.h"

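/* Return the maximum number of queue pairs supported by the vhost device */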
uint64_t
cryptodev_vhost_get_max_queues(
                        CryptoDevBackendVhost *crypto)
{
    return crypto->dev.max_queues;
}

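/* Tear down the vhost device and free the backend state */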
void cryptodev_vhost_cleanup(CryptoDevBackendVhost *crypto)
{
    vhost_dev_cleanup(&crypto->dev);
    g_free(crypto);
}

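/*
 * Allocate and initialize a vhost crypto backend for a single queue pair.
 * Returns the new backend on success, or NULL if vhost_dev_init() fails.
 */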
struct CryptoDevBackendVhost *
cryptodev_vhost_init(
             CryptoDevBackendVhostOptions *options)
{
    int r;
    CryptoDevBackendVhost *crypto;
    Error *local_err = NULL;

    crypto = g_new(CryptoDevBackendVhost, 1);
    crypto->dev.max_queues = 1;
    crypto->dev.nvqs = 1;
    crypto->dev.vqs = crypto->vqs;

    crypto->cc = options->cc;

    crypto->dev.protocol_features = 0;
    crypto->backend = -1;

    /* vhost-user needs vq_index to initiate a specific queue pair */
    crypto->dev.vq_index = crypto->cc->queue_index * crypto->dev.nvqs;

    r = vhost_dev_init(&crypto->dev, options->opaque, options->backend_type, 0,
                       &local_err);
    if (r < 0) {
        error_report_err(local_err);
        goto fail;
    }

    return crypto;
fail:
    g_free(crypto);
    return NULL;
}

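/*
 * Enable host notifiers and start the vhost device for one queue pair;
 * on failure the notifiers are disabled again and the error is returned.
 */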
static int
cryptodev_vhost_start_one(CryptoDevBackendVhost *crypto,
                                  VirtIODevice *dev)
{
    int r;

    crypto->dev.nvqs = 1;
    crypto->dev.vqs = crypto->vqs;

    r = vhost_dev_enable_notifiers(&crypto->dev, dev);
    if (r < 0) {
        goto fail_notifiers;
    }

    r = vhost_dev_start(&crypto->dev, dev);
    if (r < 0) {
        goto fail_start;
    }

    return 0;

fail_start:
    vhost_dev_disable_notifiers(&crypto->dev, dev);
fail_notifiers:
    return r;
}

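/* Stop the vhost device for one queue pair and disable its host notifiers */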
static void
cryptodev_vhost_stop_one(CryptoDevBackendVhost *crypto,
                                 VirtIODevice *dev)
{
    vhost_dev_stop(&crypto->dev, dev);
    vhost_dev_disable_notifiers(&crypto->dev, dev);
}

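/*
 * Look up the vhost state behind a backend client.  Only vhost-user
 * clients carry one; for any other client type this returns NULL.
 */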
CryptoDevBackendVhost *
cryptodev_get_vhost(CryptoDevBackendClient *cc,
                            CryptoDevBackend *b,
                            uint16_t queue)
{
    CryptoDevBackendVhost *vhost_crypto = NULL;

    if (!cc) {
        return NULL;
    }

    switch (cc->type) {
#if defined(CONFIG_VHOST_USER) && defined(CONFIG_LINUX)
    case CRYPTODEV_BACKEND_TYPE_VHOST_USER:
        vhost_crypto = cryptodev_vhost_user_get_vhost(cc, b, queue);
        break;
#endif
    default:
        break;
    }

    return vhost_crypto;
}

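/* Record the virtqueue index at which this vhost device's rings start */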
static void
cryptodev_vhost_set_vq_index(CryptoDevBackendVhost *crypto,
                                     int vq_index)
{
    crypto->dev.vq_index = vq_index;
}

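/*
 * Remember the requested vring enable state on the client and, if a vhost
 * device is active, forward it through the backend's vhost_set_vring_enable
 * operation.
 */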
static int
vhost_set_vring_enable(CryptoDevBackendClient *cc,
                            CryptoDevBackend *b,
                            uint16_t queue, int enable)
{
    CryptoDevBackendVhost *crypto =
                       cryptodev_get_vhost(cc, b, queue);
    const VhostOps *vhost_ops;

    cc->vring_enable = enable;

    if (!crypto) {
        return 0;
    }

    vhost_ops = crypto->dev.vhost_ops;
    if (vhost_ops->vhost_set_vring_enable) {
        return vhost_ops->vhost_set_vring_enable(&crypto->dev, enable);
    }

    return 0;
}

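/*
 * Start vhost for every queue of the virtio-crypto device: bind the guest
 * notifiers, then start each queue pair and restore its vring enable state.
 * On failure, already started queue pairs are stopped and the guest
 * notifiers are unbound again.
 */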
int cryptodev_vhost_start(VirtIODevice *dev, int total_queues)
{
    VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(dev);
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(dev)));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
    int r, e;
    int i;
    CryptoDevBackend *b = vcrypto->cryptodev;
    CryptoDevBackendVhost *vhost_crypto;
    CryptoDevBackendClient *cc;

    if (!k->set_guest_notifiers) {
        error_report("binding does not support guest notifiers");
        return -ENOSYS;
    }

    for (i = 0; i < total_queues; i++) {
        cc = b->conf.peers.ccs[i];

        vhost_crypto = cryptodev_get_vhost(cc, b, i);
        cryptodev_vhost_set_vq_index(vhost_crypto, i);
        /*
         * Suppress guest notifier masking on vhost-user, because vhost-user
         * does not handle interrupt masking/unmasking properly.
         */
        if (cc->type == CRYPTODEV_BACKEND_TYPE_VHOST_USER) {
            dev->use_guest_notifier_mask = false;
        }
    }

    r = k->set_guest_notifiers(qbus->parent, total_queues, true);
    if (r < 0) {
        error_report("error binding guest notifier: %d", -r);
        goto err;
    }

    for (i = 0; i < total_queues; i++) {
        cc = b->conf.peers.ccs[i];

        vhost_crypto = cryptodev_get_vhost(cc, b, i);
        r = cryptodev_vhost_start_one(vhost_crypto, dev);

        if (r < 0) {
            goto err_start;
        }

        if (cc->vring_enable) {
            /* restore vring enable state */
            r = vhost_set_vring_enable(cc, b, i, cc->vring_enable);

            if (r < 0) {
                goto err_start;
            }
        }
    }

    return 0;

err_start:
    while (--i >= 0) {
        cc = b->conf.peers.ccs[i];
        vhost_crypto = cryptodev_get_vhost(cc, b, i);
        cryptodev_vhost_stop_one(vhost_crypto, dev);
    }
    e = k->set_guest_notifiers(qbus->parent, total_queues, false);
    if (e < 0) {
        error_report("vhost guest notifier cleanup failed: %d", e);
    }
err:
    return r;
}

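/*
 * Stop vhost for every queue of the virtio-crypto device and unbind the
 * guest notifiers.
 */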
void cryptodev_vhost_stop(VirtIODevice *dev, int total_queues)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(dev)));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
    VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(dev);
    CryptoDevBackend *b = vcrypto->cryptodev;
    CryptoDevBackendVhost *vhost_crypto;
    CryptoDevBackendClient *cc;
    size_t i;
    int r;

    for (i = 0; i < total_queues; i++) {
        cc = b->conf.peers.ccs[i];

        vhost_crypto = cryptodev_get_vhost(cc, b, i);
        cryptodev_vhost_stop_one(vhost_crypto, dev);
    }

    r = k->set_guest_notifiers(qbus->parent, total_queues, false);
    if (r < 0) {
        error_report("vhost guest notifier cleanup failed: %d", r);
    }
    assert(r >= 0);
}

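/* Mask or unmask the guest notifier of the given virtqueue via vhost */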
void cryptodev_vhost_virtqueue_mask(VirtIODevice *dev,
                                           int queue,
                                           int idx, bool mask)
{
    VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(dev);
    CryptoDevBackend *b = vcrypto->cryptodev;
    CryptoDevBackendVhost *vhost_crypto;
    CryptoDevBackendClient *cc;

    assert(queue < MAX_CRYPTO_QUEUE_NUM);

    cc = b->conf.peers.ccs[queue];
    vhost_crypto = cryptodev_get_vhost(cc, b, queue);

    vhost_virtqueue_mask(&vhost_crypto->dev, dev, idx, mask);
}

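/* Ask vhost whether the given virtqueue has a pending guest notification */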
bool cryptodev_vhost_virtqueue_pending(VirtIODevice *dev,
                                              int queue, int idx)
{
    VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(dev);
    CryptoDevBackend *b = vcrypto->cryptodev;
    CryptoDevBackendVhost *vhost_crypto;
    CryptoDevBackendClient *cc;

    assert(queue < MAX_CRYPTO_QUEUE_NUM);

    cc = b->conf.peers.ccs[queue];
    vhost_crypto = cryptodev_get_vhost(cc, b, queue);

    return vhost_virtqueue_pending(&vhost_crypto->dev, idx);
}

#else
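/* Stub implementations used when QEMU is built without vhost-crypto support */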
uint64_t
cryptodev_vhost_get_max_queues(CryptoDevBackendVhost *crypto)
{
    return 0;
}

void cryptodev_vhost_cleanup(CryptoDevBackendVhost *crypto)
{
}

struct CryptoDevBackendVhost *
cryptodev_vhost_init(CryptoDevBackendVhostOptions *options)
{
    return NULL;
}

CryptoDevBackendVhost *
cryptodev_get_vhost(CryptoDevBackendClient *cc,
                    CryptoDevBackend *b,
                    uint16_t queue)
{
    return NULL;
}

int cryptodev_vhost_start(VirtIODevice *dev, int total_queues)
{
    return -1;
}

void cryptodev_vhost_stop(VirtIODevice *dev, int total_queues)
{
}

void cryptodev_vhost_virtqueue_mask(VirtIODevice *dev,
                                    int queue,
                                    int idx, bool mask)
{
}

bool cryptodev_vhost_virtqueue_pending(VirtIODevice *dev,
                                       int queue, int idx)
{
    return false;
}
#endif