/*
 * QEMU Cryptodev backend for QEMU cipher APIs
 *
 * Copyright (c) 2016 HUAWEI TECHNOLOGIES CO., LTD.
 *
 * Authors:
 *    Gonglei <arei.gonglei@huawei.com>
 *    Jay Zhou <jianjay.zhou@huawei.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 *
 */

#include "qemu/osdep.h"
#include "hw/virtio/virtio-bus.h"
#include "sysemu/cryptodev-vhost.h"

#ifdef CONFIG_VHOST_CRYPTO
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "hw/virtio/virtio-crypto.h"
#include "sysemu/cryptodev-vhost-user.h"

uint64_t
cryptodev_vhost_get_max_queues(
                        CryptoDevBackendVhost *crypto)
{
    return crypto->dev.max_queues;
}

void cryptodev_vhost_cleanup(CryptoDevBackendVhost *crypto)
{
    vhost_dev_cleanup(&crypto->dev);
    g_free(crypto);
}

struct CryptoDevBackendVhost *
cryptodev_vhost_init(
             CryptoDevBackendVhostOptions *options)
{
    int r;
    CryptoDevBackendVhost *crypto;
    Error *local_err = NULL;

    crypto = g_new(CryptoDevBackendVhost, 1);
    crypto->dev.max_queues = 1;
    crypto->dev.nvqs = 1;
    crypto->dev.vqs = crypto->vqs;

    crypto->cc = options->cc;

    crypto->dev.protocol_features = 0;
    crypto->backend = -1;

    /* vhost-user needs vq_index to initiate a specific queue pair */
    crypto->dev.vq_index = crypto->cc->queue_index * crypto->dev.nvqs;

    r = vhost_dev_init(&crypto->dev, options->opaque, options->backend_type, 0,
                       &local_err);
    if (r < 0) {
        error_report_err(local_err);
        goto fail;
    }

    return crypto;
fail:
    g_free(crypto);
    return NULL;
}

static int
cryptodev_vhost_start_one(CryptoDevBackendVhost *crypto,
                          VirtIODevice *dev)
{
    int r;

    crypto->dev.nvqs = 1;
    crypto->dev.vqs = crypto->vqs;

    r = vhost_dev_enable_notifiers(&crypto->dev, dev);
    if (r < 0) {
        goto fail_notifiers;
    }

    r = vhost_dev_start(&crypto->dev, dev, false);
    if (r < 0) {
        goto fail_start;
    }

    return 0;

fail_start:
    vhost_dev_disable_notifiers(&crypto->dev, dev);
fail_notifiers:
    return r;
}

static void
cryptodev_vhost_stop_one(CryptoDevBackendVhost *crypto,
                         VirtIODevice *dev)
{
    vhost_dev_stop(&crypto->dev, dev, false);
    vhost_dev_disable_notifiers(&crypto->dev, dev);
}

CryptoDevBackendVhost *
cryptodev_get_vhost(CryptoDevBackendClient *cc,
                    CryptoDevBackend *b,
                    uint16_t queue)
{
    CryptoDevBackendVhost *vhost_crypto = NULL;

    if (!cc) {
        return NULL;
    }

    switch (cc->type) {
#if defined(CONFIG_VHOST_USER) && defined(CONFIG_LINUX)
    case CRYPTODEV_BACKEND_TYPE_VHOST_USER:
        vhost_crypto = cryptodev_vhost_user_get_vhost(cc, b, queue);
        break;
#endif
    default:
        break;
    }

    return vhost_crypto;
}

static void
cryptodev_vhost_set_vq_index(CryptoDevBackendVhost *crypto,
                             int vq_index)
{
    crypto->dev.vq_index = vq_index;
}

static int
vhost_set_vring_enable(CryptoDevBackendClient *cc,
                       CryptoDevBackend *b,
                       uint16_t queue, int enable)
{
    CryptoDevBackendVhost *crypto =
                       cryptodev_get_vhost(cc, b, queue);
    const VhostOps *vhost_ops;

    cc->vring_enable = enable;

    if (!crypto) {
        return 0;
    }

    vhost_ops = crypto->dev.vhost_ops;
    if (vhost_ops->vhost_set_vring_enable) {
        return vhost_ops->vhost_set_vring_enable(&crypto->dev, enable);
    }

    return 0;
}

int cryptodev_vhost_start(VirtIODevice *dev, int total_queues)
{
    VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(dev);
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(dev)));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
    int r, e;
    int i;
    CryptoDevBackend *b = vcrypto->cryptodev;
    CryptoDevBackendVhost *vhost_crypto;
    CryptoDevBackendClient *cc;

    if (!k->set_guest_notifiers) {
        error_report("binding does not support guest notifiers");
        return -ENOSYS;
    }

    for (i = 0; i < total_queues; i++) {
        cc = b->conf.peers.ccs[i];

        vhost_crypto = cryptodev_get_vhost(cc, b, i);
        cryptodev_vhost_set_vq_index(vhost_crypto, i);

        /* Suppress the masking guest notifiers on vhost user
         * because vhost user doesn't interrupt masking/unmasking
         * properly.
         */
        if (cc->type == CRYPTODEV_BACKEND_TYPE_VHOST_USER) {
            dev->use_guest_notifier_mask = false;
        }
    }

    r = k->set_guest_notifiers(qbus->parent, total_queues, true);
    if (r < 0) {
        error_report("error binding guest notifier: %d", -r);
        goto err;
    }

    for (i = 0; i < total_queues; i++) {
        cc = b->conf.peers.ccs[i];

        vhost_crypto = cryptodev_get_vhost(cc, b, i);
        r = cryptodev_vhost_start_one(vhost_crypto, dev);

        if (r < 0) {
            goto err_start;
        }

        if (cc->vring_enable) {
            /* restore vring enable state */
            r = vhost_set_vring_enable(cc, b, i, cc->vring_enable);

            if (r < 0) {
                goto err_start;
            }
        }
    }

    return 0;

err_start:
    while (--i >= 0) {
        cc = b->conf.peers.ccs[i];
        vhost_crypto = cryptodev_get_vhost(cc, b, i);
        cryptodev_vhost_stop_one(vhost_crypto, dev);
    }
    e = k->set_guest_notifiers(qbus->parent, total_queues, false);
    if (e < 0) {
        error_report("vhost guest notifier cleanup failed: %d", e);
    }
err:
    return r;
}

void cryptodev_vhost_stop(VirtIODevice *dev, int total_queues)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(dev)));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
    VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(dev);
    CryptoDevBackend *b = vcrypto->cryptodev;
    CryptoDevBackendVhost *vhost_crypto;
    CryptoDevBackendClient *cc;
    size_t i;
    int r;

    for (i = 0; i < total_queues; i++) {
        cc = b->conf.peers.ccs[i];

        vhost_crypto = cryptodev_get_vhost(cc, b, i);
        cryptodev_vhost_stop_one(vhost_crypto, dev);
    }

    r = k->set_guest_notifiers(qbus->parent, total_queues, false);
    if (r < 0) {
        error_report("vhost guest notifier cleanup failed: %d", r);
    }
    assert(r >= 0);
}

void cryptodev_vhost_virtqueue_mask(VirtIODevice *dev,
                                    int queue,
                                    int idx, bool mask)
{
    VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(dev);
    CryptoDevBackend *b = vcrypto->cryptodev;
    CryptoDevBackendVhost *vhost_crypto;
    CryptoDevBackendClient *cc;

    assert(queue < MAX_CRYPTO_QUEUE_NUM);

    cc = b->conf.peers.ccs[queue];
    vhost_crypto = cryptodev_get_vhost(cc, b, queue);

    vhost_virtqueue_mask(&vhost_crypto->dev, dev, idx, mask);
}

bool cryptodev_vhost_virtqueue_pending(VirtIODevice *dev,
                                       int queue, int idx)
{
    VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(dev);
    CryptoDevBackend *b = vcrypto->cryptodev;
    CryptoDevBackendVhost *vhost_crypto;
    CryptoDevBackendClient *cc;

    assert(queue < MAX_CRYPTO_QUEUE_NUM);

    cc = b->conf.peers.ccs[queue];
    vhost_crypto = cryptodev_get_vhost(cc, b, queue);

    return vhost_virtqueue_pending(&vhost_crypto->dev, idx);
}

#else
uint64_t
cryptodev_vhost_get_max_queues(CryptoDevBackendVhost *crypto)
{
    return 0;
}

void cryptodev_vhost_cleanup(CryptoDevBackendVhost *crypto)
{
}

struct CryptoDevBackendVhost *
cryptodev_vhost_init(CryptoDevBackendVhostOptions *options)
{
    return NULL;
}

CryptoDevBackendVhost *
cryptodev_get_vhost(CryptoDevBackendClient *cc,
                    CryptoDevBackend *b,
                    uint16_t queue)
{
    return NULL;
}

int cryptodev_vhost_start(VirtIODevice *dev, int total_queues)
{
    return -1;
}

void cryptodev_vhost_stop(VirtIODevice *dev, int total_queues)
{
}

void cryptodev_vhost_virtqueue_mask(VirtIODevice *dev,
                                    int queue,
                                    int idx, bool mask)
{
}

bool cryptodev_vhost_virtqueue_pending(VirtIODevice *dev,
                                       int queue, int idx)
{
    return false;
}
#endif