/* Copyright (C) 2009 Red Hat, Inc.
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 *
 * test virtio server in host kernel.
 */

#include <linux/compat.h>
#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/rcupdate.h>
#include <linux/file.h>
#include <linux/slab.h>

#include "test.h"
#include "vhost.c"

/* Max number of bytes transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others. */
#define VHOST_TEST_WEIGHT 0x80000

enum {
	VHOST_TEST_VQ = 0,
	VHOST_TEST_VQ_MAX = 1,
};

struct vhost_test {
	struct vhost_dev dev;
	struct vhost_virtqueue vqs[VHOST_TEST_VQ_MAX];
};

/* Expects to be always run from workqueue - which acts as a
 * read-side critical section for our kind of RCU. */
static void handle_vq(struct vhost_test *n)
{
	struct vhost_virtqueue *vq = &n->dev.vqs[VHOST_TEST_VQ];
	unsigned out, in;
	int head;
	size_t len, total_len = 0;
	void *private;

	private = rcu_dereference_check(vq->private_data, 1);
	if (!private)
		return;

	mutex_lock(&vq->mutex);
	vhost_disable_notify(vq);

	for (;;) {
		head = vhost_get_vq_desc(&n->dev, vq, vq->iov,
					 ARRAY_SIZE(vq->iov),
					 &out, &in,
					 NULL, NULL);
		/* On error, stop handling until the next kick. */
		if (unlikely(head < 0))
			break;
		/* Nothing new? Wait for eventfd to tell us they refilled. */
		if (head == vq->num) {
			if (unlikely(vhost_enable_notify(vq))) {
				vhost_disable_notify(vq);
				continue;
			}
			break;
		}
		if (in) {
			vq_err(vq, "Unexpected descriptor format for TX: "
			       "out %d, in %d\n", out, in);
			break;
		}
		len = iov_length(vq->iov, out);
		/* Sanity check */
		if (!len) {
			vq_err(vq, "Unexpected 0 len for TX\n");
			break;
		}
		vhost_add_used_and_signal(&n->dev, vq, head, 0);
		total_len += len;
		if (unlikely(total_len >= VHOST_TEST_WEIGHT)) {
			vhost_poll_queue(&vq->poll);
			break;
		}
	}

	mutex_unlock(&vq->mutex);
}

static void handle_vq_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_test *n = container_of(vq->dev, struct vhost_test, dev);

	handle_vq(n);
}

static int vhost_test_open(struct inode *inode, struct file *f)
{
	struct vhost_test *n = kmalloc(sizeof *n, GFP_KERNEL);
	struct vhost_dev *dev;
	int r;

	if (!n)
		return -ENOMEM;

	dev = &n->dev;
	n->vqs[VHOST_TEST_VQ].handle_kick = handle_vq_kick;
	r = vhost_dev_init(dev, n->vqs, VHOST_TEST_VQ_MAX);
	if (r < 0) {
		kfree(n);
		return r;
	}

	f->private_data = n;

	return 0;
}

static void *vhost_test_stop_vq(struct vhost_test *n,
				struct vhost_virtqueue *vq)
{
	void *private;

	mutex_lock(&vq->mutex);
	private = rcu_dereference_protected(vq->private_data,
					    lockdep_is_held(&vq->mutex));
	rcu_assign_pointer(vq->private_data, NULL);
	mutex_unlock(&vq->mutex);
	return private;
}

static void vhost_test_stop(struct vhost_test *n, void **privatep)
{
	*privatep = vhost_test_stop_vq(n, n->vqs + VHOST_TEST_VQ);
}

static void vhost_test_flush_vq(struct vhost_test *n, int index)
{
	vhost_poll_flush(&n->dev.vqs[index].poll);
}

static void vhost_test_flush(struct vhost_test *n)
{
	vhost_test_flush_vq(n, VHOST_TEST_VQ);
}

static int vhost_test_release(struct inode *inode, struct file *f)
{
	struct vhost_test *n = f->private_data;
	void *private;

	vhost_test_stop(n, &private);
	vhost_test_flush(n);
	vhost_dev_cleanup(&n->dev);
	/* We do an extra flush before freeing memory,
	 * since jobs can re-queue themselves. */
	vhost_test_flush(n);
	kfree(n);
	return 0;
}

static long vhost_test_run(struct vhost_test *n, int test)
{
	void *priv, *oldpriv;
	struct vhost_virtqueue *vq;
	int r, index;

	if (test < 0 || test > 1)
		return -EINVAL;

	mutex_lock(&n->dev.mutex);
	r = vhost_dev_check_owner(&n->dev);
	if (r)
		goto err;

	for (index = 0; index < n->dev.nvqs; ++index) {
		/* Verify that ring has been setup correctly. */
		if (!vhost_vq_access_ok(&n->vqs[index])) {
			r = -EFAULT;
			goto err;
		}
	}

	for (index = 0; index < n->dev.nvqs; ++index) {
		vq = n->vqs + index;
		mutex_lock(&vq->mutex);
		priv = test ? n : NULL;

		/* Start or stop the virtqueue: non-NULL private_data
		 * enables handling in handle_vq(). */
		oldpriv = rcu_dereference_protected(vq->private_data,
						    lockdep_is_held(&vq->mutex));
		rcu_assign_pointer(vq->private_data, priv);

		mutex_unlock(&vq->mutex);

		if (oldpriv)
			vhost_test_flush_vq(n, index);
	}

	mutex_unlock(&n->dev.mutex);
	return 0;

err:
	mutex_unlock(&n->dev.mutex);
	return r;
}

static long vhost_test_reset_owner(struct vhost_test *n)
{
	void *priv = NULL;
	long err;

	mutex_lock(&n->dev.mutex);
	err = vhost_dev_check_owner(&n->dev);
	if (err)
		goto done;
	vhost_test_stop(n, &priv);
	vhost_test_flush(n);
	err = vhost_dev_reset_owner(&n->dev);
done:
	mutex_unlock(&n->dev.mutex);
	return err;
}

static int vhost_test_set_features(struct vhost_test *n, u64 features)
{
	mutex_lock(&n->dev.mutex);
	if ((features & (1 << VHOST_F_LOG_ALL)) &&
	    !vhost_log_access_ok(&n->dev)) {
		mutex_unlock(&n->dev.mutex);
		return -EFAULT;
	}
	n->dev.acked_features = features;
	smp_wmb();
	vhost_test_flush(n);
	mutex_unlock(&n->dev.mutex);
	return 0;
}

static long vhost_test_ioctl(struct file *f, unsigned int ioctl,
			     unsigned long arg)
{
	struct vhost_test *n = f->private_data;
	void __user *argp = (void __user *)arg;
	u64 __user *featurep = argp;
	int test;
	u64 features;
	int r;

	switch (ioctl) {
	case VHOST_TEST_RUN:
		if (copy_from_user(&test, argp, sizeof test))
			return -EFAULT;
		return vhost_test_run(n, test);
	case VHOST_GET_FEATURES:
		features = VHOST_FEATURES;
		if (copy_to_user(featurep, &features, sizeof features))
			return -EFAULT;
		return 0;
	case VHOST_SET_FEATURES:
		if (copy_from_user(&features, featurep, sizeof features))
			return -EFAULT;
		if (features & ~VHOST_FEATURES)
			return -EOPNOTSUPP;
		return vhost_test_set_features(n, features);
	case VHOST_RESET_OWNER:
		return vhost_test_reset_owner(n);
	default:
		mutex_lock(&n->dev.mutex);
		r = vhost_dev_ioctl(&n->dev, ioctl, arg);
		vhost_test_flush(n);
		mutex_unlock(&n->dev.mutex);
		return r;
	}
}

#ifdef CONFIG_COMPAT
static long vhost_test_compat_ioctl(struct file *f, unsigned int ioctl,
				    unsigned long arg)
{
	return vhost_test_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
}
#endif

static const struct file_operations vhost_test_fops = {
	.owner          = THIS_MODULE,
	.release        = vhost_test_release,
	.unlocked_ioctl = vhost_test_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl   = vhost_test_compat_ioctl,
#endif
	.open           = vhost_test_open,
	.llseek         = noop_llseek,
};

static struct miscdevice vhost_test_misc = {
	MISC_DYNAMIC_MINOR,
	"vhost-test",
	&vhost_test_fops,
};

static int vhost_test_init(void)
{
	return misc_register(&vhost_test_misc);
}
module_init(vhost_test_init);

static void vhost_test_exit(void)
{
	misc_deregister(&vhost_test_misc);
}
module_exit(vhost_test_exit);

MODULE_VERSION("0.0.1");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Michael S. Tsirkin");
MODULE_DESCRIPTION("Host kernel side for virtio simulator");