// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2018, Tuomas Tynkkynen <tuomas.tynkkynen@iki.fi>
 * Copyright (C) 2018, Bin Meng <bmeng.cn@gmail.com>
 *
 * virtio ring implementation
 */

#include <common.h>
#include <dm.h>
#include <malloc.h>
#include <virtio_types.h>
#include <virtio.h>
#include <virtio_ring.h>

int virtqueue_add(struct virtqueue *vq, struct virtio_sg *sgs[],
		  unsigned int out_sgs, unsigned int in_sgs)
{
	struct vring_desc *desc;
	unsigned int total_sg = out_sgs + in_sgs;
	unsigned int i, n, avail, descs_used, uninitialized_var(prev);
	int head;

	WARN_ON(total_sg == 0);

	head = vq->free_head;

	desc = vq->vring.desc;
	i = head;
	descs_used = total_sg;

	if (vq->num_free < descs_used) {
		debug("Can't add buf len %i - avail = %i\n",
		      descs_used, vq->num_free);
		/*
		 * FIXME: for historical reasons, we force a notify here if
		 * there are outgoing parts to the buffer. Presumably the
		 * host should service the ring ASAP.
		 */
		if (out_sgs)
			virtio_notify(vq->vdev, vq);
		return -ENOSPC;
	}

	for (n = 0; n < out_sgs; n++) {
		struct virtio_sg *sg = sgs[n];

		desc[i].flags = cpu_to_virtio16(vq->vdev, VRING_DESC_F_NEXT);
		desc[i].addr = cpu_to_virtio64(vq->vdev, (u64)(size_t)sg->addr);
		desc[i].len = cpu_to_virtio32(vq->vdev, sg->length);

		prev = i;
		i = virtio16_to_cpu(vq->vdev, desc[i].next);
	}
	for (; n < (out_sgs + in_sgs); n++) {
		struct virtio_sg *sg = sgs[n];

		desc[i].flags = cpu_to_virtio16(vq->vdev, VRING_DESC_F_NEXT |
						VRING_DESC_F_WRITE);
		desc[i].addr = cpu_to_virtio64(vq->vdev,
					       (u64)(uintptr_t)sg->addr);
		desc[i].len = cpu_to_virtio32(vq->vdev, sg->length);

		prev = i;
		i = virtio16_to_cpu(vq->vdev, desc[i].next);
	}
	/* Last one doesn't continue */
	desc[prev].flags &= cpu_to_virtio16(vq->vdev, ~VRING_DESC_F_NEXT);

	/* We're using some buffers from the free list. */
	vq->num_free -= descs_used;

	/* Update free pointer */
	vq->free_head = i;

	/*
	 * Put entry in available array (but don't update avail->idx
	 * until they do sync).
	 */
	avail = vq->avail_idx_shadow & (vq->vring.num - 1);
	vq->vring.avail->ring[avail] = cpu_to_virtio16(vq->vdev, head);

	/*
	 * Descriptors and available array need to be set before we expose the
	 * new available array entries.
	 */
	virtio_wmb();
	vq->avail_idx_shadow++;
	vq->vring.avail->idx = cpu_to_virtio16(vq->vdev, vq->avail_idx_shadow);
	vq->num_added++;

	/*
	 * This is very unlikely, but theoretically possible.
	 * Kick just in case.
	 */
	if (unlikely(vq->num_added == (1 << 16) - 1))
		virtqueue_kick(vq);

	return 0;
}

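/*
 * virtqueue_kick_prepare() - check whether the device needs to be notified
 *
 * Compares the old and new available index against either the avail event
 * index (when VIRTIO_RING_F_EVENT_IDX was negotiated) or the used ring's
 * VRING_USED_F_NO_NOTIFY flag to decide whether the buffers added since the
 * last kick require a notification.
 */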
static bool virtqueue_kick_prepare(struct virtqueue *vq)
{
	u16 new, old;
	bool needs_kick;

	/*
	 * We need to expose available array entries before checking
	 * avail event.
	 */
	virtio_mb();

	old = vq->avail_idx_shadow - vq->num_added;
	new = vq->avail_idx_shadow;
	vq->num_added = 0;

	if (vq->event) {
		needs_kick = vring_need_event(virtio16_to_cpu(vq->vdev,
				vring_avail_event(&vq->vring)), new, old);
	} else {
		needs_kick = !(vq->vring.used->flags & cpu_to_virtio16(vq->vdev,
				VRING_USED_F_NO_NOTIFY));
	}

	return needs_kick;
}

void virtqueue_kick(struct virtqueue *vq)
{
	if (virtqueue_kick_prepare(vq))
		virtio_notify(vq->vdev, vq);
}

static void detach_buf(struct virtqueue *vq, unsigned int head)
{
	unsigned int i;
	__virtio16 nextflag = cpu_to_virtio16(vq->vdev, VRING_DESC_F_NEXT);

	/* Put back on free list: unmap first-level descriptors and find end */
	i = head;

	while (vq->vring.desc[i].flags & nextflag) {
		i = virtio16_to_cpu(vq->vdev, vq->vring.desc[i].next);
		vq->num_free++;
	}

	vq->vring.desc[i].next = cpu_to_virtio16(vq->vdev, vq->free_head);
	vq->free_head = head;

	/* Plus final descriptor */
	vq->num_free++;
}

static inline bool more_used(const struct virtqueue *vq)
{
	return vq->last_used_idx != virtio16_to_cpu(vq->vdev,
			vq->vring.used->idx);
}

void *virtqueue_get_buf(struct virtqueue *vq, unsigned int *len)
{
	unsigned int i;
	u16 last_used;

	if (!more_used(vq)) {
		debug("(%s.%d): No more buffers in queue\n",
		      vq->vdev->name, vq->index);
		return NULL;
	}

	/* Only get used array entries after they have been exposed by host */
	virtio_rmb();

	last_used = (vq->last_used_idx & (vq->vring.num - 1));
	i = virtio32_to_cpu(vq->vdev, vq->vring.used->ring[last_used].id);
	if (len) {
		*len = virtio32_to_cpu(vq->vdev,
				       vq->vring.used->ring[last_used].len);
		debug("(%s.%d): last used idx %u with len %u\n",
		      vq->vdev->name, vq->index, i, *len);
	}

	if (unlikely(i >= vq->vring.num)) {
		printf("(%s.%d): id %u out of range\n",
		       vq->vdev->name, vq->index, i);
		return NULL;
	}

	detach_buf(vq, i);
	vq->last_used_idx++;
	/*
	 * If we expect an interrupt for the next entry, tell host
	 * by writing event index and flush out the write before
	 * the read in the next get_buf call.
	 */
	if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT))
		virtio_store_mb(&vring_used_event(&vq->vring),
				cpu_to_virtio16(vq->vdev, vq->last_used_idx));

	return (void *)(uintptr_t)virtio64_to_cpu(vq->vdev,
						  vq->vring.desc[i].addr);
}

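/*
 * __vring_new_virtqueue() - wrap an already initialized vring in a virtqueue
 *
 * Allocates the virtqueue bookkeeping structure, chains all descriptors into
 * the free list, asks the device not to interrupt us (the caller polls the
 * used ring instead) and links the queue into the uclass private list of
 * virtqueues.
 */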
static struct virtqueue *__vring_new_virtqueue(unsigned int index,
					       struct vring vring,
					       struct udevice *udev)
{
	unsigned int i;
	struct virtqueue *vq;
	struct virtio_dev_priv *uc_priv = dev_get_uclass_priv(udev);
	struct udevice *vdev = uc_priv->vdev;

	vq = malloc(sizeof(*vq));
	if (!vq)
		return NULL;

	vq->vdev = vdev;
	vq->index = index;
	vq->num_free = vring.num;
	vq->vring = vring;
	vq->last_used_idx = 0;
	vq->avail_flags_shadow = 0;
	vq->avail_idx_shadow = 0;
	vq->num_added = 0;
	list_add_tail(&vq->list, &uc_priv->vqs);

	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

	/* Tell other side not to bother us */
	vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
	if (!vq->event)
		vq->vring.avail->flags = cpu_to_virtio16(vdev,
				vq->avail_flags_shadow);

	/* Put everything in free lists */
	vq->free_head = 0;
	for (i = 0; i < vring.num - 1; i++)
		vq->vring.desc[i].next = cpu_to_virtio16(vdev, i + 1);

	return vq;
}

struct virtqueue *vring_create_virtqueue(unsigned int index, unsigned int num,
					 unsigned int vring_align,
					 struct udevice *udev)
{
	struct virtqueue *vq;
	void *queue = NULL;
	struct vring vring;

	/* We assume num is a power of 2 */
	if (num & (num - 1)) {
		printf("Bad virtqueue length %u\n", num);
		return NULL;
	}

	/* TODO: allocate each queue chunk individually */
	for (; num && vring_size(num, vring_align) > PAGE_SIZE; num /= 2) {
		queue = memalign(PAGE_SIZE, vring_size(num, vring_align));
		if (queue)
			break;
	}

	if (!num)
		return NULL;

	if (!queue) {
		/* Try to get a single page. You are my only hope! */
		queue = memalign(PAGE_SIZE, vring_size(num, vring_align));
	}
	if (!queue)
		return NULL;

	memset(queue, 0, vring_size(num, vring_align));
	vring_init(&vring, num, queue, vring_align);

	vq = __vring_new_virtqueue(index, vring, udev);
	if (!vq) {
		free(queue);
		return NULL;
	}
	debug("(%s): created vring @ %p for vq @ %p with num %u\n", udev->name,
	      queue, vq, num);

	return vq;
}

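/*
 * vring_del_virtqueue() - tear down a queue from vring_create_virtqueue()
 *
 * Frees the ring memory (vring.desc points at the start of the allocation),
 * removes the queue from the uclass private list and frees the virtqueue.
 */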
void vring_del_virtqueue(struct virtqueue *vq)
{
	free(vq->vring.desc);
	list_del(&vq->list);
	free(vq);
}

unsigned int virtqueue_get_vring_size(struct virtqueue *vq)
{
	return vq->vring.num;
}

ulong virtqueue_get_desc_addr(struct virtqueue *vq)
{
	return (ulong)vq->vring.desc;
}

ulong virtqueue_get_avail_addr(struct virtqueue *vq)
{
	return (ulong)vq->vring.desc +
	       ((char *)vq->vring.avail - (char *)vq->vring.desc);
}

ulong virtqueue_get_used_addr(struct virtqueue *vq)
{
	return (ulong)vq->vring.desc +
	       ((char *)vq->vring.used - (char *)vq->vring.desc);
}

bool virtqueue_poll(struct virtqueue *vq, u16 last_used_idx)
{
	virtio_mb();

	return last_used_idx != virtio16_to_cpu(vq->vdev, vq->vring.used->idx);
}

void virtqueue_dump(struct virtqueue *vq)
{
	unsigned int i;

	printf("virtqueue %p for dev %s:\n", vq, vq->vdev->name);
	printf("\tindex %u, phys addr %p num %u\n",
	       vq->index, vq->vring.desc, vq->vring.num);
	printf("\tfree_head %u, num_added %u, num_free %u\n",
	       vq->free_head, vq->num_added, vq->num_free);
	printf("\tlast_used_idx %u, avail_flags_shadow %u, avail_idx_shadow %u\n",
	       vq->last_used_idx, vq->avail_flags_shadow, vq->avail_idx_shadow);

	printf("Descriptor dump:\n");
	for (i = 0; i < vq->vring.num; i++) {
		printf("\tdesc[%u] = { 0x%llx, len %u, flags %u, next %u }\n",
		       i, vq->vring.desc[i].addr, vq->vring.desc[i].len,
		       vq->vring.desc[i].flags, vq->vring.desc[i].next);
	}

	printf("Avail ring dump:\n");
	printf("\tflags %u, idx %u\n",
	       vq->vring.avail->flags, vq->vring.avail->idx);
	for (i = 0; i < vq->vring.num; i++) {
		printf("\tavail[%u] = %u\n",
		       i, vq->vring.avail->ring[i]);
	}

	printf("Used ring dump:\n");
	printf("\tflags %u, idx %u\n",
	       vq->vring.used->flags, vq->vring.used->idx);
	for (i = 0; i < vq->vring.num; i++) {
		printf("\tused[%u] = { %u, %u }\n", i,
		       vq->vring.used->ring[i].id, vq->vring.used->ring[i].len);
	}
}
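
/*
 * Illustrative sketch of how a driver typically drives a queue with this
 * API; the buffer and length names below are hypothetical, not part of
 * this file:
 *
 *	struct virtio_sg sg = { .addr = buf, .length = len };
 *	struct virtio_sg *sgs[] = { &sg };
 *	unsigned int rlen;
 *
 *	virtqueue_add(vq, sgs, 1, 0);		// one device-readable buffer
 *	virtqueue_kick(vq);			// notify the device if needed
 *	while (!virtqueue_get_buf(vq, &rlen))	// poll the used ring
 *		;
 */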