/*
 * QTest testcase for VirtIO IOMMU
 *
 * Copyright (c) 2021 Red Hat, Inc.
 *
 * Authors:
 *  Eric Auger <eric.auger@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or (at your
 * option) any later version. See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "libqtest-single.h"
#include "qemu/module.h"
#include "libqos/qgraph.h"
#include "libqos/virtio-iommu.h"
#include "hw/virtio/virtio-iommu.h"

#define PCI_SLOT_HP              0x06
#define QVIRTIO_IOMMU_TIMEOUT_US (30 * 1000 * 1000)

static QGuestAllocator *alloc;

static void pci_config(void *obj, void *data, QGuestAllocator *t_alloc)
{
    QVirtioIOMMU *v_iommu = obj;
    QVirtioDevice *dev = v_iommu->vdev;
    uint64_t input_range_start = qvirtio_config_readq(dev, 8);
    uint64_t input_range_end = qvirtio_config_readq(dev, 16);
    uint32_t domain_range_start = qvirtio_config_readl(dev, 24);
    uint32_t domain_range_end = qvirtio_config_readl(dev, 28);

    g_assert_cmpint(input_range_start, ==, 0);
    g_assert_cmphex(input_range_end, ==, UINT64_MAX);
    g_assert_cmpint(domain_range_start, ==, 0);
    g_assert_cmpint(domain_range_end, ==, UINT32_MAX);
}

static int read_tail_status(struct virtio_iommu_req_tail *buffer)
{
    int i;

    for (i = 0; i < 3; i++) {
        g_assert_cmpint(buffer->reserved[i], ==, 0);
    }
    return buffer->status;
}

/**
 * send_attach_detach - Send an attach/detach command to the device
 * @type: VIRTIO_IOMMU_T_ATTACH/VIRTIO_IOMMU_T_DETACH
 * @domain: domain the endpoint is attached to
 * @ep: endpoint
 */
static int send_attach_detach(QTestState *qts, QVirtioIOMMU *v_iommu,
                              uint8_t type, uint32_t domain, uint32_t ep)
{
    QVirtioDevice *dev = v_iommu->vdev;
    QVirtQueue *vq = v_iommu->vq;
    uint64_t ro_addr, wr_addr;
    uint32_t free_head;
    struct virtio_iommu_req_attach req = {}; /* same layout as detach */
    size_t ro_size = sizeof(req) - sizeof(struct virtio_iommu_req_tail);
    size_t wr_size = sizeof(struct virtio_iommu_req_tail);
    struct virtio_iommu_req_tail buffer;
    int ret;

    req.head.type = type;
    req.domain = cpu_to_le32(domain);
    req.endpoint = cpu_to_le32(ep);

    ro_addr = guest_alloc(alloc, ro_size);
    wr_addr = guest_alloc(alloc, wr_size);

    qtest_memwrite(qts, ro_addr, &req, ro_size);
    free_head = qvirtqueue_add(qts, vq, ro_addr, ro_size, false, true);
    qvirtqueue_add(qts, vq, wr_addr, wr_size, true, false);
    qvirtqueue_kick(qts, dev, vq, free_head);
    qvirtio_wait_used_elem(qts, dev, vq, free_head, NULL,
                           QVIRTIO_IOMMU_TIMEOUT_US);
    qtest_memread(qts, wr_addr, &buffer, wr_size);
    ret = read_tail_status(&buffer);
    guest_free(alloc, ro_addr);
    guest_free(alloc, wr_addr);
    return ret;
}
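
/*
 * send_map() and send_unmap() below follow the same submission pattern as
 * send_attach_detach(): the device-readable part of the request (everything
 * but the tail) is written to guest memory and chained with a device-writable
 * buffer that receives the request tail, whose status field carries the
 * result and whose reserved bytes are checked in read_tail_status().
 */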

/**
 * send_map - Send a map command to the device
 * @domain: domain the new mapping is attached to
 * @virt_start: iova start
 * @virt_end: iova end
 * @phys_start: base physical address
 * @flags: mapping flags
 */
static int send_map(QTestState *qts, QVirtioIOMMU *v_iommu,
                    uint32_t domain, uint64_t virt_start, uint64_t virt_end,
                    uint64_t phys_start, uint32_t flags)
{
    QVirtioDevice *dev = v_iommu->vdev;
    QVirtQueue *vq = v_iommu->vq;
    uint64_t ro_addr, wr_addr;
    uint32_t free_head;
    struct virtio_iommu_req_map req;
    size_t ro_size = sizeof(req) - sizeof(struct virtio_iommu_req_tail);
    size_t wr_size = sizeof(struct virtio_iommu_req_tail);
    struct virtio_iommu_req_tail buffer;
    int ret;

    req.head.type = VIRTIO_IOMMU_T_MAP;
    req.domain = cpu_to_le32(domain);
    req.virt_start = cpu_to_le64(virt_start);
    req.virt_end = cpu_to_le64(virt_end);
    req.phys_start = cpu_to_le64(phys_start);
    req.flags = cpu_to_le32(flags);

    ro_addr = guest_alloc(alloc, ro_size);
    wr_addr = guest_alloc(alloc, wr_size);

    qtest_memwrite(qts, ro_addr, &req, ro_size);
    free_head = qvirtqueue_add(qts, vq, ro_addr, ro_size, false, true);
    qvirtqueue_add(qts, vq, wr_addr, wr_size, true, false);
    qvirtqueue_kick(qts, dev, vq, free_head);
    qvirtio_wait_used_elem(qts, dev, vq, free_head, NULL,
                           QVIRTIO_IOMMU_TIMEOUT_US);
    qtest_memread(qts, wr_addr, &buffer, wr_size);
    ret = read_tail_status(&buffer);
    guest_free(alloc, ro_addr);
    guest_free(alloc, wr_addr);
    return ret;
}

/**
 * send_unmap - Send an unmap command to the device
 * @domain: domain the mappings to remove belong to
 * @virt_start: iova start
 * @virt_end: iova end
 */
static int send_unmap(QTestState *qts, QVirtioIOMMU *v_iommu,
                      uint32_t domain, uint64_t virt_start, uint64_t virt_end)
{
    QVirtioDevice *dev = v_iommu->vdev;
    QVirtQueue *vq = v_iommu->vq;
    uint64_t ro_addr, wr_addr;
    uint32_t free_head;
    struct virtio_iommu_req_unmap req;
    size_t ro_size = sizeof(req) - sizeof(struct virtio_iommu_req_tail);
    size_t wr_size = sizeof(struct virtio_iommu_req_tail);
    struct virtio_iommu_req_tail buffer;
    int ret;

    req.head.type = VIRTIO_IOMMU_T_UNMAP;
    req.domain = cpu_to_le32(domain);
    req.virt_start = cpu_to_le64(virt_start);
    req.virt_end = cpu_to_le64(virt_end);

    ro_addr = guest_alloc(alloc, ro_size);
    wr_addr = guest_alloc(alloc, wr_size);

    qtest_memwrite(qts, ro_addr, &req, ro_size);
    free_head = qvirtqueue_add(qts, vq, ro_addr, ro_size, false, true);
    qvirtqueue_add(qts, vq, wr_addr, wr_size, true, false);
    qvirtqueue_kick(qts, dev, vq, free_head);
    qvirtio_wait_used_elem(qts, dev, vq, free_head, NULL,
                           QVIRTIO_IOMMU_TIMEOUT_US);
    qtest_memread(qts, wr_addr, &buffer, wr_size);
    ret = read_tail_status(&buffer);
    guest_free(alloc, ro_addr);
    guest_free(alloc, wr_addr);
    return ret;
}
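
/* Test endpoint attach/detach to/from domains, including error cases */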
static void test_attach_detach(void *obj, void *data, QGuestAllocator *t_alloc)
{
    QVirtioIOMMU *v_iommu = obj;
    QTestState *qts = global_qtest;
    int ret;

    alloc = t_alloc;

    /* send_attach_detach arguments: type, domain, ep */

    /* attach ep0 to domain 0 */
    ret = send_attach_detach(qts, v_iommu, VIRTIO_IOMMU_T_ATTACH, 0, 0);
    g_assert_cmpint(ret, ==, 0);

    /* attach a non-existing device (444) */
    ret = send_attach_detach(qts, v_iommu, VIRTIO_IOMMU_T_ATTACH, 0, 444);
    g_assert_cmpint(ret, ==, VIRTIO_IOMMU_S_NOENT);

    /* detach a non-existing device (1) */
    ret = send_attach_detach(qts, v_iommu, VIRTIO_IOMMU_T_DETACH, 0, 1);
    g_assert_cmpint(ret, ==, VIRTIO_IOMMU_S_NOENT);

    /* move ep0 from domain 0 to domain 1 */
    ret = send_attach_detach(qts, v_iommu, VIRTIO_IOMMU_T_ATTACH, 1, 0);
    g_assert_cmpint(ret, ==, 0);

    /* detaching ep0 from domain 0 fails: ep0 is now attached to domain 1 */
    ret = send_attach_detach(qts, v_iommu, VIRTIO_IOMMU_T_DETACH, 0, 0);
    g_assert_cmpint(ret, ==, VIRTIO_IOMMU_S_INVAL);

    /* detach ep0 from domain 1 */
    ret = send_attach_detach(qts, v_iommu, VIRTIO_IOMMU_T_DETACH, 1, 0);
    g_assert_cmpint(ret, ==, 0);

    /* re-attach ep0 to domain 1, create mappings, then detach again */
    ret = send_attach_detach(qts, v_iommu, VIRTIO_IOMMU_T_ATTACH, 1, 0);
    g_assert_cmpint(ret, ==, 0);
    ret = send_map(qts, v_iommu, 1, 0x0, 0xFFF, 0xa1000,
                   VIRTIO_IOMMU_MAP_F_READ);
    g_assert_cmpint(ret, ==, 0);
    ret = send_map(qts, v_iommu, 1, 0x2000, 0x2FFF, 0xb1000,
                   VIRTIO_IOMMU_MAP_F_READ);
    g_assert_cmpint(ret, ==, 0);
    ret = send_attach_detach(qts, v_iommu, VIRTIO_IOMMU_T_DETACH, 1, 0);
    g_assert_cmpint(ret, ==, 0);
}

/* Test the map/unmap scenarios documented in the virtio-iommu spec */
static void test_map_unmap(void *obj, void *data, QGuestAllocator *t_alloc)
{
    QVirtioIOMMU *v_iommu = obj;
    QTestState *qts = global_qtest;
    int ret;

    alloc = t_alloc;

    /* attach ep0 to domain 1 */
    ret = send_attach_detach(qts, v_iommu, VIRTIO_IOMMU_T_ATTACH, 1, 0);
    g_assert_cmpint(ret, ==, 0);

    /* map request on a non-existing domain */
    ret = send_map(qts, v_iommu, 0, 0, 0xFFF, 0xa1000,
                   VIRTIO_IOMMU_MAP_F_READ);
    g_assert_cmpint(ret, ==, VIRTIO_IOMMU_S_NOENT);

    /* send_map arguments: domain, virt start, virt end, phys start, flags */
    ret = send_map(qts, v_iommu, 1, 0x0, 0xFFF, 0xa1000,
                   VIRTIO_IOMMU_MAP_F_READ);
    g_assert_cmpint(ret, ==, 0);

    /* send a new mapping overlapping the previous one */
    ret = send_map(qts, v_iommu, 1, 0, 0xFFFF, 0xb1000,
                   VIRTIO_IOMMU_MAP_F_READ);
    g_assert_cmpint(ret, ==, VIRTIO_IOMMU_S_INVAL);

    /* unmap on a non-existing domain */
    ret = send_unmap(qts, v_iommu, 4, 0x10, 0xFFF);
    g_assert_cmpint(ret, ==, VIRTIO_IOMMU_S_NOENT);

    /* partial unmap of the [0, 0xFFF] mapping */
    ret = send_unmap(qts, v_iommu, 1, 0x10, 0xFFF);
    g_assert_cmpint(ret, ==, VIRTIO_IOMMU_S_RANGE);

    ret = send_unmap(qts, v_iommu, 1, 0, 0x1000);
    g_assert_cmpint(ret, ==, 0); /* unmap everything */

    /* Spec example sequence */
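    /*
     * Common to the examples below: mappings are always removed whole
     * (never split), an UNMAP range may cover several mappings or extend
     * beyond them, and a range that only partially covers a mapping fails
     * with VIRTIO_IOMMU_S_RANGE without removing anything.
     */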

    /* 1 */
    ret = send_unmap(qts, v_iommu, 1, 0, 4);
    g_assert_cmpint(ret, ==, 0); /* doesn't unmap anything */

    /* 2 */
    ret = send_map(qts, v_iommu, 1, 0, 9, 0xa1000, VIRTIO_IOMMU_MAP_F_READ);
    g_assert_cmpint(ret, ==, 0);
    ret = send_unmap(qts, v_iommu, 1, 0, 9);
    g_assert_cmpint(ret, ==, 0); /* unmaps [0,9] */

    /* 3 */
    ret = send_map(qts, v_iommu, 1, 0, 4, 0xb1000, VIRTIO_IOMMU_MAP_F_READ);
    g_assert_cmpint(ret, ==, 0);
    ret = send_map(qts, v_iommu, 1, 5, 9, 0xb2000, VIRTIO_IOMMU_MAP_F_READ);
    g_assert_cmpint(ret, ==, 0);
    ret = send_unmap(qts, v_iommu, 1, 0, 9);
    g_assert_cmpint(ret, ==, 0); /* unmaps [0,4] and [5,9] */

    /* 4 */
    ret = send_map(qts, v_iommu, 1, 0, 9, 0xc1000, VIRTIO_IOMMU_MAP_F_READ);
    g_assert_cmpint(ret, ==, 0);

    ret = send_unmap(qts, v_iommu, 1, 0, 4);
    g_assert_cmpint(ret, ==, VIRTIO_IOMMU_S_RANGE); /* doesn't unmap anything */

    ret = send_unmap(qts, v_iommu, 1, 0, 10);
    g_assert_cmpint(ret, ==, 0); /* unmaps [0,9] */

    /* 5 */
    ret = send_map(qts, v_iommu, 1, 0, 4, 0xd1000, VIRTIO_IOMMU_MAP_F_READ);
    g_assert_cmpint(ret, ==, 0);
    ret = send_map(qts, v_iommu, 1, 5, 9, 0xd2000, VIRTIO_IOMMU_MAP_F_READ);
    g_assert_cmpint(ret, ==, 0);
    ret = send_unmap(qts, v_iommu, 1, 0, 4);
    g_assert_cmpint(ret, ==, 0); /* unmaps [0,4] */

    ret = send_unmap(qts, v_iommu, 1, 5, 9);
    g_assert_cmpint(ret, ==, 0); /* unmaps [5,9] */

    /* 6 */
    ret = send_map(qts, v_iommu, 1, 0, 4, 0xe2000, VIRTIO_IOMMU_MAP_F_READ);
    g_assert_cmpint(ret, ==, 0);
    ret = send_unmap(qts, v_iommu, 1, 0, 9);
    g_assert_cmpint(ret, ==, 0); /* unmaps [0,4] */

    /* 7 */
    ret = send_map(qts, v_iommu, 1, 0, 4, 0xf2000, VIRTIO_IOMMU_MAP_F_READ);
    g_assert_cmpint(ret, ==, 0);
    ret = send_map(qts, v_iommu, 1, 10, 14, 0xf3000, VIRTIO_IOMMU_MAP_F_READ);
    g_assert_cmpint(ret, ==, 0);
    ret = send_unmap(qts, v_iommu, 1, 0, 14);
    g_assert_cmpint(ret, ==, 0); /* unmaps [0,4] and [10,14] */

    ret = send_map(qts, v_iommu, 1, 10, 14, 0xf3000, VIRTIO_IOMMU_MAP_F_READ);
    g_assert_cmpint(ret, ==, 0);
    ret = send_map(qts, v_iommu, 1, 0, 4, 0xf2000, VIRTIO_IOMMU_MAP_F_READ);
    g_assert_cmpint(ret, ==, 0);
    ret = send_unmap(qts, v_iommu, 1, 0, 4);
    g_assert_cmpint(ret, ==, 0); /* only unmaps [0,4] */
    ret = send_map(qts, v_iommu, 1, 10, 14, 0xf3000, VIRTIO_IOMMU_MAP_F_READ);
    g_assert_cmpint(ret, ==, VIRTIO_IOMMU_S_INVAL); /* [10,14] still mapped */
}

static void register_virtio_iommu_test(void)
{
    qos_add_test("config", "virtio-iommu", pci_config, NULL);
    qos_add_test("attach_detach", "virtio-iommu", test_attach_detach, NULL);
    qos_add_test("map_unmap", "virtio-iommu", test_map_unmap, NULL);
}

libqos_init(register_virtio_iommu_test);