/*
 * QEMU emulation of AMD IOMMU (AMD-Vi)
 *
 * Copyright (C) 2011 Eduard - Gabriel Munteanu
 * Copyright (C) 2015 David Kiarie, <davidkiarie4@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.

 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.

 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Cache implementation inspired by hw/i386/intel_iommu.c
 */
#include "qemu/osdep.h"
#include "hw/i386/amd_iommu.h"
#include "qemu/error-report.h"
#include "trace.h"

/* AMD-Vi MMIO register names, used when tracing register accesses */
const char *amdvi_mmio_low[] = {
    "AMDVI_MMIO_DEVTAB_BASE",
    "AMDVI_MMIO_CMDBUF_BASE",
    "AMDVI_MMIO_EVTLOG_BASE",
    "AMDVI_MMIO_CONTROL",
    "AMDVI_MMIO_EXCL_BASE",
    "AMDVI_MMIO_EXCL_LIMIT",
    "AMDVI_MMIO_EXT_FEATURES",
    "AMDVI_MMIO_PPR_BASE",
    "UNHANDLED"
};
const char *amdvi_mmio_high[] = {
    "AMDVI_MMIO_COMMAND_HEAD",
    "AMDVI_MMIO_COMMAND_TAIL",
    "AMDVI_MMIO_EVTLOG_HEAD",
    "AMDVI_MMIO_EVTLOG_TAIL",
    "AMDVI_MMIO_STATUS",
    "AMDVI_MMIO_PPR_HEAD",
    "AMDVI_MMIO_PPR_TAIL",
    "UNHANDLED"
};

struct AMDVIAddressSpace {
    uint8_t bus_num;            /* bus number                           */
    uint8_t devfn;              /* device function                      */
    AMDVIState *iommu_state;    /* AMDVI - one per machine              */
    MemoryRegion iommu;         /* Device's address translation region  */
    MemoryRegion iommu_ir;      /* Device's interrupt remapping region  */
    AddressSpace as;            /* device's corresponding address space */
};

/* AMDVI cache entry */
typedef struct AMDVIIOTLBEntry {
    uint16_t domid;             /* assigned domain id  */
    uint16_t devid;             /* device owning entry */
    uint64_t perms;             /* access permissions  */
    uint64_t translated_addr;   /* translated address  */
    uint64_t page_mask;         /* physical page size  */
} AMDVIIOTLBEntry;

/* configure MMIO registers at startup/reset */
static void amdvi_set_quad(AMDVIState *s, hwaddr addr, uint64_t val,
                           uint64_t romask, uint64_t w1cmask)
{
    stq_le_p(&s->mmior[addr], val);
    stq_le_p(&s->romask[addr], romask);
    stq_le_p(&s->w1cmask[addr], w1cmask);
}

static uint16_t amdvi_readw(AMDVIState *s, hwaddr addr)
{
    return lduw_le_p(&s->mmior[addr]);
}

static uint32_t amdvi_readl(AMDVIState *s, hwaddr addr)
{
    return ldl_le_p(&s->mmior[addr]);
}

static uint64_t amdvi_readq(AMDVIState *s, hwaddr addr)
{
    return ldq_le_p(&s->mmior[addr]);
}

/* internal write */
static void amdvi_writeq_raw(AMDVIState *s, hwaddr addr, uint64_t val)
{
    stq_le_p(&s->mmior[addr], val);
}

/* external write */
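/* bits covered by romask keep their current value, bits covered by w1cmask
 * are cleared when the guest writes 1 to them, and all remaining bits take
 * the value written
 */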
static void amdvi_writew(AMDVIState *s, hwaddr addr, uint16_t val)
{
    uint16_t romask = lduw_le_p(&s->romask[addr]);
    uint16_t w1cmask = lduw_le_p(&s->w1cmask[addr]);
    uint16_t oldval = lduw_le_p(&s->mmior[addr]);
    stw_le_p(&s->mmior[addr],
            ((oldval & romask) | (val & ~romask)) & ~(val & w1cmask));
}

static void amdvi_writel(AMDVIState *s, hwaddr addr, uint32_t val)
{
    uint32_t romask = ldl_le_p(&s->romask[addr]);
    uint32_t w1cmask = ldl_le_p(&s->w1cmask[addr]);
    uint32_t oldval = ldl_le_p(&s->mmior[addr]);
    stl_le_p(&s->mmior[addr],
            ((oldval & romask) | (val & ~romask)) & ~(val & w1cmask));
}

static void amdvi_writeq(AMDVIState *s, hwaddr addr, uint64_t val)
{
    uint64_t romask = ldq_le_p(&s->romask[addr]);
    uint64_t w1cmask = ldq_le_p(&s->w1cmask[addr]);
    uint64_t oldval = ldq_le_p(&s->mmior[addr]);
    stq_le_p(&s->mmior[addr],
            ((oldval & romask) | (val & ~romask)) & ~(val & w1cmask));
}

/* test a 64-bit register against a 64-bit mask */
static bool amdvi_test_mask(AMDVIState *s, hwaddr addr, uint64_t val)
{
    return amdvi_readq(s, addr) & val;
}

/* OR a 64-bit register with a 64-bit value storing result in the register */
static void amdvi_assign_orq(AMDVIState *s, hwaddr addr, uint64_t val)
{
    amdvi_writeq_raw(s, addr, amdvi_readq(s, addr) | val);
}

/* AND a 64-bit register with a 64-bit value storing result in the register */
static void amdvi_assign_andq(AMDVIState *s, hwaddr addr, uint64_t val)
{
    amdvi_writeq_raw(s, addr, amdvi_readq(s, addr) & val);
}

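/* deliver the event-log/completion interrupt as a direct MSI write, tagged
 * with the IOMMU's own requester id
 */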
static void amdvi_generate_msi_interrupt(AMDVIState *s)
{
    MSIMessage msg = {};
    MemTxAttrs attrs = {
        .requester_id = pci_requester_id(&s->pci.dev)
    };

    if (msi_enabled(&s->pci.dev)) {
        msg = msi_get_message(&s->pci.dev, 0);
        address_space_stl_le(&address_space_memory, msg.address, msg.data,
                             attrs, NULL);
    }
}

static void amdvi_log_event(AMDVIState *s, uint64_t *evt)
{
    /* event logging not enabled */
    if (!s->evtlog_enabled || amdvi_test_mask(s, AMDVI_MMIO_STATUS,
        AMDVI_MMIO_STATUS_EVT_OVF)) {
        return;
    }

    /* event log buffer full */
    if (s->evtlog_tail >= s->evtlog_len) {
        amdvi_assign_orq(s, AMDVI_MMIO_STATUS, AMDVI_MMIO_STATUS_EVT_OVF);
        /* generate interrupt */
        amdvi_generate_msi_interrupt(s);
        return;
    }

    if (dma_memory_write(&address_space_memory, s->evtlog + s->evtlog_tail,
        evt, AMDVI_EVENT_LEN)) {
        trace_amdvi_evntlog_fail(s->evtlog, s->evtlog_tail);
    }

    s->evtlog_tail += AMDVI_EVENT_LEN;
    amdvi_assign_orq(s, AMDVI_MMIO_STATUS, AMDVI_MMIO_STATUS_COMP_INT);
    amdvi_generate_msi_interrupt(s);
}

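/* deposit a bit-field into one 64-bit word of the four-word event record:
 * index selects the word, bitpos the offset within it
 */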
static void amdvi_setevent_bits(uint64_t *buffer, uint64_t value, int start,
                                int length)
{
    int index = start / 64, bitpos = start % 64;
    uint64_t mask = MAKE_64BIT_MASK(start, length);
    buffer[index] &= ~mask;
    buffer[index] |= (value << bitpos) & mask;
}
/*
 * AMDVi event structure
 *    0:15   -> DeviceID
 *    55:63  -> event type + miscellaneous info
 *    63:127 -> related address
 */
static void amdvi_encode_event(uint64_t *evt, uint16_t devid, uint64_t addr,
                               uint16_t info)
{
    amdvi_setevent_bits(evt, devid, 0, 16);
    amdvi_setevent_bits(evt, info, 55, 8);
    amdvi_setevent_bits(evt, addr, 63, 64);
}
/* log an error encountered during a page walk
 *
 * @addr: virtual address in translation request
 */
static void amdvi_page_fault(AMDVIState *s, uint16_t devid,
                             hwaddr addr, uint16_t info)
{
    uint64_t evt[4];

    info |= AMDVI_EVENT_IOPF_I | AMDVI_EVENT_IOPF;
    amdvi_encode_event(evt, devid, addr, info);
    amdvi_log_event(s, evt);
    pci_word_test_and_set_mask(s->pci.dev.config + PCI_STATUS,
            PCI_STATUS_SIG_TARGET_ABORT);
}
/*
 * log a master abort accessing device table
 *  @devtab : address of device table entry
 *  @info : error flags
 */
static void amdvi_log_devtab_error(AMDVIState *s, uint16_t devid,
                                   hwaddr devtab, uint16_t info)
{
    uint64_t evt[4];

    info |= AMDVI_EVENT_DEV_TAB_HW_ERROR;

    amdvi_encode_event(evt, devid, devtab, info);
    amdvi_log_event(s, evt);
    pci_word_test_and_set_mask(s->pci.dev.config + PCI_STATUS,
            PCI_STATUS_SIG_TARGET_ABORT);
}
/* log an event trying to access command buffer
 *   @addr : address that couldn't be accessed
 */
static void amdvi_log_command_error(AMDVIState *s, hwaddr addr)
{
    uint64_t evt[4], info = AMDVI_EVENT_COMMAND_HW_ERROR;

    amdvi_encode_event(evt, 0, addr, info);
    amdvi_log_event(s, evt);
    pci_word_test_and_set_mask(s->pci.dev.config + PCI_STATUS,
            PCI_STATUS_SIG_TARGET_ABORT);
}
/* log an illegal command event
 *   @addr : address of illegal command
 */
static void amdvi_log_illegalcom_error(AMDVIState *s, uint16_t info,
                                       hwaddr addr)
{
    uint64_t evt[4];

    info |= AMDVI_EVENT_ILLEGAL_COMMAND_ERROR;
    amdvi_encode_event(evt, 0, addr, info);
    amdvi_log_event(s, evt);
}
/* log an error accessing device table
 *
 *  @devid : device owning the table entry
 *  @devtab : address of device table entry
 *  @info : error flags
 */
static void amdvi_log_illegaldevtab_error(AMDVIState *s, uint16_t devid,
                                          hwaddr addr, uint16_t info)
{
    uint64_t evt[4];

    info |= AMDVI_EVENT_ILLEGAL_DEVTAB_ENTRY;
    amdvi_encode_event(evt, devid, addr, info);
    amdvi_log_event(s, evt);
}
/* log an error accessing a page table entry
 * @addr : address that couldn't be accessed
 */
static void amdvi_log_pagetab_error(AMDVIState *s, uint16_t devid,
                                    hwaddr addr, uint16_t info)
{
    uint64_t evt[4];

    info |= AMDVI_EVENT_PAGE_TAB_HW_ERROR;
    amdvi_encode_event(evt, devid, addr, info);
    amdvi_log_event(s, evt);
    pci_word_test_and_set_mask(s->pci.dev.config + PCI_STATUS,
             PCI_STATUS_SIG_TARGET_ABORT);
}

static gboolean amdvi_uint64_equal(gconstpointer v1, gconstpointer v2)
{
    return *((const uint64_t *)v1) == *((const uint64_t *)v2);
}

static guint amdvi_uint64_hash(gconstpointer v)
{
    return (guint)*(const uint64_t *)v;
}

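/* IOTLB keys pack the 4K guest frame number in the low bits and the
 * requester's device id above AMDVI_DEVID_SHIFT, so one hash table can
 * serve all devices
 */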
static AMDVIIOTLBEntry *amdvi_iotlb_lookup(AMDVIState *s, hwaddr addr,
                                           uint64_t devid)
{
    uint64_t key = (addr >> AMDVI_PAGE_SHIFT_4K) |
                   ((uint64_t)(devid) << AMDVI_DEVID_SHIFT);
    return g_hash_table_lookup(s->iotlb, &key);
}

static void amdvi_iotlb_reset(AMDVIState *s)
{
    assert(s->iotlb);
    trace_amdvi_iotlb_reset();
    g_hash_table_remove_all(s->iotlb);
}

static gboolean amdvi_iotlb_remove_by_devid(gpointer key, gpointer value,
                                            gpointer user_data)
{
    AMDVIIOTLBEntry *entry = (AMDVIIOTLBEntry *)value;
    uint16_t devid = *(uint16_t *)user_data;
    return entry->devid == devid;
}

static void amdvi_iotlb_remove_page(AMDVIState *s, hwaddr addr,
                                    uint64_t devid)
{
    uint64_t key = (addr >> AMDVI_PAGE_SHIFT_4K) |
                   ((uint64_t)(devid) << AMDVI_DEVID_SHIFT);
    g_hash_table_remove(s->iotlb, &key);
}

static void amdvi_update_iotlb(AMDVIState *s, uint16_t devid,
                               uint64_t gpa, IOMMUTLBEntry to_cache,
                               uint16_t domid)
{
    /* don't cache erroneous translations */
    if (to_cache.perm != IOMMU_NONE) {
        AMDVIIOTLBEntry *entry = g_new(AMDVIIOTLBEntry, 1);
        uint64_t *key = g_new(uint64_t, 1);
        uint64_t gfn = gpa >> AMDVI_PAGE_SHIFT_4K;

        trace_amdvi_cache_update(domid, PCI_BUS_NUM(devid), PCI_SLOT(devid),
                PCI_FUNC(devid), gpa, to_cache.translated_addr);

        if (g_hash_table_size(s->iotlb) >= AMDVI_IOTLB_MAX_SIZE) {
            amdvi_iotlb_reset(s);
        }

        entry->domid = domid;
        entry->perms = to_cache.perm;
        entry->translated_addr = to_cache.translated_addr;
        entry->page_mask = to_cache.addr_mask;
        *key = gfn | ((uint64_t)(devid) << AMDVI_DEVID_SHIFT);
        g_hash_table_replace(s->iotlb, key, entry);
    }
}

static void amdvi_completion_wait(AMDVIState *s, uint64_t *cmd)
{
    /* pad the last 3 bits */
    hwaddr addr = cpu_to_le64(extract64(cmd[0], 3, 49)) << 3;
    uint64_t data = cpu_to_le64(cmd[1]);

    if (extract64(cmd[0], 51, 8)) {
        amdvi_log_illegalcom_error(s, extract64(cmd[0], 60, 4),
                                   s->cmdbuf + s->cmdbuf_head);
    }
    if (extract64(cmd[0], 0, 1)) {
        if (dma_memory_write(&address_space_memory, addr, &data,
            AMDVI_COMPLETION_DATA_SIZE)) {
            trace_amdvi_completion_wait_fail(addr);
        }
    }
    /* set completion interrupt */
    if (extract64(cmd[0], 1, 1)) {
        amdvi_assign_orq(s, AMDVI_MMIO_STATUS, AMDVI_MMIO_STATUS_COMP_INT);
        /* generate interrupt */
        amdvi_generate_msi_interrupt(s);
    }
    trace_amdvi_completion_wait(addr, data);
}

/* log error without aborting since Linux seems to be using reserved bits */
static void amdvi_inval_devtab_entry(AMDVIState *s, uint64_t *cmd)
{
    uint16_t devid = cpu_to_le16((uint16_t)extract64(cmd[0], 0, 16));

    /* this command should invalidate internal caches, of which there are none */
    if (extract64(cmd[0], 15, 16) || cmd[1]) {
        amdvi_log_illegalcom_error(s, extract64(cmd[0], 60, 4),
                                   s->cmdbuf + s->cmdbuf_head);
    }
    trace_amdvi_devtab_inval(PCI_BUS_NUM(devid), PCI_SLOT(devid),
                             PCI_FUNC(devid));
}

static void amdvi_complete_ppr(AMDVIState *s, uint64_t *cmd)
{
    if (extract64(cmd[0], 15, 16) || extract64(cmd[0], 19, 8) ||
        extract64(cmd[1], 0, 2) || extract64(cmd[1], 3, 29) ||
        extract64(cmd[1], 47, 16)) {
        amdvi_log_illegalcom_error(s, extract64(cmd[0], 60, 4),
                                   s->cmdbuf + s->cmdbuf_head);
    }
    trace_amdvi_ppr_exec();
}

static void amdvi_inval_all(AMDVIState *s, uint64_t *cmd)
{
    if (extract64(cmd[0], 0, 60) || cmd[1]) {
        amdvi_log_illegalcom_error(s, extract64(cmd[0], 60, 4),
                                   s->cmdbuf + s->cmdbuf_head);
    }

    amdvi_iotlb_reset(s);
    trace_amdvi_all_inval();
}

static gboolean amdvi_iotlb_remove_by_domid(gpointer key, gpointer value,
                                            gpointer user_data)
{
    AMDVIIOTLBEntry *entry = (AMDVIIOTLBEntry *)value;
    uint16_t domid = *(uint16_t *)user_data;
    return entry->domid == domid;
}

/* we don't have devid - we can't remove pages by address */
static void amdvi_inval_pages(AMDVIState *s, uint64_t *cmd)
{
    uint16_t domid = cpu_to_le16((uint16_t)extract64(cmd[0], 32, 16));

    if (extract64(cmd[0], 20, 12) || extract64(cmd[0], 16, 12) ||
        extract64(cmd[0], 3, 10)) {
        amdvi_log_illegalcom_error(s, extract64(cmd[0], 60, 4),
                                   s->cmdbuf + s->cmdbuf_head);
    }

    g_hash_table_foreach_remove(s->iotlb, amdvi_iotlb_remove_by_domid,
                                &domid);
    trace_amdvi_pages_inval(domid);
}

static void amdvi_prefetch_pages(AMDVIState *s, uint64_t *cmd)
{
    if (extract64(cmd[0], 16, 8) || extract64(cmd[0], 20, 8) ||
        extract64(cmd[1], 1, 1) || extract64(cmd[1], 3, 1) ||
        extract64(cmd[1], 5, 7)) {
        amdvi_log_illegalcom_error(s, extract64(cmd[0], 60, 4),
                                   s->cmdbuf + s->cmdbuf_head);
    }

    trace_amdvi_prefetch_pages();
}

static void amdvi_inval_inttable(AMDVIState *s, uint64_t *cmd)
{
    if (extract64(cmd[0], 16, 16) || cmd[1]) {
        amdvi_log_illegalcom_error(s, extract64(cmd[0], 60, 4),
                                   s->cmdbuf + s->cmdbuf_head);
        return;
    }

    trace_amdvi_intr_inval();
}

/* FIXME: Try to work with the specified size instead of all the pages
 * when the S bit is on
 */
static void iommu_inval_iotlb(AMDVIState *s, uint64_t *cmd)
{
    uint16_t devid = extract64(cmd[0], 0, 16);

    if (extract64(cmd[1], 1, 1) || extract64(cmd[1], 3, 9)) {
        amdvi_log_illegalcom_error(s, extract64(cmd[0], 60, 4),
                                   s->cmdbuf + s->cmdbuf_head);
        return;
    }

    if (extract64(cmd[1], 0, 1)) {
        g_hash_table_foreach_remove(s->iotlb, amdvi_iotlb_remove_by_devid,
                                    &devid);
    } else {
        amdvi_iotlb_remove_page(s, cpu_to_le64(extract64(cmd[1], 12, 52)) << 12,
                                cpu_to_le16(extract64(cmd[1], 0, 16)));
    }
    trace_amdvi_iotlb_inval();
}

/* not honouring reserved bits is regarded as an illegal command */
static void amdvi_cmdbuf_exec(AMDVIState *s)
{
    uint64_t cmd[2];

    if (dma_memory_read(&address_space_memory, s->cmdbuf + s->cmdbuf_head,
        cmd, AMDVI_COMMAND_SIZE)) {
        trace_amdvi_command_read_fail(s->cmdbuf, s->cmdbuf_head);
        amdvi_log_command_error(s, s->cmdbuf + s->cmdbuf_head);
        return;
    }

    switch (extract64(cmd[0], 60, 4)) {
    case AMDVI_CMD_COMPLETION_WAIT:
        amdvi_completion_wait(s, cmd);
        break;
    case AMDVI_CMD_INVAL_DEVTAB_ENTRY:
        amdvi_inval_devtab_entry(s, cmd);
        break;
    case AMDVI_CMD_INVAL_AMDVI_PAGES:
        amdvi_inval_pages(s, cmd);
        break;
    case AMDVI_CMD_INVAL_IOTLB_PAGES:
        iommu_inval_iotlb(s, cmd);
        break;
    case AMDVI_CMD_INVAL_INTR_TABLE:
        amdvi_inval_inttable(s, cmd);
        break;
    case AMDVI_CMD_PREFETCH_AMDVI_PAGES:
        amdvi_prefetch_pages(s, cmd);
        break;
    case AMDVI_CMD_COMPLETE_PPR_REQUEST:
        amdvi_complete_ppr(s, cmd);
        break;
    case AMDVI_CMD_INVAL_AMDVI_ALL:
        amdvi_inval_all(s, cmd);
        break;
    default:
        trace_amdvi_unhandled_command(extract64(cmd[0], 60, 4));
        /* log illegal command */
        amdvi_log_illegalcom_error(s, extract64(cmd[0], 60, 4),
                                   s->cmdbuf + s->cmdbuf_head);
    }
}

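/* commands are 128 bits each; the head pointer chases the tail around the
 * ring, wrapping after cmdbuf_len entries
 */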
static void amdvi_cmdbuf_run(AMDVIState *s)
{
    if (!s->cmdbuf_enabled) {
        trace_amdvi_command_error(amdvi_readq(s, AMDVI_MMIO_CONTROL));
        return;
    }

    /* check if there is work to do. */
    while (s->cmdbuf_head != s->cmdbuf_tail) {
        trace_amdvi_command_exec(s->cmdbuf_head, s->cmdbuf_tail, s->cmdbuf);
        amdvi_cmdbuf_exec(s);
        s->cmdbuf_head += AMDVI_COMMAND_SIZE;
        amdvi_writeq_raw(s, AMDVI_MMIO_COMMAND_HEAD, s->cmdbuf_head);

        /* wrap head pointer */
        if (s->cmdbuf_head >= s->cmdbuf_len * AMDVI_COMMAND_SIZE) {
            s->cmdbuf_head = 0;
        }
    }
}

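/* registers below offset 0x2000 are traced against the low name table, the
 * rest against the high one; out-of-range indices clamp to "UNHANDLED"
 */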
static void amdvi_mmio_trace(hwaddr addr, unsigned size)
{
    uint8_t index = (addr & ~0x2000) / 8;

    if ((addr & 0x2000)) {
        /* high table */
        index = index >= AMDVI_MMIO_REGS_HIGH ? AMDVI_MMIO_REGS_HIGH : index;
        trace_amdvi_mmio_read(amdvi_mmio_high[index], addr, size, addr & ~0x07);
    } else {
        index = index >= AMDVI_MMIO_REGS_LOW ? AMDVI_MMIO_REGS_LOW : index;
        trace_amdvi_mmio_read(amdvi_mmio_low[index], addr, size, addr & ~0x07);
    }
}

static uint64_t amdvi_mmio_read(void *opaque, hwaddr addr, unsigned size)
{
    AMDVIState *s = opaque;

    uint64_t val = -1;
    if (addr + size > AMDVI_MMIO_SIZE) {
        trace_amdvi_mmio_read_invalid(AMDVI_MMIO_SIZE, addr, size);
        return (uint64_t)-1;
    }

    if (size == 2) {
        val = amdvi_readw(s, addr);
    } else if (size == 4) {
        val = amdvi_readl(s, addr);
    } else if (size == 8) {
        val = amdvi_readq(s, addr);
    }
    amdvi_mmio_trace(addr, size);

    return val;
}

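/* sync the cached enable flags with the control register and start or stop
 * command buffer processing and event logging accordingly
 */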
static void amdvi_handle_control_write(AMDVIState *s)
{
    uint64_t control = amdvi_readq(s, AMDVI_MMIO_CONTROL);
    s->enabled = !!(control & AMDVI_MMIO_CONTROL_AMDVIEN);

    s->ats_enabled = !!(control & AMDVI_MMIO_CONTROL_HTTUNEN);
    s->evtlog_enabled = s->enabled && !!(control &
                        AMDVI_MMIO_CONTROL_EVENTLOGEN);

    s->evtlog_intr = !!(control & AMDVI_MMIO_CONTROL_EVENTINTEN);
    s->completion_wait_intr = !!(control & AMDVI_MMIO_CONTROL_COMWAITINTEN);
    s->cmdbuf_enabled = s->enabled && !!(control &
                        AMDVI_MMIO_CONTROL_CMDBUFLEN);

    /* update the flags depending on the control register */
    if (s->cmdbuf_enabled) {
        amdvi_assign_orq(s, AMDVI_MMIO_STATUS, AMDVI_MMIO_STATUS_CMDBUF_RUN);
    } else {
        amdvi_assign_andq(s, AMDVI_MMIO_STATUS, ~AMDVI_MMIO_STATUS_CMDBUF_RUN);
    }
    if (s->evtlog_enabled) {
        amdvi_assign_orq(s, AMDVI_MMIO_STATUS, AMDVI_MMIO_STATUS_EVT_RUN);
    } else {
        amdvi_assign_andq(s, AMDVI_MMIO_STATUS, ~AMDVI_MMIO_STATUS_EVT_RUN);
    }

    trace_amdvi_control_status(control);
    amdvi_cmdbuf_run(s);
}

static inline void amdvi_handle_devtab_write(AMDVIState *s)
{
    uint64_t val = amdvi_readq(s, AMDVI_MMIO_DEVICE_TABLE);
    s->devtab = (val & AMDVI_MMIO_DEVTAB_BASE_MASK);

    /* set device table length */
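    /* (size field + 1) chunks of AMDVI_MMIO_DEVTAB_SIZE_UNIT bytes, counted
     * in AMDVI_MMIO_DEVTAB_ENTRY_SIZE-byte entries
     */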
    s->devtab_len = ((val & AMDVI_MMIO_DEVTAB_SIZE_MASK) + 1) *
                    (AMDVI_MMIO_DEVTAB_SIZE_UNIT /
                     AMDVI_MMIO_DEVTAB_ENTRY_SIZE);
}

static inline void amdvi_handle_cmdhead_write(AMDVIState *s)
{
    s->cmdbuf_head = amdvi_readq(s, AMDVI_MMIO_COMMAND_HEAD)
                     & AMDVI_MMIO_CMDBUF_HEAD_MASK;
    amdvi_cmdbuf_run(s);
}

static inline void amdvi_handle_cmdbase_write(AMDVIState *s)
{
    s->cmdbuf = amdvi_readq(s, AMDVI_MMIO_COMMAND_BASE)
                & AMDVI_MMIO_CMDBUF_BASE_MASK;
    s->cmdbuf_len = 1UL << (amdvi_readq(s, AMDVI_MMIO_CMDBUF_SIZE_BYTE)
                    & AMDVI_MMIO_CMDBUF_SIZE_MASK);
    s->cmdbuf_head = s->cmdbuf_tail = 0;
}

static inline void amdvi_handle_cmdtail_write(AMDVIState *s)
{
    s->cmdbuf_tail = amdvi_readq(s, AMDVI_MMIO_COMMAND_TAIL)
                     & AMDVI_MMIO_CMDBUF_TAIL_MASK;
    amdvi_cmdbuf_run(s);
}

static inline void amdvi_handle_excllim_write(AMDVIState *s)
{
    uint64_t val = amdvi_readq(s, AMDVI_MMIO_EXCL_LIMIT);
    s->excl_limit = (val & AMDVI_MMIO_EXCL_LIMIT_MASK) |
                    AMDVI_MMIO_EXCL_LIMIT_LOW;
}

static inline void amdvi_handle_evtbase_write(AMDVIState *s)
{
    uint64_t val = amdvi_readq(s, AMDVI_MMIO_EVENT_BASE);
    s->evtlog = val & AMDVI_MMIO_EVTLOG_BASE_MASK;
    s->evtlog_len = 1UL << (amdvi_readq(s, AMDVI_MMIO_EVTLOG_SIZE_BYTE)
                    & AMDVI_MMIO_EVTLOG_SIZE_MASK);
}

static inline void amdvi_handle_evttail_write(AMDVIState *s)
{
    uint64_t val = amdvi_readq(s, AMDVI_MMIO_EVENT_TAIL);
    s->evtlog_tail = val & AMDVI_MMIO_EVTLOG_TAIL_MASK;
}

static inline void amdvi_handle_evthead_write(AMDVIState *s)
{
    uint64_t val = amdvi_readq(s, AMDVI_MMIO_EVENT_HEAD);
    s->evtlog_head = val & AMDVI_MMIO_EVTLOG_HEAD_MASK;
}

static inline void amdvi_handle_pprbase_write(AMDVIState *s)
{
    uint64_t val = amdvi_readq(s, AMDVI_MMIO_PPR_BASE);
    s->ppr_log = val & AMDVI_MMIO_PPRLOG_BASE_MASK;
    s->pprlog_len = 1UL << (amdvi_readq(s, AMDVI_MMIO_PPRLOG_SIZE_BYTE)
                    & AMDVI_MMIO_PPRLOG_SIZE_MASK);
}

static inline void amdvi_handle_pprhead_write(AMDVIState *s)
{
    uint64_t val = amdvi_readq(s, AMDVI_MMIO_PPR_HEAD);
    s->pprlog_head = val & AMDVI_MMIO_PPRLOG_HEAD_MASK;
}

static inline void amdvi_handle_pprtail_write(AMDVIState *s)
{
    uint64_t val = amdvi_readq(s, AMDVI_MMIO_PPR_TAIL);
    s->pprlog_tail = val & AMDVI_MMIO_PPRLOG_TAIL_MASK;
}

/* FIXME: something might go wrong if System Software writes in chunks
 * of one byte. Linux writes in chunks of 4 bytes, so this currently
 * works correctly with Linux, but it will definitely be broken if
 * software reads/writes 8 bytes.
 */
static void amdvi_mmio_reg_write(AMDVIState *s, unsigned size, uint64_t val,
                                 hwaddr addr)
{
    if (size == 2) {
        amdvi_writew(s, addr, val);
    } else if (size == 4) {
        amdvi_writel(s, addr, val);
    } else if (size == 8) {
        amdvi_writeq(s, addr, val);
    }
}

static void amdvi_mmio_write(void *opaque, hwaddr addr, uint64_t val,
                             unsigned size)
{
    AMDVIState *s = opaque;
    unsigned long offset = addr & 0x07;

    if (addr + size > AMDVI_MMIO_SIZE) {
        trace_amdvi_mmio_write("error: addr outside region: max ",
                (uint64_t)AMDVI_MMIO_SIZE, size, val, offset);
        return;
    }

    amdvi_mmio_trace(addr, size);
    switch (addr & ~0x07) {
    case AMDVI_MMIO_CONTROL:
        amdvi_mmio_reg_write(s, size, val, addr);
        amdvi_handle_control_write(s);
        break;
    case AMDVI_MMIO_DEVICE_TABLE:
        amdvi_mmio_reg_write(s, size, val, addr);
        /* set device table address
         * This also suffers from the inability to tell whether software
         * is done writing
         */
        if (offset || (size == 8)) {
            amdvi_handle_devtab_write(s);
        }
        break;
    case AMDVI_MMIO_COMMAND_HEAD:
        amdvi_mmio_reg_write(s, size, val, addr);
        amdvi_handle_cmdhead_write(s);
        break;
    case AMDVI_MMIO_COMMAND_BASE:
        amdvi_mmio_reg_write(s, size, val, addr);
        /* FIXME - make sure System Software has finished writing, in case
         * it writes in chunks of less than 8 bytes, in a robust way. For
         * now, this hack works for the Linux driver.
         */
        if (offset || (size == 8)) {
            amdvi_handle_cmdbase_write(s);
        }
        break;
    case AMDVI_MMIO_COMMAND_TAIL:
        amdvi_mmio_reg_write(s, size, val, addr);
        amdvi_handle_cmdtail_write(s);
        break;
    case AMDVI_MMIO_EVENT_BASE:
        amdvi_mmio_reg_write(s, size, val, addr);
        amdvi_handle_evtbase_write(s);
        break;
    case AMDVI_MMIO_EVENT_HEAD:
        amdvi_mmio_reg_write(s, size, val, addr);
        amdvi_handle_evthead_write(s);
        break;
    case AMDVI_MMIO_EVENT_TAIL:
        amdvi_mmio_reg_write(s, size, val, addr);
        amdvi_handle_evttail_write(s);
        break;
    case AMDVI_MMIO_EXCL_LIMIT:
        amdvi_mmio_reg_write(s, size, val, addr);
        amdvi_handle_excllim_write(s);
        break;
        /* PPR log base - unused for now */
    case AMDVI_MMIO_PPR_BASE:
        amdvi_mmio_reg_write(s, size, val, addr);
        amdvi_handle_pprbase_write(s);
        break;
        /* PPR log head - also unused for now */
    case AMDVI_MMIO_PPR_HEAD:
        amdvi_mmio_reg_write(s, size, val, addr);
        amdvi_handle_pprhead_write(s);
        break;
        /* PPR log tail - unused for now */
    case AMDVI_MMIO_PPR_TAIL:
        amdvi_mmio_reg_write(s, size, val, addr);
        amdvi_handle_pprtail_write(s);
        break;
    }
}

static inline uint64_t amdvi_get_perms(uint64_t entry)
{
    return (entry & (AMDVI_DEV_PERM_READ | AMDVI_DEV_PERM_WRITE)) >>
           AMDVI_DEV_PERM_SHIFT;
}

/* a valid entry should have V = 1 and reserved bits honoured */
static bool amdvi_validate_dte(AMDVIState *s, uint16_t devid,
                               uint64_t *dte)
{
    if ((dte[0] & AMDVI_DTE_LOWER_QUAD_RESERVED)
        || (dte[1] & AMDVI_DTE_MIDDLE_QUAD_RESERVED)
        || (dte[2] & AMDVI_DTE_UPPER_QUAD_RESERVED) || dte[3]) {
        amdvi_log_illegaldevtab_error(s, devid,
                                      s->devtab +
                                      devid * AMDVI_DEVTAB_ENTRY_SIZE, 0);
        return false;
    }

    return dte[0] & AMDVI_DEV_VALID;
}

/* get a device table entry given the devid */
static bool amdvi_get_dte(AMDVIState *s, int devid, uint64_t *entry)
{
    uint32_t offset = devid * AMDVI_DEVTAB_ENTRY_SIZE;

    if (dma_memory_read(&address_space_memory, s->devtab + offset, entry,
        AMDVI_DEVTAB_ENTRY_SIZE)) {
        trace_amdvi_dte_get_fail(s->devtab, offset);
        /* log error accessing dte */
        amdvi_log_devtab_error(s, devid, s->devtab + offset, 0);
        return false;
    }

    *entry = le64_to_cpu(*entry);
    if (!amdvi_validate_dte(s, devid, entry)) {
        trace_amdvi_invalid_dte(entry[0]);
        return false;
    }

    return true;
}

/* get pte translation mode */
static inline uint8_t get_pte_translation_mode(uint64_t pte)
{
    return (pte >> AMDVI_DEV_MODE_RSHIFT) & AMDVI_DEV_MODE_MASK;
}

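/* for mode-7 PTEs the run of low-order 1 bits in the address field encodes
 * the page size: the first zero bit, scanning up from bit 12, gives
 * log2(page size)
 */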
static inline uint64_t pte_override_page_mask(uint64_t pte)
{
    uint8_t page_mask = 13;
    uint64_t addr = (pte & AMDVI_DEV_PT_ROOT_MASK) >> 12;
    /* find the first zero bit */
    while (addr & 1) {
        page_mask++;
        addr = addr >> 1;
    }

    return ~((1ULL << page_mask) - 1);
}

static inline uint64_t pte_get_page_mask(uint64_t oldlevel)
{
    return ~((1ULL << ((oldlevel * 9) + 3)) - 1);
}

static inline uint64_t amdvi_get_pte_entry(AMDVIState *s, uint64_t pte_addr,
                                          uint16_t devid)
{
    uint64_t pte;

    if (dma_memory_read(&address_space_memory, pte_addr, &pte, sizeof(pte))) {
        trace_amdvi_get_pte_hwerror(pte_addr);
        amdvi_log_pagetab_error(s, devid, pte_addr, 0);
        pte = 0;
        return pte;
    }

    pte = le64_to_cpu(pte);
    return pte;
}

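/* each level of the walk resolves 9 bits of the address; a next-level
 * field of 7 marks a page whose size is encoded in the PTE itself (see
 * pte_override_page_mask)
 */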
static void amdvi_page_walk(AMDVIAddressSpace *as, uint64_t *dte,
                            IOMMUTLBEntry *ret, unsigned perms,
                            hwaddr addr)
{
    unsigned level, present, pte_perms, oldlevel;
    uint64_t pte = dte[0], pte_addr, page_mask;

    /* make sure the DTE has TV = 1 */
    if (pte & AMDVI_DEV_TRANSLATION_VALID) {
        level = get_pte_translation_mode(pte);
        if (level >= 7) {
            trace_amdvi_mode_invalid(level, addr);
            return;
        }
        if (level == 0) {
            goto no_remap;
        }

        /* we are at the leaf page table or page table encodes a huge page */
        while (level > 0) {
            pte_perms = amdvi_get_perms(pte);
            present = pte & 1;
            if (!present || perms != (perms & pte_perms)) {
                amdvi_page_fault(as->iommu_state, as->devfn, addr, perms);
                trace_amdvi_page_fault(addr);
                return;
            }

            /* go to the next lower level */
            pte_addr = pte & AMDVI_DEV_PT_ROOT_MASK;
            /* add offset and load pte */
            pte_addr += ((addr >> (3 + 9 * level)) & 0x1FF) << 3;
            pte = amdvi_get_pte_entry(as->iommu_state, pte_addr, as->devfn);
            if (!pte) {
                return;
            }
            oldlevel = level;
            level = get_pte_translation_mode(pte);
            if (level == 0x7) {
                break;
            }
        }

        if (level == 0x7) {
            page_mask = pte_override_page_mask(pte);
        } else {
            page_mask = pte_get_page_mask(oldlevel);
        }

        /* get access permissions from pte */
        ret->iova = addr & page_mask;
        ret->translated_addr = (pte & AMDVI_DEV_PT_ROOT_MASK) & page_mask;
        ret->addr_mask = ~page_mask;
        ret->perm = amdvi_get_perms(pte);
        return;
    }
no_remap:
    ret->iova = addr & AMDVI_PAGE_MASK_4K;
    ret->translated_addr = addr & AMDVI_PAGE_MASK_4K;
    ret->addr_mask = ~AMDVI_PAGE_MASK_4K;
    ret->perm = amdvi_get_perms(pte);
}

static void amdvi_do_translate(AMDVIAddressSpace *as, hwaddr addr,
                               bool is_write, IOMMUTLBEntry *ret)
{
    AMDVIState *s = as->iommu_state;
    uint16_t devid = PCI_BUILD_BDF(as->bus_num, as->devfn);
    AMDVIIOTLBEntry *iotlb_entry = amdvi_iotlb_lookup(s, addr, devid);
    uint64_t entry[4];

    if (iotlb_entry) {
        trace_amdvi_iotlb_hit(PCI_BUS_NUM(devid), PCI_SLOT(devid),
                PCI_FUNC(devid), addr, iotlb_entry->translated_addr);
        ret->iova = addr & ~iotlb_entry->page_mask;
        ret->translated_addr = iotlb_entry->translated_addr;
        ret->addr_mask = iotlb_entry->page_mask;
        ret->perm = iotlb_entry->perms;
        return;
    }

    /* devices with V = 0 are not translated */
    if (!amdvi_get_dte(s, devid, entry)) {
        goto out;
    }

    amdvi_page_walk(as, entry, ret,
                    is_write ? AMDVI_PERM_WRITE : AMDVI_PERM_READ, addr);

    amdvi_update_iotlb(s, devid, addr, *ret,
                       entry[1] & AMDVI_DEV_DOMID_ID_MASK);
    return;

out:
    ret->iova = addr & AMDVI_PAGE_MASK_4K;
    ret->translated_addr = addr & AMDVI_PAGE_MASK_4K;
    ret->addr_mask = ~AMDVI_PAGE_MASK_4K;
    ret->perm = IOMMU_RW;
}

static inline bool amdvi_is_interrupt_addr(hwaddr addr)
{
    return addr >= AMDVI_INT_ADDR_FIRST && addr <= AMDVI_INT_ADDR_LAST;
}

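/* identity-map everything while the IOMMU is disabled, keep the interrupt
 * address range write-only and untranslated, and otherwise translate via
 * the IOTLB and a device table + page table walk
 */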
static IOMMUTLBEntry amdvi_translate(MemoryRegion *iommu, hwaddr addr,
                                     bool is_write)
{
    AMDVIAddressSpace *as = container_of(iommu, AMDVIAddressSpace, iommu);
    AMDVIState *s = as->iommu_state;
    IOMMUTLBEntry ret = {
        .target_as = &address_space_memory,
        .iova = addr,
        .translated_addr = 0,
        .addr_mask = ~(hwaddr)0,
        .perm = IOMMU_NONE
    };

    if (!s->enabled) {
        /* AMDVI disabled - corresponds to iommu=off, not
         * failure to provide any parameter
         */
        ret.iova = addr & AMDVI_PAGE_MASK_4K;
        ret.translated_addr = addr & AMDVI_PAGE_MASK_4K;
        ret.addr_mask = ~AMDVI_PAGE_MASK_4K;
        ret.perm = IOMMU_RW;
        return ret;
    } else if (amdvi_is_interrupt_addr(addr)) {
        ret.iova = addr & AMDVI_PAGE_MASK_4K;
        ret.translated_addr = addr & AMDVI_PAGE_MASK_4K;
        ret.addr_mask = ~AMDVI_PAGE_MASK_4K;
        ret.perm = IOMMU_WO;
        return ret;
    }

    amdvi_do_translate(as, addr, is_write, &ret);
    trace_amdvi_translation_result(as->bus_num, PCI_SLOT(as->devfn),
            PCI_FUNC(as->devfn), addr, ret.translated_addr);
    return ret;
}

static AddressSpace *amdvi_host_dma_iommu(PCIBus *bus, void *opaque, int devfn)
{
    AMDVIState *s = opaque;
    AMDVIAddressSpace **iommu_as;
    int bus_num = pci_bus_num(bus);

    iommu_as = s->address_spaces[bus_num];

    /* allocate memory during the first run */
    if (!iommu_as) {
        iommu_as = g_malloc0(sizeof(AMDVIAddressSpace *) * PCI_DEVFN_MAX);
        s->address_spaces[bus_num] = iommu_as;
    }

    /* set up AMD-Vi region */
    if (!iommu_as[devfn]) {
        iommu_as[devfn] = g_malloc0(sizeof(AMDVIAddressSpace));
        iommu_as[devfn]->bus_num = (uint8_t)bus_num;
        iommu_as[devfn]->devfn = (uint8_t)devfn;
        iommu_as[devfn]->iommu_state = s;

        memory_region_init_iommu(&iommu_as[devfn]->iommu, OBJECT(s),
                                 &s->iommu_ops, "amd-iommu", UINT64_MAX);
        address_space_init(&iommu_as[devfn]->as, &iommu_as[devfn]->iommu,
                           "amd-iommu");
    }
    return &iommu_as[devfn]->as;
}

static const MemoryRegionOps mmio_mem_ops = {
    .read = amdvi_mmio_read,
    .write = amdvi_mmio_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = false,
    },
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    }
};

static void amdvi_iommu_notify_flag_changed(MemoryRegion *iommu,
                                            IOMMUNotifierFlag old,
                                            IOMMUNotifierFlag new)
{
    AMDVIAddressSpace *as = container_of(iommu, AMDVIAddressSpace, iommu);

    if (new & IOMMU_NOTIFIER_MAP) {
        error_report("device %02x.%02x.%x requires iommu notifier which is not "
                     "currently supported", as->bus_num, PCI_SLOT(as->devfn),
                     PCI_FUNC(as->devfn));
        exit(1);
    }
}

static void amdvi_init(AMDVIState *s)
{
    amdvi_iotlb_reset(s);

    s->iommu_ops.translate = amdvi_translate;
    s->iommu_ops.notify_flag_changed = amdvi_iommu_notify_flag_changed;
    s->devtab_len = 0;
    s->cmdbuf_len = 0;
    s->cmdbuf_head = 0;
    s->cmdbuf_tail = 0;
    s->evtlog_head = 0;
    s->evtlog_tail = 0;
    s->excl_enabled = false;
    s->excl_allow = false;
    s->mmio_enabled = false;
    s->enabled = false;
    s->ats_enabled = false;
    s->cmdbuf_enabled = false;

    /* reset MMIO */
    memset(s->mmior, 0, AMDVI_MMIO_SIZE);
    amdvi_set_quad(s, AMDVI_MMIO_EXT_FEATURES, AMDVI_EXT_FEATURES,
            0xffffffffffffffef, 0);
    amdvi_set_quad(s, AMDVI_MMIO_STATUS, 0, 0x98, 0x67);

    /* reset device ident */
    pci_config_set_vendor_id(s->pci.dev.config, PCI_VENDOR_ID_AMD);
    pci_config_set_prog_interface(s->pci.dev.config, 00);
    pci_config_set_device_id(s->pci.dev.config, s->devid);
    pci_config_set_class(s->pci.dev.config, 0x0806);

    /* reset AMDVI specific capabilities, all r/o */
    pci_set_long(s->pci.dev.config + s->capab_offset, AMDVI_CAPAB_FEATURES);
    pci_set_long(s->pci.dev.config + s->capab_offset + AMDVI_CAPAB_BAR_LOW,
                 s->mmio.addr & ~(0xffff0000));
    pci_set_long(s->pci.dev.config + s->capab_offset + AMDVI_CAPAB_BAR_HIGH,
                (s->mmio.addr & ~(0xffff)) >> 16);
    pci_set_long(s->pci.dev.config + s->capab_offset + AMDVI_CAPAB_RANGE,
                 0xff000000);
    pci_set_long(s->pci.dev.config + s->capab_offset + AMDVI_CAPAB_MISC, 0);
    pci_set_long(s->pci.dev.config + s->capab_offset + AMDVI_CAPAB_MISC,
            AMDVI_MAX_PH_ADDR | AMDVI_MAX_GVA_ADDR | AMDVI_MAX_VA_ADDR);
}

static void amdvi_reset(DeviceState *dev)
{
    AMDVIState *s = AMD_IOMMU_DEVICE(dev);

    msi_reset(&s->pci.dev);
    amdvi_init(s);
}

static void amdvi_realize(DeviceState *dev, Error **err)
{
    int ret = 0;
    AMDVIState *s = AMD_IOMMU_DEVICE(dev);
    X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(dev);
    PCIBus *bus = PC_MACHINE(qdev_get_machine())->bus;
    s->iotlb = g_hash_table_new_full(amdvi_uint64_hash,
                                     amdvi_uint64_equal, g_free, g_free);

    /* This device should take care of IOMMU PCI properties */
    x86_iommu->type = TYPE_AMD;
    qdev_set_parent_bus(DEVICE(&s->pci), &bus->qbus);
    object_property_set_bool(OBJECT(&s->pci), true, "realized", err);
    s->capab_offset = pci_add_capability(&s->pci.dev, AMDVI_CAPAB_ID_SEC, 0,
                                         AMDVI_CAPAB_SIZE);
    assert(s->capab_offset > 0);
    ret = pci_add_capability(&s->pci.dev, PCI_CAP_ID_MSI, 0, AMDVI_CAPAB_REG_SIZE);
    assert(ret > 0);
    ret = pci_add_capability(&s->pci.dev, PCI_CAP_ID_HT, 0, AMDVI_CAPAB_REG_SIZE);
    assert(ret > 0);

    /* set up MMIO */
    memory_region_init_io(&s->mmio, OBJECT(s), &mmio_mem_ops, s, "amdvi-mmio",
                          AMDVI_MMIO_SIZE);

    sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->mmio);
    sysbus_mmio_map(SYS_BUS_DEVICE(s), 0, AMDVI_BASE_ADDR);
    pci_setup_iommu(bus, amdvi_host_dma_iommu, s);
    s->devid = object_property_get_int(OBJECT(&s->pci), "addr", err);
    msi_init(&s->pci.dev, 0, 1, true, false, err);
    amdvi_init(s);
}

static const VMStateDescription vmstate_amdvi = {
    .name = "amd-iommu",
    .unmigratable = 1
};

static void amdvi_instance_init(Object *klass)
{
    AMDVIState *s = AMD_IOMMU_DEVICE(klass);

    object_initialize(&s->pci, sizeof(s->pci), TYPE_AMD_IOMMU_PCI);
}

static void amdvi_class_init(ObjectClass *klass, void* data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    X86IOMMUClass *dc_class = X86_IOMMU_CLASS(klass);

    dc->reset = amdvi_reset;
    dc->vmsd = &vmstate_amdvi;
    dc->hotpluggable = false;
    dc_class->realize = amdvi_realize;
}

static const TypeInfo amdvi = {
    .name = TYPE_AMD_IOMMU_DEVICE,
    .parent = TYPE_X86_IOMMU_DEVICE,
    .instance_size = sizeof(AMDVIState),
    .instance_init = amdvi_instance_init,
    .class_init = amdvi_class_init
};

static const TypeInfo amdviPCI = {
    .name = "AMDVI-PCI",
    .parent = TYPE_PCI_DEVICE,
    .instance_size = sizeof(AMDVIPCIState),
};

static void amdviPCI_register_types(void)
{
    type_register_static(&amdviPCI);
    type_register_static(&amdvi);
}

type_init(amdviPCI_register_types);