xref: /openbmc/qemu/hw/ppc/spapr_iommu.c (revision 14a650ec)
/*
 * QEMU sPAPR IOMMU (TCE) code
 *
 * Copyright (c) 2010 David Gibson, IBM Corporation <dwg@au1.ibm.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "hw/hw.h"
#include "sysemu/kvm.h"
#include "hw/qdev.h"
#include "kvm_ppc.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#include "trace.h"

#include "hw/ppc/spapr.h"

#include <libfdt.h>

enum sPAPRTCEAccess {
    SPAPR_TCE_FAULT = 0,
    SPAPR_TCE_RO = 1,
    SPAPR_TCE_WO = 2,
    SPAPR_TCE_RW = 3,
};

QLIST_HEAD(spapr_tce_tables, sPAPRTCETable) spapr_tce_tables;

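/*
 * Look up a TCE table by its Logical I/O Bus Number (LIOBN) in the
 * global list of registered tables.  Returns NULL if the LIOBN is out
 * of range or no table with that LIOBN exists.
 */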
static sPAPRTCETable *spapr_tce_find_by_liobn(target_ulong liobn)
{
    sPAPRTCETable *tcet;

    if (liobn & 0xFFFFFFFF00000000ULL) {
        hcall_dprintf("Request for out-of-bounds LIOBN 0x" TARGET_FMT_lx "\n",
                      liobn);
        return NULL;
    }

    QLIST_FOREACH(tcet, &spapr_tce_tables, list) {
        if (tcet->liobn == liobn) {
            return tcet;
        }
    }

    return NULL;
}

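/*
 * IOMMU MemoryRegion translate callback: convert a device I/O virtual
 * address into a guest physical address via the TCE table, or map 1:1
 * with full access when the table is in bypass mode.  Addresses outside
 * the DMA window translate to a faulting (IOMMU_NONE) entry.
 */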
static IOMMUTLBEntry spapr_tce_translate_iommu(MemoryRegion *iommu, hwaddr addr)
{
    sPAPRTCETable *tcet = container_of(iommu, sPAPRTCETable, iommu);
    uint64_t tce;
    IOMMUTLBEntry ret = {
        .target_as = &address_space_memory,
        .iova = 0,
        .translated_addr = 0,
        .addr_mask = ~(hwaddr)0,
        .perm = IOMMU_NONE,
    };

    if (tcet->bypass) {
        ret.perm = IOMMU_RW;
    } else if (addr < tcet->window_size) {
        /* Check that the address is within the DMA window */
        tce = tcet->table[addr >> SPAPR_TCE_PAGE_SHIFT];
        ret.iova = addr & ~SPAPR_TCE_PAGE_MASK;
        ret.translated_addr = tce & ~SPAPR_TCE_PAGE_MASK;
        ret.addr_mask = SPAPR_TCE_PAGE_MASK;
        ret.perm = tce;
    }
    trace_spapr_iommu_xlate(tcet->liobn, addr, ret.iova, ret.perm,
                            ret.addr_mask);

    return ret;
}

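/*
 * Derive nb_table from the table's window size before an incoming
 * migration stream is loaded, so that VMSTATE_VARRAY_UINT32 below
 * restores the right number of TCE entries.
 */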
static int spapr_tce_table_pre_load(void *opaque)
{
    sPAPRTCETable *tcet = SPAPR_TCE_TABLE(opaque);

    tcet->nb_table = tcet->window_size >> SPAPR_TCE_PAGE_SHIFT;

    return 0;
}

static const VMStateDescription vmstate_spapr_tce_table = {
    .name = "spapr_iommu",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .pre_load = spapr_tce_table_pre_load,
    .fields      = (VMStateField []) {
        /* Sanity check */
        VMSTATE_UINT32_EQUAL(liobn, sPAPRTCETable),
        VMSTATE_UINT32_EQUAL(window_size, sPAPRTCETable),

        /* IOMMU state */
        VMSTATE_BOOL(bypass, sPAPRTCETable),
        VMSTATE_VARRAY_UINT32(table, sPAPRTCETable, nb_table, 0,
                              vmstate_info_uint64, uint64_t),

        VMSTATE_END_OF_LIST()
    },
};

static MemoryRegionIOMMUOps spapr_iommu_ops = {
    .translate = spapr_tce_translate_iommu,
};

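/*
 * qdev init hook: allocate the TCE table, preferring a KVM-accelerated
 * table (backed by a file descriptor) when KVM is available and falling
 * back to a zeroed userspace allocation otherwise, then expose it as an
 * IOMMU memory region and add it to the global list of tables.
 */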
static int spapr_tce_table_realize(DeviceState *dev)
{
    sPAPRTCETable *tcet = SPAPR_TCE_TABLE(dev);

    if (kvm_enabled()) {
        tcet->table = kvmppc_create_spapr_tce(tcet->liobn,
                                              tcet->window_size,
                                              &tcet->fd);
    }

    if (!tcet->table) {
        size_t table_size = (tcet->window_size >> SPAPR_TCE_PAGE_SHIFT)
            * sizeof(uint64_t);
        tcet->table = g_malloc0(table_size);
    }
    tcet->nb_table = tcet->window_size >> SPAPR_TCE_PAGE_SHIFT;

    trace_spapr_iommu_new_table(tcet->liobn, tcet, tcet->table, tcet->fd);

    memory_region_init_iommu(&tcet->iommu, OBJECT(dev), &spapr_iommu_ops,
                             "iommu-spapr", UINT64_MAX);

    QLIST_INSERT_HEAD(&spapr_tce_tables, tcet, list);

    return 0;
}

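/*
 * Create and realize a new TCE table object as a child of the owner
 * device.  Fails (returns NULL) if the LIOBN is already in use or the
 * requested DMA window size is zero.
 */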
sPAPRTCETable *spapr_tce_new_table(DeviceState *owner, uint32_t liobn,
                                   size_t window_size)
{
    sPAPRTCETable *tcet;

    if (spapr_tce_find_by_liobn(liobn)) {
        fprintf(stderr, "Attempted to create TCE table with duplicate"
                " LIOBN 0x%x\n", liobn);
        return NULL;
    }

    if (!window_size) {
        return NULL;
    }

    tcet = SPAPR_TCE_TABLE(object_new(TYPE_SPAPR_TCE_TABLE));
    tcet->liobn = liobn;
    tcet->window_size = window_size;

    object_property_add_child(OBJECT(owner), "tce-table", OBJECT(tcet), NULL);

    qdev_init_nofail(DEVICE(tcet));

    return tcet;
}

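/*
 * Instance finalizer: unlink the table from the global list and release
 * its backing storage.  When the table was created through KVM, let the
 * kernel tear it down; only g_free() the memory if KVM is not in use or
 * the KVM teardown failed.
 */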
static void spapr_tce_table_finalize(Object *obj)
{
    sPAPRTCETable *tcet = SPAPR_TCE_TABLE(obj);

    QLIST_REMOVE(tcet, list);

    if (!kvm_enabled() ||
        (kvmppc_remove_spapr_tce(tcet->table, tcet->fd,
                                 tcet->window_size) != 0)) {
        g_free(tcet->table);
    }
}

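/* Return the IOMMU memory region that devices use for DMA through this table. */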
MemoryRegion *spapr_tce_get_iommu(sPAPRTCETable *tcet)
{
    return &tcet->iommu;
}

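/* Enable or disable bypass mode, i.e. a 1:1 read-write mapping of guest memory. */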
void spapr_tce_set_bypass(sPAPRTCETable *tcet, bool bypass)
{
    tcet->bypass = bypass;
}

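/*
 * Device reset: drop bypass mode and clear every TCE so the guest starts
 * with an empty DMA window.
 */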
static void spapr_tce_reset(DeviceState *dev)
{
    sPAPRTCETable *tcet = SPAPR_TCE_TABLE(dev);
    size_t table_size = (tcet->window_size >> SPAPR_TCE_PAGE_SHIFT)
        * sizeof(uint64_t);

    tcet->bypass = false;
    memset(tcet->table, 0, table_size);
}

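/*
 * Store a single TCE into the table after validating that the I/O bus
 * address falls inside the DMA window, then notify any registered IOMMU
 * notifiers about the changed mapping.
 */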
static target_ulong put_tce_emu(sPAPRTCETable *tcet, target_ulong ioba,
                                target_ulong tce)
{
    IOMMUTLBEntry entry;

    if (ioba >= tcet->window_size) {
        hcall_dprintf("spapr_vio_put_tce on out-of-bounds IOBA 0x"
                      TARGET_FMT_lx "\n", ioba);
        return H_PARAMETER;
    }

    tcet->table[ioba >> SPAPR_TCE_PAGE_SHIFT] = tce;

    entry.target_as = &address_space_memory;
    entry.iova = ioba & ~SPAPR_TCE_PAGE_MASK;
    entry.translated_addr = tce & ~SPAPR_TCE_PAGE_MASK;
    entry.addr_mask = SPAPR_TCE_PAGE_MASK;
    entry.perm = tce;
    memory_region_notify_iommu(&tcet->iommu, entry);

    return H_SUCCESS;
}

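/*
 * Handler for the H_PUT_TCE hypercall: look up the TCE table by LIOBN,
 * align the I/O bus address down to a TCE page boundary and emulate the
 * TCE store.
 */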
static target_ulong h_put_tce(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                              target_ulong opcode, target_ulong *args)
{
    target_ulong liobn = args[0];
    target_ulong ioba = args[1];
    target_ulong tce = args[2];
    target_ulong ret = H_PARAMETER;
    sPAPRTCETable *tcet = spapr_tce_find_by_liobn(liobn);

    ioba &= ~(SPAPR_TCE_PAGE_SIZE - 1);

    if (tcet) {
        ret = put_tce_emu(tcet, ioba, tce);
    }
    trace_spapr_iommu_put(liobn, ioba, tce, ret);

    return ret;
}

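/*
 * Encode a DMA window property (LIOBN, 64-bit window base, 64-bit window
 * size) under the given property name, together with the matching
 * ibm,#dma-address-cells and ibm,#dma-size-cells properties, into the
 * device tree node.
 */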
int spapr_dma_dt(void *fdt, int node_off, const char *propname,
                 uint32_t liobn, uint64_t window, uint32_t size)
{
    uint32_t dma_prop[5];
    int ret;

    dma_prop[0] = cpu_to_be32(liobn);
    dma_prop[1] = cpu_to_be32(window >> 32);
    dma_prop[2] = cpu_to_be32(window & 0xFFFFFFFF);
    dma_prop[3] = 0; /* upper cell of the 64-bit size; size fits in 32 bits */
    dma_prop[4] = cpu_to_be32(size);

    ret = fdt_setprop_cell(fdt, node_off, "ibm,#dma-address-cells", 2);
    if (ret < 0) {
        return ret;
    }

    ret = fdt_setprop_cell(fdt, node_off, "ibm,#dma-size-cells", 2);
    if (ret < 0) {
        return ret;
    }

    ret = fdt_setprop(fdt, node_off, propname, dma_prop, sizeof(dma_prop));
    if (ret < 0) {
        return ret;
    }

    return 0;
}

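/*
 * Convenience wrapper: emit the DMA window property for a TCE table, or
 * do nothing when no table is associated with the device.
 */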
int spapr_tcet_dma_dt(void *fdt, int node_off, const char *propname,
                      sPAPRTCETable *tcet)
{
    if (!tcet) {
        return 0;
    }

    return spapr_dma_dt(fdt, node_off, propname,
                        tcet->liobn, 0, tcet->window_size);
}

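/*
 * Class initializer: hook up the qdev callbacks and VMState, initialize
 * the global table list and register the H_PUT_TCE hypercall handler.
 */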
static void spapr_tce_table_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->vmsd = &vmstate_spapr_tce_table;
    dc->init = spapr_tce_table_realize;
    dc->reset = spapr_tce_reset;

    QLIST_INIT(&spapr_tce_tables);

    /* hcall-tce */
    spapr_register_hypercall(H_PUT_TCE, h_put_tce);
}

static TypeInfo spapr_tce_table_info = {
    .name = TYPE_SPAPR_TCE_TABLE,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(sPAPRTCETable),
    .class_init = spapr_tce_table_class_init,
    .instance_finalize = spapr_tce_table_finalize,
};

static void register_types(void)
{
    type_register_static(&spapr_tce_table_info);
}

type_init(register_types);