/*
 * Arm PrimeCell PL080/PL081 DMA controller
 *
 * Copyright (c) 2006 CodeSourcery.
 * Written by Paul Brook
 *
 * This code is licensed under the GPL.
 */

#include "qemu/osdep.h"
#include "hw/sysbus.h"
#include "migration/vmstate.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "hw/dma/pl080.h"
#include "hw/hw.h"
#include "hw/irq.h"
#include "hw/qdev-properties.h"
#include "qapi/error.h"

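/* DMACConfiguration register bits: E enables the whole controller;
   M1/M2 select big-endian mode for the two AHB masters (big-endian
   DMA is not implemented by this model, see pl080_write). */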
#define PL080_CONF_E 0x1
#define PL080_CONF_M1 0x2
#define PL080_CONF_M2 0x4

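/* Per-channel DMACCxConfiguration bits: H (halt), ITC (terminal count
   interrupt mask), IE (error interrupt mask) and E (channel enable) are
   used below; A (active) and L (lock) are defined for completeness.
   The same register also holds the source/destination peripheral IDs
   and the flow control field decoded in pl080_run(). */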
#define PL080_CCONF_H 0x40000
#define PL080_CCONF_A 0x20000
#define PL080_CCONF_L 0x10000
#define PL080_CCONF_ITC 0x08000
#define PL080_CCONF_IE 0x04000
#define PL080_CCONF_E 0x00001

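/* Per-channel DMACCxControl bits: I enables the terminal count interrupt
   for the channel, SI/DI select source/destination address increment.
   D and S (AHB master selects on real hardware) are unused here; the low
   12 bits of the same register hold the transfer size. */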
#define PL080_CCTRL_I 0x80000000
#define PL080_CCTRL_DI 0x08000000
#define PL080_CCTRL_SI 0x04000000
#define PL080_CCTRL_D 0x02000000
#define PL080_CCTRL_S 0x01000000

static const VMStateDescription vmstate_pl080_channel = {
    .name = "pl080_channel",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(src, pl080_channel),
        VMSTATE_UINT32(dest, pl080_channel),
        VMSTATE_UINT32(lli, pl080_channel),
        VMSTATE_UINT32(ctrl, pl080_channel),
        VMSTATE_UINT32(conf, pl080_channel),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_pl080 = {
    .name = "pl080",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT8(tc_int, PL080State),
        VMSTATE_UINT8(tc_mask, PL080State),
        VMSTATE_UINT8(err_int, PL080State),
        VMSTATE_UINT8(err_mask, PL080State),
        VMSTATE_UINT32(conf, PL080State),
        VMSTATE_UINT32(sync, PL080State),
        VMSTATE_UINT32(req_single, PL080State),
        VMSTATE_UINT32(req_burst, PL080State),
        VMSTATE_STRUCT_ARRAY(chan, PL080State, PL080_MAX_CHANNELS,
                             1, vmstate_pl080_channel, pl080_channel),
        VMSTATE_INT32(running, PL080State),
        VMSTATE_END_OF_LIST()
    }
};

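/* PrimeCell peripheral and cell ID bytes, returned for reads in the
   0xfe0-0xffc window; the PL081 differs only in the first byte. */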
static const unsigned char pl080_id[] =
{ 0x80, 0x10, 0x04, 0x0a, 0x0d, 0xf0, 0x05, 0xb1 };

static const unsigned char pl081_id[] =
{ 0x81, 0x10, 0x04, 0x0a, 0x0d, 0xf0, 0x05, 0xb1 };

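/* Recompute the error and terminal count interrupt lines and the
   combined IRQ output from the masked interrupt status. */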
static void pl080_update(PL080State *s)
{
    bool tclevel = (s->tc_int & s->tc_mask);
    bool errlevel = (s->err_int & s->err_mask);

    qemu_set_irq(s->interr, errlevel);
    qemu_set_irq(s->inttc, tclevel);
    qemu_set_irq(s->irq, errlevel || tclevel);
}

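/* Scan the channels and perform any pending DMA transfers.  A channel
   runs while it is enabled and not halted; each pass moves one element
   and, when the transfer count reaches zero, either follows the linked
   list item or disables the channel. */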
static void pl080_run(PL080State *s)
{
    int c;
    int flow;
    pl080_channel *ch;
    int swidth;
    int dwidth;
    int xsize;
    int n;
    int src_id;
    int dest_id;
    int size;
    uint8_t buff[4];
    uint32_t req;

    s->tc_mask = 0;
    for (c = 0; c < s->nchannels; c++) {
        if (s->chan[c].conf & PL080_CCONF_ITC)
            s->tc_mask |= 1 << c;
        if (s->chan[c].conf & PL080_CCONF_IE)
            s->err_mask |= 1 << c;
    }

    if ((s->conf & PL080_CONF_E) == 0)
        return;

    /* If we are already in the middle of a DMA operation then indicate that
       there may be new DMA requests and return immediately. */
    if (s->running) {
        s->running++;
        return;
    }
    s->running = 1;
    while (s->running) {
        for (c = 0; c < s->nchannels; c++) {
            ch = &s->chan[c];
again:
            /* Test if this channel has any pending DMA requests. */
            if ((ch->conf & (PL080_CCONF_H | PL080_CCONF_E))
                    != PL080_CCONF_E)
                continue;
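            /* FlowCntrl field (conf bits [13:11]): 0 is memory-to-memory,
               1 memory-to-peripheral, 2 peripheral-to-memory and 3
               peripheral-to-peripheral, all with the DMAC as flow
               controller; values of 4 and above put a peripheral in
               charge of flow control, which this model does not
               implement. */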
            flow = (ch->conf >> 11) & 7;
            if (flow >= 4) {
                hw_error(
                    "pl080_run: Peripheral flow control not implemented\n");
            }
            src_id = (ch->conf >> 1) & 0x1f;
            dest_id = (ch->conf >> 6) & 0x1f;
            size = ch->ctrl & 0xfff;
            req = s->req_single | s->req_burst;
            switch (flow) {
            case 0:
                break;
            case 1:
                if ((req & (1u << dest_id)) == 0)
                    size = 0;
                break;
            case 2:
                if ((req & (1u << src_id)) == 0)
                    size = 0;
                break;
            case 3:
                if ((req & (1u << src_id)) == 0
                        || (req & (1u << dest_id)) == 0)
                    size = 0;
                break;
            }
            if (!size)
                continue;

            /* Transfer one element. */
            /* ??? Should transfer multiple elements for a burst request. */
            /* ??? Unclear what the proper behavior is when source and
               destination widths are different. */
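            /* The source and destination width fields (ctrl bits [20:18]
               and [23:21]) encode the element size as a power of two
               bytes. */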
            swidth = 1 << ((ch->ctrl >> 18) & 7);
            dwidth = 1 << ((ch->ctrl >> 21) & 7);
            for (n = 0; n < dwidth; n += swidth) {
                address_space_read(&s->downstream_as, ch->src,
                                   MEMTXATTRS_UNSPECIFIED, buff + n, swidth);
                if (ch->ctrl & PL080_CCTRL_SI)
                    ch->src += swidth;
            }
            xsize = (dwidth < swidth) ? swidth : dwidth;
            /* ??? This may pad the value incorrectly for dwidth < 32. */
            for (n = 0; n < xsize; n += dwidth) {
                address_space_write(&s->downstream_as, ch->dest + n,
                                    MEMTXATTRS_UNSPECIFIED, buff + n, dwidth);
                if (ch->ctrl & PL080_CCTRL_DI)
                    ch->dest += swidth;
            }

            size--;
            ch->ctrl = (ch->ctrl & 0xfffff000) | size;
            if (size == 0) {
                /* Transfer complete. */
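                /* If a linked list item is pending, reload the channel
                   registers from it: the four LLI words hold the next
                   source address, destination address, LLI pointer and
                   control value. */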
                if (ch->lli) {
                    ch->src = address_space_ldl_le(&s->downstream_as,
                                                   ch->lli,
                                                   MEMTXATTRS_UNSPECIFIED,
                                                   NULL);
                    ch->dest = address_space_ldl_le(&s->downstream_as,
                                                    ch->lli + 4,
                                                    MEMTXATTRS_UNSPECIFIED,
                                                    NULL);
                    ch->ctrl = address_space_ldl_le(&s->downstream_as,
                                                    ch->lli + 12,
                                                    MEMTXATTRS_UNSPECIFIED,
                                                    NULL);
                    ch->lli = address_space_ldl_le(&s->downstream_as,
                                                   ch->lli + 8,
                                                   MEMTXATTRS_UNSPECIFIED,
                                                   NULL);
                } else {
                    ch->conf &= ~PL080_CCONF_E;
                }
                if (ch->ctrl & PL080_CCTRL_I) {
                    s->tc_int |= 1 << c;
                }
            }
            goto again;
        }
        if (--s->running)
            s->running = 1;
    }
}

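/* The 4K register frame decodes as: global registers at 0x000-0x034,
   per-channel registers at 0x100 + 0x20 * n, and the PrimeCell ID bytes
   at 0xfe0-0xffc. */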
static uint64_t pl080_read(void *opaque, hwaddr offset,
                           unsigned size)
{
    PL080State *s = (PL080State *)opaque;
    uint32_t i;
    uint32_t mask;

    if (offset >= 0xfe0 && offset < 0x1000) {
        if (s->nchannels == 8) {
            return pl080_id[(offset - 0xfe0) >> 2];
        } else {
            return pl081_id[(offset - 0xfe0) >> 2];
        }
    }
    if (offset >= 0x100 && offset < 0x200) {
        i = (offset & 0xe0) >> 5;
        if (i >= s->nchannels)
            goto bad_offset;
        switch ((offset >> 2) & 7) {
        case 0: /* SrcAddr */
            return s->chan[i].src;
        case 1: /* DestAddr */
            return s->chan[i].dest;
        case 2: /* LLI */
            return s->chan[i].lli;
        case 3: /* Control */
            return s->chan[i].ctrl;
        case 4: /* Configuration */
            return s->chan[i].conf;
        default:
            goto bad_offset;
        }
    }
    switch (offset >> 2) {
    case 0: /* IntStatus */
        return (s->tc_int & s->tc_mask) | (s->err_int & s->err_mask);
    case 1: /* IntTCStatus */
        return (s->tc_int & s->tc_mask);
    case 3: /* IntErrorStatus */
        return (s->err_int & s->err_mask);
    case 5: /* RawIntTCStatus */
        return s->tc_int;
    case 6: /* RawIntErrorStatus */
        return s->err_int;
    case 7: /* EnbldChns */
        mask = 0;
        for (i = 0; i < s->nchannels; i++) {
            if (s->chan[i].conf & PL080_CCONF_E)
                mask |= 1 << i;
        }
        return mask;
    case 8: /* SoftBReq */
    case 9: /* SoftSReq */
    case 10: /* SoftLBReq */
    case 11: /* SoftLSReq */
        /* ??? Implement these. */
        return 0;
    case 12: /* Configuration */
        return s->conf;
    case 13: /* Sync */
        return s->sync;
    default:
    bad_offset:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "pl080_read: Bad offset %x\n", (int)offset);
        return 0;
    }
}

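/* Writes to a channel Configuration register or to the global
   Configuration register may start a transfer, so they call pl080_run();
   every write also refreshes the interrupt outputs via pl080_update(). */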
static void pl080_write(void *opaque, hwaddr offset,
                        uint64_t value, unsigned size)
{
    PL080State *s = (PL080State *)opaque;
    int i;

    if (offset >= 0x100 && offset < 0x200) {
        i = (offset & 0xe0) >> 5;
        if (i >= s->nchannels)
            goto bad_offset;
        switch ((offset >> 2) & 7) {
        case 0: /* SrcAddr */
            s->chan[i].src = value;
            break;
        case 1: /* DestAddr */
            s->chan[i].dest = value;
            break;
        case 2: /* LLI */
            s->chan[i].lli = value;
            break;
        case 3: /* Control */
            s->chan[i].ctrl = value;
            break;
        case 4: /* Configuration */
            s->chan[i].conf = value;
            pl080_run(s);
            break;
        }
        return;
    }
    switch (offset >> 2) {
    case 2: /* IntTCClear */
        s->tc_int &= ~value;
        break;
    case 4: /* IntErrorClear */
        s->err_int &= ~value;
        break;
    case 8: /* SoftBReq */
    case 9: /* SoftSReq */
    case 10: /* SoftLBReq */
    case 11: /* SoftLSReq */
        /* ??? Implement these. */
        qemu_log_mask(LOG_UNIMP, "pl080_write: Soft DMA not implemented\n");
        break;
    case 12: /* Configuration */
        s->conf = value;
        if (s->conf & (PL080_CONF_M1 | PL080_CONF_M2)) {
            qemu_log_mask(LOG_UNIMP,
                          "pl080_write: Big-endian DMA not implemented\n");
        }
        pl080_run(s);
        break;
    case 13: /* Sync */
        s->sync = value;
        break;
    default:
    bad_offset:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "pl080_write: Bad offset %x\n", (int)offset);
    }
    pl080_update(s);
}

static const MemoryRegionOps pl080_ops = {
    .read = pl080_read,
    .write = pl080_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

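/* Device reset: clear the interrupt state and all global and per-channel
   registers. */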
static void pl080_reset(DeviceState *dev)
{
    PL080State *s = PL080(dev);
    int i;

    s->tc_int = 0;
    s->tc_mask = 0;
    s->err_int = 0;
    s->err_mask = 0;
    s->conf = 0;
    s->sync = 0;
    s->req_single = 0;
    s->req_burst = 0;
    s->running = 0;

    for (i = 0; i < s->nchannels; i++) {
        s->chan[i].src = 0;
        s->chan[i].dest = 0;
        s->chan[i].lli = 0;
        s->chan[i].ctrl = 0;
        s->chan[i].conf = 0;
    }
}

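/* Instance init: map the 4K register region and export the combined IRQ
   plus the separate error and terminal count interrupt lines.  The base
   PL080 has 8 channels; the PL081 subclass overrides this. */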
static void pl080_init(Object *obj)
{
    SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
    PL080State *s = PL080(obj);

    memory_region_init_io(&s->iomem, OBJECT(s), &pl080_ops, s, "pl080", 0x1000);
    sysbus_init_mmio(sbd, &s->iomem);
    sysbus_init_irq(sbd, &s->irq);
    sysbus_init_irq(sbd, &s->interr);
    sysbus_init_irq(sbd, &s->inttc);
    s->nchannels = 8;
}

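/* The 'downstream' link property must point at the MemoryRegion that DMA
   transfers should access; realize wraps it in an AddressSpace. */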
static void pl080_realize(DeviceState *dev, Error **errp)
{
    PL080State *s = PL080(dev);

    if (!s->downstream) {
        error_setg(errp, "PL080 'downstream' link not set");
        return;
    }

    address_space_init(&s->downstream_as, s->downstream, "pl080-downstream");
}

static void pl081_init(Object *obj)
{
    PL080State *s = PL080(obj);

    s->nchannels = 2;
}

static Property pl080_properties[] = {
    DEFINE_PROP_LINK("downstream", PL080State, downstream,
                     TYPE_MEMORY_REGION, MemoryRegion *),
    DEFINE_PROP_END_OF_LIST(),
};

static void pl080_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);

    dc->vmsd = &vmstate_pl080;
    dc->realize = pl080_realize;
    device_class_set_props(dc, pl080_properties);
    dc->reset = pl080_reset;
}

static const TypeInfo pl080_info = {
    .name = TYPE_PL080,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(PL080State),
    .instance_init = pl080_init,
    .class_init = pl080_class_init,
};

static const TypeInfo pl081_info = {
    .name = TYPE_PL081,
    .parent = TYPE_PL080,
    .instance_init = pl081_init,
};

/* The PL080 and PL081 are the same except for the number of channels
   they implement (8 and 2 respectively). */
static void pl080_register_types(void)
{
    type_register_static(&pl080_info);
    type_register_static(&pl081_info);
}

type_init(pl080_register_types)