xref: /openbmc/qemu/hw/dma/sifive_pdma.c (revision 40f23e4e)
/*
 * SiFive Platform DMA emulation
 *
 * Copyright (c) 2020 Wind River Systems, Inc.
 *
 * Author:
 *   Bin Meng <bin.meng@windriver.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 or
 * (at your option) version 3 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/bitops.h"
#include "qemu/log.h"
#include "qapi/error.h"
#include "hw/irq.h"
#include "hw/qdev-properties.h"
#include "hw/sysbus.h"
#include "migration/vmstate.h"
#include "sysemu/dma.h"
#include "hw/dma/sifive_pdma.h"

#define DMA_CONTROL         0x000
#define   CONTROL_CLAIM     BIT(0)
#define   CONTROL_RUN       BIT(1)
#define   CONTROL_DONE_IE   BIT(14)
#define   CONTROL_ERR_IE    BIT(15)
#define   CONTROL_DONE      BIT(30)
#define   CONTROL_ERR       BIT(31)

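/*
 * Bits in DMA_CONTROL as handled by this model: writing RUN kicks off a
 * transfer, DONE and ERR report the result of the last transfer, and
 * DONE_IE/ERR_IE gate the two per-channel interrupt lines.  CLAIM is
 * stored but not otherwise checked here.
 */
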
#define DMA_NEXT_CONFIG     0x004
#define   CONFIG_REPEAT     BIT(2)
#define   CONFIG_ORDER      BIT(3)
#define   CONFIG_WRSZ_SHIFT 24
#define   CONFIG_RDSZ_SHIFT 28
#define   CONFIG_SZ_MASK    0xf

#define DMA_NEXT_BYTES      0x008
#define DMA_NEXT_DST        0x010
#define DMA_NEXT_SRC        0x018
#define DMA_EXEC_CONFIG     0x104
#define DMA_EXEC_BYTES      0x108
#define DMA_EXEC_DST        0x110
#define DMA_EXEC_SRC        0x118

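/*
 * Programming model as implemented below: the guest fills in a channel's
 * Next* registers and then writes DMA_CONTROL with CONTROL_RUN (plus the
 * interrupt-enable bits it wants).  The Next* values are copied into the
 * read-only Exec* registers while the transfer runs.
 */
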
enum dma_chan_state {
    DMA_CHAN_STATE_IDLE,
    DMA_CHAN_STATE_STARTED,
    DMA_CHAN_STATE_ERROR,
    DMA_CHAN_STATE_DONE
};

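/*
 * Execute the transfer described by a channel's Next* registers.  The
 * Next* values are latched into the Exec* registers and the data is
 * copied in transaction-sized chunks.  If one of the sanity checks
 * fails, the channel is put into the error state without touching the
 * Exec* registers.
 */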
static void sifive_pdma_run(SiFivePDMAState *s, int ch)
{
    uint64_t bytes = s->chan[ch].next_bytes;
    uint64_t dst = s->chan[ch].next_dst;
    uint64_t src = s->chan[ch].next_src;
    uint32_t config = s->chan[ch].next_config;
    int wsize, rsize, size;
    uint8_t buf[64];
    int n;

    /* a zero-length transfer is treated as an error */
    if (!bytes) {
        goto error;
    }

    /*
     * The manual does not describe how the hardware behaves when
     * config.wsize and config.rsize are given different values.
     * A common case is memory-to-memory DMA, where they are normally
     * the same.  Abort if this expectation fails.
     */
    wsize = (config >> CONFIG_WRSZ_SHIFT) & CONFIG_SZ_MASK;
    rsize = (config >> CONFIG_RDSZ_SHIFT) & CONFIG_SZ_MASK;
    if (wsize != rsize) {
        goto error;
    }

    /*
     * Calculate the transaction size
     *
     * The size field is the base-2 logarithm of the DMA transaction size,
     * but there is an upper limit of 64 bytes per transaction.
     */
    size = wsize;
    if (size > 6) {
        size = 6;
    }
    size = 1 << size;
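    /*
     * Example: wsize = rsize = 3 selects 2^3 = 8-byte transactions; any
     * encoded size above 6 is clamped to 2^6 = 64 bytes, matching the
     * bounce buffer declared above.
     */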

    /* the bytes to transfer must be a multiple of the transaction size */
    if (bytes % size) {
        goto error;
    }

    /* indicate a DMA transfer is started */
    s->chan[ch].state = DMA_CHAN_STATE_STARTED;
    s->chan[ch].control &= ~CONTROL_DONE;
    s->chan[ch].control &= ~CONTROL_ERR;

    /* load the next_ registers into their exec_ counterparts */
    s->chan[ch].exec_config = config;
    s->chan[ch].exec_bytes = bytes;
    s->chan[ch].exec_dst = dst;
    s->chan[ch].exec_src = src;

    for (n = 0; n < bytes / size; n++) {
        cpu_physical_memory_read(s->chan[ch].exec_src, buf, size);
        cpu_physical_memory_write(s->chan[ch].exec_dst, buf, size);
        s->chan[ch].exec_src += size;
        s->chan[ch].exec_dst += size;
        s->chan[ch].exec_bytes -= size;
    }
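    /*
     * The whole copy happens synchronously, within the guest's write to
     * DMA_CONTROL, so the guest never observes a partially completed
     * transfer through the Exec* registers.
     */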

    /* indicate a DMA transfer is done */
    s->chan[ch].state = DMA_CHAN_STATE_DONE;
    s->chan[ch].control &= ~CONTROL_RUN;
    s->chan[ch].control |= CONTROL_DONE;

    /* reload exec_ registers if repeat is required */
    if (s->chan[ch].next_config & CONFIG_REPEAT) {
        s->chan[ch].exec_bytes = bytes;
        s->chan[ch].exec_dst = dst;
        s->chan[ch].exec_src = src;
    }

    return;

error:
    s->chan[ch].state = DMA_CHAN_STATE_ERROR;
    s->chan[ch].control |= CONTROL_ERR;
    return;
}

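/*
 * Reflect the channel's DONE/ERR status on its two interrupt lines:
 * irq[ch * 2] is the "done" line and irq[ch * 2 + 1] the "error" line,
 * each raised only while the matching interrupt-enable bit is set.
 * The channel state is reset to idle afterwards.
 */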
static inline void sifive_pdma_update_irq(SiFivePDMAState *s, int ch)
{
    bool done_ie, err_ie;

    done_ie = !!(s->chan[ch].control & CONTROL_DONE_IE);
    err_ie = !!(s->chan[ch].control & CONTROL_ERR_IE);

    if (done_ie && (s->chan[ch].control & CONTROL_DONE)) {
        qemu_irq_raise(s->irq[ch * 2]);
    } else {
        qemu_irq_lower(s->irq[ch * 2]);
    }

    if (err_ie && (s->chan[ch].control & CONTROL_ERR)) {
        qemu_irq_raise(s->irq[ch * 2 + 1]);
    } else {
        qemu_irq_lower(s->irq[ch * 2 + 1]);
    }

    s->chan[ch].state = DMA_CHAN_STATE_IDLE;
}

static uint64_t sifive_pdma_read(void *opaque, hwaddr offset, unsigned size)
{
    SiFivePDMAState *s = opaque;
    int ch = SIFIVE_PDMA_CHAN_NO(offset);
    uint64_t val = 0;

    if (ch >= SIFIVE_PDMA_CHANS) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: Invalid channel no %d\n",
                      __func__, ch);
        return 0;
    }

    offset &= 0xfff;
    switch (offset) {
    case DMA_CONTROL:
        val = s->chan[ch].control;
        break;
    case DMA_NEXT_CONFIG:
        val = s->chan[ch].next_config;
        break;
    case DMA_NEXT_BYTES:
        val = s->chan[ch].next_bytes;
        break;
    case DMA_NEXT_DST:
        val = s->chan[ch].next_dst;
        break;
    case DMA_NEXT_SRC:
        val = s->chan[ch].next_src;
        break;
    case DMA_EXEC_CONFIG:
        val = s->chan[ch].exec_config;
        break;
    case DMA_EXEC_BYTES:
        val = s->chan[ch].exec_bytes;
        break;
    case DMA_EXEC_DST:
        val = s->chan[ch].exec_dst;
        break;
    case DMA_EXEC_SRC:
        val = s->chan[ch].exec_src;
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIX "\n",
                      __func__, offset);
        break;
    }

    return val;
}

static void sifive_pdma_write(void *opaque, hwaddr offset,
                              uint64_t value, unsigned size)
{
    SiFivePDMAState *s = opaque;
    int ch = SIFIVE_PDMA_CHAN_NO(offset);

    if (ch >= SIFIVE_PDMA_CHANS) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: Invalid channel no %d\n",
                      __func__, ch);
        return;
    }

    offset &= 0xfff;
    switch (offset) {
    case DMA_CONTROL:
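        /*
         * The guest writes the whole control word, including the DONE
         * and ERR status bits, so a write that clears them also lowers
         * the corresponding interrupt lines via the update below.
         */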
        s->chan[ch].control = value;

        if (value & CONTROL_RUN) {
            sifive_pdma_run(s, ch);
        }

        sifive_pdma_update_irq(s, ch);
        break;
    case DMA_NEXT_CONFIG:
        s->chan[ch].next_config = value;
        break;
    case DMA_NEXT_BYTES:
        s->chan[ch].next_bytes = value;
        break;
    case DMA_NEXT_DST:
        s->chan[ch].next_dst = value;
        break;
    case DMA_NEXT_SRC:
        s->chan[ch].next_src = value;
        break;
    case DMA_EXEC_CONFIG:
    case DMA_EXEC_BYTES:
    case DMA_EXEC_DST:
    case DMA_EXEC_SRC:
        /* these are read-only registers */
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIX "\n",
                      __func__, offset);
        break;
    }
}

static const MemoryRegionOps sifive_pdma_ops = {
    .read = sifive_pdma_read,
    .write = sifive_pdma_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    /* there are 32-bit and 64-bit wide registers */
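    /*
     * With .impl set to 4/8 bytes, the memory core adapts guest accesses
     * of other widths into 4- or 8-byte calls into read/write above.
     */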
    .impl = {
        .min_access_size = 4,
        .max_access_size = 8,
    }
};

static void sifive_pdma_realize(DeviceState *dev, Error **errp)
{
    SiFivePDMAState *s = SIFIVE_PDMA(dev);
    int i;

    memory_region_init_io(&s->iomem, OBJECT(dev), &sifive_pdma_ops, s,
                          TYPE_SIFIVE_PDMA, SIFIVE_PDMA_REG_SIZE);
    sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->iomem);

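    /*
     * Export one sysbus IRQ per (channel, event) pair: even indices are
     * the per-channel "done" lines, odd indices the "error" lines, the
     * same ordering used by sifive_pdma_update_irq().
     */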
    for (i = 0; i < SIFIVE_PDMA_IRQS; i++) {
        sysbus_init_irq(SYS_BUS_DEVICE(dev), &s->irq[i]);
    }
}

static void sifive_pdma_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->desc = "SiFive Platform DMA controller";
    dc->realize = sifive_pdma_realize;
}

static const TypeInfo sifive_pdma_info = {
    .name          = TYPE_SIFIVE_PDMA,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(SiFivePDMAState),
    .class_init    = sifive_pdma_class_init,
};

static void sifive_pdma_register_types(void)
{
    type_register_static(&sifive_pdma_info);
}

type_init(sifive_pdma_register_types)
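
/*
 * Wiring sketch (not taken from any particular board): a machine model
 * would typically create and hook up the controller along these lines,
 * with base_addr, irq_base and plic standing in for machine-specific
 * values:
 *
 *   DeviceState *dma = qdev_new(TYPE_SIFIVE_PDMA);
 *   sysbus_realize_and_unref(SYS_BUS_DEVICE(dma), &error_fatal);
 *   sysbus_mmio_map(SYS_BUS_DEVICE(dma), 0, base_addr);
 *   for (i = 0; i < SIFIVE_PDMA_IRQS; i++) {
 *       sysbus_connect_irq(SYS_BUS_DEVICE(dma), i,
 *                          qdev_get_gpio_in(plic, irq_base + i));
 *   }
 */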