xref: /openbmc/qemu/hw/dma/sifive_pdma.c (revision 34b36c3b)
/*
 * SiFive Platform DMA emulation
 *
 * Copyright (c) 2020 Wind River Systems, Inc.
 *
 * Author:
 *   Bin Meng <bin.meng@windriver.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 or
 * (at your option) version 3 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/bitops.h"
#include "qemu/log.h"
#include "qapi/error.h"
#include "hw/hw.h"
#include "hw/irq.h"
#include "hw/qdev-properties.h"
#include "hw/sysbus.h"
#include "migration/vmstate.h"
#include "sysemu/dma.h"
#include "hw/dma/sifive_pdma.h"

#define DMA_CONTROL         0x000
#define   CONTROL_CLAIM     BIT(0)
#define   CONTROL_RUN       BIT(1)
#define   CONTROL_DONE_IE   BIT(14)
#define   CONTROL_ERR_IE    BIT(15)
#define   CONTROL_DONE      BIT(30)
#define   CONTROL_ERR       BIT(31)

#define DMA_NEXT_CONFIG     0x004
#define   CONFIG_REPEAT     BIT(2)
#define   CONFIG_ORDER      BIT(3)
#define   CONFIG_WRSZ_SHIFT 24
#define   CONFIG_RDSZ_SHIFT 28
#define   CONFIG_SZ_MASK    0xf

#define DMA_NEXT_BYTES      0x008
#define DMA_NEXT_DST        0x010
#define DMA_NEXT_SRC        0x018
#define DMA_EXEC_CONFIG     0x104
#define DMA_EXEC_BYTES      0x108
#define DMA_EXEC_DST        0x110
#define DMA_EXEC_SRC        0x118

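/*
 * The register offsets above are relative to a channel's base address;
 * each channel occupies its own 4 KiB window (the MMIO handlers below mask
 * the offset with 0xfff and pick the channel via SIFIVE_PDMA_CHAN_NO()).
 */
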
enum dma_chan_state {
    DMA_CHAN_STATE_IDLE,
    DMA_CHAN_STATE_STARTED,
    DMA_CHAN_STATE_ERROR,
    DMA_CHAN_STATE_DONE
};

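/*
 * Programming model, as implied by the register handling below (an
 * illustrative sketch only; writel()/writeq() stand in for whatever MMIO
 * accessors the guest uses, and base, len, dst, src, sz are placeholders):
 *
 *   writeq(base + DMA_NEXT_BYTES, len);
 *   writeq(base + DMA_NEXT_DST, dst);
 *   writeq(base + DMA_NEXT_SRC, src);
 *   writel(base + DMA_NEXT_CONFIG, (sz << CONFIG_WRSZ_SHIFT) |
 *                                  (sz << CONFIG_RDSZ_SHIFT));
 *   writel(base + DMA_CONTROL, CONTROL_CLAIM | CONTROL_RUN | CONTROL_DONE_IE);
 *
 * Writing CONTROL_RUN triggers sifive_pdma_run(); the DONE/ERR interrupt
 * state is then updated by sifive_pdma_update_irq().
 */
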
static void sifive_pdma_run(SiFivePDMAState *s, int ch)
{
    uint64_t bytes = s->chan[ch].next_bytes;
    uint64_t dst = s->chan[ch].next_dst;
    uint64_t src = s->chan[ch].next_src;
    uint32_t config = s->chan[ch].next_config;
    int wsize, rsize, size;
    uint8_t buf[64];
    int n;

    /* treat a zero-length transfer as an error */
    if (!bytes) {
        goto error;
    }

    /*
     * The manual does not describe how the hardware behaves when
     * config.wsize and config.rsize are given different values.
     * The common case is memory-to-memory DMA, where the two are
     * normally the same. Error out if this expectation fails.
     */
    wsize = (config >> CONFIG_WRSZ_SHIFT) & CONFIG_SZ_MASK;
    rsize = (config >> CONFIG_RDSZ_SHIFT) & CONFIG_SZ_MASK;
    if (wsize != rsize) {
        goto error;
    }

    /*
     * Calculate the transaction size.
     *
     * The size field is the base-2 logarithm of the DMA transaction size,
     * but there is an upper limit of 64 bytes per transaction.
     */
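    /*
     * e.g. a size field of 3 selects 2^3 = 8-byte transactions; any
     * encoding above 6 is clamped to the 64-byte maximum.
     */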
    size = wsize;
    if (size > 6) {
        size = 6;
    }
    size = 1 << size;

    /* the byte count must be a multiple of the transaction size */
    if (bytes % size) {
        goto error;
    }

    /* indicate that a DMA transfer has started */
    s->chan[ch].state = DMA_CHAN_STATE_STARTED;
    s->chan[ch].control &= ~CONTROL_DONE;
    s->chan[ch].control &= ~CONTROL_ERR;

    /* load the next_ registers into their exec_ counterparts */
    s->chan[ch].exec_config = config;
    s->chan[ch].exec_bytes = bytes;
    s->chan[ch].exec_dst = dst;
    s->chan[ch].exec_src = src;

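    /*
     * Copy the data one transaction at a time; the emulated transfer
     * completes synchronously within this single call.
     */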
    for (n = 0; n < bytes / size; n++) {
        cpu_physical_memory_read(s->chan[ch].exec_src, buf, size);
        cpu_physical_memory_write(s->chan[ch].exec_dst, buf, size);
        s->chan[ch].exec_src += size;
        s->chan[ch].exec_dst += size;
        s->chan[ch].exec_bytes -= size;
    }

    /* indicate a DMA transfer is done */
    s->chan[ch].state = DMA_CHAN_STATE_DONE;
    s->chan[ch].control &= ~CONTROL_RUN;
    s->chan[ch].control |= CONTROL_DONE;

    /* reload exec_ registers if repeat is required */
    if (s->chan[ch].next_config & CONFIG_REPEAT) {
        s->chan[ch].exec_bytes = bytes;
        s->chan[ch].exec_dst = dst;
        s->chan[ch].exec_src = src;
    }
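    /*
     * Note: repeat mode does not restart the transfer here; it only
     * re-arms the exec_ registers with the next_ values so that a
     * subsequent RUN reuses the same parameters.
     */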

    return;

error:
    s->chan[ch].state = DMA_CHAN_STATE_ERROR;
    s->chan[ch].control |= CONTROL_ERR;
    return;
}

static inline void sifive_pdma_update_irq(SiFivePDMAState *s, int ch)
{
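    /*
     * Each channel owns a pair of IRQ lines: irq[2 * ch] signals DONE and
     * irq[2 * ch + 1] signals ERR, each gated by its interrupt-enable bit.
     */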
    bool done_ie, err_ie;

    done_ie = !!(s->chan[ch].control & CONTROL_DONE_IE);
    err_ie = !!(s->chan[ch].control & CONTROL_ERR_IE);

    if (done_ie && (s->chan[ch].control & CONTROL_DONE)) {
        qemu_irq_raise(s->irq[ch * 2]);
    } else {
        qemu_irq_lower(s->irq[ch * 2]);
    }

    if (err_ie && (s->chan[ch].control & CONTROL_ERR)) {
        qemu_irq_raise(s->irq[ch * 2 + 1]);
    } else {
        qemu_irq_lower(s->irq[ch * 2 + 1]);
    }

    s->chan[ch].state = DMA_CHAN_STATE_IDLE;
}

static uint64_t sifive_pdma_read(void *opaque, hwaddr offset, unsigned size)
{
    SiFivePDMAState *s = opaque;
    int ch = SIFIVE_PDMA_CHAN_NO(offset);
    uint64_t val = 0;

    if (ch >= SIFIVE_PDMA_CHANS) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: Invalid channel no %d\n",
                      __func__, ch);
        return 0;
    }

    offset &= 0xfff;
    switch (offset) {
    case DMA_CONTROL:
        val = s->chan[ch].control;
        break;
    case DMA_NEXT_CONFIG:
        val = s->chan[ch].next_config;
        break;
    case DMA_NEXT_BYTES:
        val = s->chan[ch].next_bytes;
        break;
    case DMA_NEXT_DST:
        val = s->chan[ch].next_dst;
        break;
    case DMA_NEXT_SRC:
        val = s->chan[ch].next_src;
        break;
    case DMA_EXEC_CONFIG:
        val = s->chan[ch].exec_config;
        break;
    case DMA_EXEC_BYTES:
        val = s->chan[ch].exec_bytes;
        break;
    case DMA_EXEC_DST:
        val = s->chan[ch].exec_dst;
        break;
    case DMA_EXEC_SRC:
        val = s->chan[ch].exec_src;
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIX "\n",
                      __func__, offset);
        break;
    }

    return val;
}

static void sifive_pdma_write(void *opaque, hwaddr offset,
                              uint64_t value, unsigned size)
{
    SiFivePDMAState *s = opaque;
    int ch = SIFIVE_PDMA_CHAN_NO(offset);

    if (ch >= SIFIVE_PDMA_CHANS) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: Invalid channel no %d\n",
                      __func__, ch);
        return;
    }

    offset &= 0xfff;
    switch (offset) {
    case DMA_CONTROL:
        s->chan[ch].control = value;

        if (value & CONTROL_RUN) {
            sifive_pdma_run(s, ch);
        }

        sifive_pdma_update_irq(s, ch);
        break;
    case DMA_NEXT_CONFIG:
        s->chan[ch].next_config = value;
        break;
    case DMA_NEXT_BYTES:
        s->chan[ch].next_bytes = value;
        break;
    case DMA_NEXT_DST:
        s->chan[ch].next_dst = value;
        break;
    case DMA_NEXT_SRC:
        s->chan[ch].next_src = value;
        break;
    case DMA_EXEC_CONFIG:
    case DMA_EXEC_BYTES:
    case DMA_EXEC_DST:
    case DMA_EXEC_SRC:
        /* these are read-only registers */
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIX "\n",
                      __func__, offset);
        break;
    }
}

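/*
 * The .impl sizes below restrict implementation accesses to 4 or 8 bytes;
 * the memory core adjusts any narrower guest access to fit before the
 * read/write callbacks above are invoked.
 */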
static const MemoryRegionOps sifive_pdma_ops = {
    .read = sifive_pdma_read,
    .write = sifive_pdma_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    /* there are 32-bit and 64-bit wide registers */
    .impl = {
        .min_access_size = 4,
        .max_access_size = 8,
    }
};

static void sifive_pdma_realize(DeviceState *dev, Error **errp)
{
    SiFivePDMAState *s = SIFIVE_PDMA(dev);
    int i;

    memory_region_init_io(&s->iomem, OBJECT(dev), &sifive_pdma_ops, s,
                          TYPE_SIFIVE_PDMA, SIFIVE_PDMA_REG_SIZE);
    sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->iomem);

    for (i = 0; i < SIFIVE_PDMA_IRQS; i++) {
        sysbus_init_irq(SYS_BUS_DEVICE(dev), &s->irq[i]);
    }
}
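
/*
 * A board model would typically wire the controller up along these lines
 * (an illustrative sketch only; base_addr, plic and irq_base are
 * board-specific and not defined here):
 *
 *   DeviceState *dev = qdev_new(TYPE_SIFIVE_PDMA);
 *   sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
 *   sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base_addr);
 *   for (i = 0; i < SIFIVE_PDMA_IRQS; i++) {
 *       sysbus_connect_irq(SYS_BUS_DEVICE(dev), i,
 *                          qdev_get_gpio_in(DEVICE(plic), irq_base + i));
 *   }
 */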

static void sifive_pdma_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->desc = "SiFive Platform DMA controller";
    dc->realize = sifive_pdma_realize;
}

static const TypeInfo sifive_pdma_info = {
    .name          = TYPE_SIFIVE_PDMA,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(SiFivePDMAState),
    .class_init    = sifive_pdma_class_init,
};

static void sifive_pdma_register_types(void)
{
    type_register_static(&sifive_pdma_info);
}

type_init(sifive_pdma_register_types)