/*
 * On-chip DMA controller framework.
 */

#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "qemu/timer.h"
#include "hw/arm/soc_dma.h"
/* Linear memory to linear memory: a plain copy, advancing both ends. */
static void transfer_mem2mem(struct soc_dma_ch_s *ch)
{
    memcpy(ch->paddr[0], ch->paddr[1], ch->bytes);
    ch->paddr[0] += ch->bytes;
    ch->paddr[1] += ch->bytes;
}
/* Linear memory into a device FIFO, via the FIFO's I/O callback. */
static void transfer_mem2fifo(struct soc_dma_ch_s *ch)
{
    ch->io_fn[1](ch->io_opaque[1], ch->paddr[0], ch->bytes);
    ch->paddr[0] += ch->bytes;
}
/* Device FIFO into linear memory. */
static void transfer_fifo2mem(struct soc_dma_ch_s *ch)
{
    ch->io_fn[0](ch->io_opaque[0], ch->paddr[1], ch->bytes);
    ch->paddr[1] += ch->bytes;
}
/* This is further optimisable but isn't very important because often
 * DMA peripherals forbid this kind of transfer and even when they don't,
 * operating on them is slower than on a single FIFO.  */
static void *fifo_buf;
static int fifo_size;
static void transfer_fifo2fifo(struct soc_dma_ch_s *ch)
{
    if (ch->bytes > fifo_size)
        fifo_buf = g_realloc(fifo_buf, fifo_size = ch->bytes);

    /* Implement as transfer_fifo2linear + transfer_linear2fifo.  */
    ch->io_fn[0](ch->io_opaque[0], fifo_buf, ch->bytes);
    ch->io_fn[1](ch->io_opaque[1], fifo_buf, ch->bytes);
}
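/* Illustrative sketch, not part of this file: the shape of an I/O
 * callback as invoked by the transfer routines above.  soc_dma_io_t
 * receives the device opaque, a buffer and a byte count; this
 * hypothetical UART-receive callback ("my_uart_s", "my_uart_pop_byte")
 * would fill "buf" with "len" bytes for a fifo2mem transfer. */
static void my_uart_rx_fifo(void *opaque, uint8_t *buf, int len)
{
    struct my_uart_s *uart = opaque;        /* hypothetical device state */

    while (len --)
        *buf ++ = my_uart_pop_byte(uart);   /* hypothetical FIFO pop */
}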
struct dma_s {
    struct soc_dma_s soc;
    int chnum;
    uint64_t ch_enable_mask;
    int64_t channel_freq;
    int enabled_count;

    /* Sorted table of address ranges (FIFOs and linear memory regions)
     * that channels may target. */
    struct memmap_entry_s {
        enum soc_dma_port_type type;
        hwaddr addr;
        union {
            struct {
                void *opaque;
                soc_dma_io_t fn;
                int out;
            } fifo;
            struct {
                void *base;
                size_t size;
            } mem;
        } u;
    } *memmap;
    int memmap_size;

    struct soc_dma_ch_s ch[];
};
static void soc_dma_ch_schedule(struct soc_dma_ch_s *ch, int delay_bytes)
{
    int64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    struct dma_s *dma = (struct dma_s *) ch->dma;

    timer_mod(ch->timer, now + delay_bytes / dma->channel_freq);
}
static void soc_dma_ch_run(void *opaque)
{
    struct soc_dma_ch_s *ch = (struct soc_dma_ch_s *) opaque;

    ch->running = 1;
    ch->dma->setup_fn(ch);
    ch->transfer_fn(ch);
    ch->running = 0;

    if (ch->enable)
        soc_dma_ch_schedule(ch, ch->bytes);
    ch->bytes = 0;
}
/* Binary-search the sorted memmap for the entry whose start address is
 * the greatest one not exceeding "addr". */
static inline struct memmap_entry_s *soc_dma_lookup(struct dma_s *dma,
                hwaddr addr)
{
    struct memmap_entry_s *lo;
    int hi;

    lo = dma->memmap;
    hi = dma->memmap_size;

    while (hi > 1) {
        hi /= 2;
        if (lo[hi].addr <= addr)
            lo += hi;
    }

    return lo;
}
static inline enum soc_dma_port_type soc_dma_ch_update_type(
                struct soc_dma_ch_s *ch, int port)
{
    struct dma_s *dma = (struct dma_s *) ch->dma;
    struct memmap_entry_s *entry = soc_dma_lookup(dma, ch->vaddr[port]);

    if (entry->type == soc_dma_port_fifo) {
        /* Several FIFOs can share an address; find the one registered
         * for this port's direction. */
        while (entry < dma->memmap + dma->memmap_size &&
                        entry->u.fifo.out != port)
            entry ++;
        if (entry->addr != ch->vaddr[port] || entry->u.fifo.out != port)
            return soc_dma_port_other;

        if (ch->type[port] != soc_dma_access_const)
            return soc_dma_port_other;

        ch->io_fn[port] = entry->u.fifo.fn;
        ch->io_opaque[port] = entry->u.fifo.opaque;
        return soc_dma_port_fifo;
    } else if (entry->type == soc_dma_port_mem) {
        if (entry->addr > ch->vaddr[port] ||
                        entry->addr + entry->u.mem.size <= ch->vaddr[port])
            return soc_dma_port_other;

        if (ch->type[port] != soc_dma_access_const)
            return soc_dma_port_other;

        /* Translate the channel's bus address into a host pointer
         * within the registered region. */
        ch->paddr[port] = (uint8_t *) entry->u.mem.base +
                (ch->vaddr[port] - entry->addr);
        return soc_dma_port_mem;
    }

    return soc_dma_port_other;
}
void soc_dma_ch_update(struct soc_dma_ch_s *ch)
{
    enum soc_dma_port_type src, dst;

    src = soc_dma_ch_update_type(ch, 0);
    if (src == soc_dma_port_other) {
        ch->update = 0;
        ch->transfer_fn = ch->dma->transfer_fn;
        return;
    }
    dst = soc_dma_ch_update_type(ch, 1);

    /* Pick the specialised routine for this source/destination pair,
     * falling back to the controller's generic transfer function. */
    if (src == soc_dma_port_mem && dst == soc_dma_port_mem)
        ch->transfer_fn = transfer_mem2mem;
    else if (src == soc_dma_port_mem && dst == soc_dma_port_fifo)
        ch->transfer_fn = transfer_mem2fifo;
    else if (src == soc_dma_port_fifo && dst == soc_dma_port_mem)
        ch->transfer_fn = transfer_fifo2mem;
    else if (src == soc_dma_port_fifo && dst == soc_dma_port_fifo)
        ch->transfer_fn = transfer_fifo2fifo;
    else
        ch->transfer_fn = ch->dma->transfer_fn;

    ch->update = (dst != soc_dma_port_other);
}
static void soc_dma_ch_freq_update(struct dma_s *s)
{
    if (s->enabled_count)
        /* We completely ignore channel priorities and stuff */
        s->channel_freq = s->soc.freq / s->enabled_count;
    else {
        /* TODO: Signal that we want to disable the functional clock and let
         * the platform code decide what to do with it, i.e. check that
         * auto-idle is enabled in the clock controller and if we are stopping
         * the clock, do the same with any parent clocks that had only one
         * user keeping them on and auto-idle enabled. */
    }
}
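/* Worked example (illustrative, not from this file): the functional
 * clock is shared evenly among enabled channels, so with soc.freq at
 * 100000000 (100 MHz) and two channels enabled, channel_freq becomes
 * 50000000 bytes/s; a 1000-byte tick then accounts for
 * 1000 / 50000000 s = 20 us of bus time on that channel. */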
void soc_dma_set_request(struct soc_dma_ch_s *ch, int level)
{
    struct dma_s *dma = (struct dma_s *) ch->dma;

    dma->enabled_count += level - ch->enable;

    if (level)
        dma->ch_enable_mask |= (uint64_t)1 << ch->num;
    else
        dma->ch_enable_mask &= ~((uint64_t)1 << ch->num);

    if (level != ch->enable) {
        soc_dma_ch_freq_update(dma);
        ch->enable = level;

        if (!ch->enable)
            timer_del(ch->timer);
        else if (!ch->running)
            soc_dma_ch_run(ch);
    }
}
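/* Illustrative sketch, not part of this file: routing a device DRQ line
 * into the framework from a qemu_irq handler.  The handler name and the
 * one-to-one DRQ-to-channel mapping are hypothetical. */
static void my_dma_drq(void *opaque, int drq, int level)
{
    struct soc_dma_s *soc = opaque;

    /* Enable or disable the channel wired to this request line. */
    soc_dma_set_request(&soc->ch[drq], level);
}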
void soc_dma_reset(struct soc_dma_s *soc)
{
    struct dma_s *s = (struct dma_s *) soc;

    s->soc.drqbmp = 0;
    s->ch_enable_mask = 0;
    s->enabled_count = 0;
    soc_dma_ch_freq_update(s);
}
/* TODO: take a functional-clock argument */
struct soc_dma_s *soc_dma_init(int n)
{
    int i;
    struct dma_s *s = g_malloc0(sizeof(*s) + n * sizeof(*s->ch));

    s->chnum = n;
    s->soc.ch = s->ch;
    for (i = 0; i < n; i ++) {
        s->ch[i].dma = &s->soc;
        s->ch[i].num = i;
        s->ch[i].timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                        soc_dma_ch_run, &s->ch[i]);
    }

    soc_dma_reset(&s->soc);
    fifo_size = 0;

    return &s->soc;
}
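/* Illustrative sketch, not part of this file: how a machine model might
 * create and configure the controller.  The 100 MHz clock and the
 * "my_dma_*" callbacks are hypothetical. */
static struct soc_dma_s *my_dma_create(void)
{
    struct soc_dma_s *dma = soc_dma_init(4);    /* four channels */

    dma->freq = 100000000;              /* functional clock, in Hz */
    dma->setup_fn = my_dma_setup;       /* latch per-run addresses/bytes */
    dma->transfer_fn = my_dma_transfer; /* fallback for "other" ports */

    return dma;
}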
void soc_dma_port_add_fifo(struct soc_dma_s *soc, hwaddr virt_base,
                soc_dma_io_t fn, void *opaque, int out)
{
    struct memmap_entry_s *entry;
    struct dma_s *dma = (struct dma_s *) soc;

    dma->memmap = g_realloc(dma->memmap, sizeof(*entry) *
                    (dma->memmap_size + 1));
    entry = soc_dma_lookup(dma, virt_base);

    if (dma->memmap_size) {
        if (entry->type == soc_dma_port_mem) {
            if (entry->addr <= virt_base &&
                            entry->addr + entry->u.mem.size > virt_base) {
                error_report("%s: FIFO at %"PRIx64
                             " collides with RAM region at %"PRIx64
                             "-%"PRIx64, __func__,
                             virt_base, entry->addr,
                             entry->addr + entry->u.mem.size);
                exit(-1);
            }

            if (entry->addr <= virt_base)
                entry ++;
        } else
            while (entry < dma->memmap + dma->memmap_size &&
                            entry->addr <= virt_base) {
                if (entry->addr == virt_base && entry->u.fifo.out == out) {
                    error_report("%s: FIFO at %"PRIx64
                                 " collides with FIFO at %"PRIx64,
                                 __func__, virt_base, entry->addr);
                    exit(-1);
                }

                entry ++;
            }

        /* Keep the memmap sorted: open a slot at the insertion point. */
        memmove(entry + 1, entry,
                        (uint8_t *) (dma->memmap + dma->memmap_size ++) -
                        (uint8_t *) entry);
    } else
        dma->memmap_size ++;

    entry->addr = virt_base;
    entry->type = soc_dma_port_fifo;
    entry->u.fifo.fn = fn;
    entry->u.fifo.opaque = opaque;
    entry->u.fifo.out = out;
}
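/* Illustrative usage, not part of this file: a device model registering
 * its transmit FIFO.  The address and names are hypothetical; out = 1
 * pairs the FIFO with a channel's destination port, following the
 * port-1-is-destination convention visible in transfer_mem2fifo(). */
soc_dma_port_add_fifo(dma, 0xfffb0000, my_uart_tx_fifo, uart, 1);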
void soc_dma_port_add_mem(struct soc_dma_s *soc, uint8_t *phys_base,
                hwaddr virt_base, size_t size)
{
    struct memmap_entry_s *entry;
    struct dma_s *dma = (struct dma_s *) soc;

    dma->memmap = g_realloc(dma->memmap, sizeof(*entry) *
                    (dma->memmap_size + 1));
    entry = soc_dma_lookup(dma, virt_base);

    if (dma->memmap_size) {
        if (entry->type == soc_dma_port_mem) {
            if ((entry->addr >= virt_base && entry->addr < virt_base + size) ||
                            (entry->addr <= virt_base &&
                             entry->addr + entry->u.mem.size > virt_base)) {
                error_report("%s: RAM at %"PRIx64 "-%"PRIx64
                             " collides with RAM region at %"PRIx64
                             "-%"PRIx64, __func__,
                             virt_base, virt_base + size,
                             entry->addr, entry->addr + entry->u.mem.size);
                exit(-1);
            }

            if (entry->addr <= virt_base)
                entry ++;
        } else {
            if (entry->addr >= virt_base &&
                            entry->addr < virt_base + size) {
                error_report("%s: RAM at %"PRIx64 "-%"PRIx64
                             " collides with FIFO at %"PRIx64,
                             __func__, virt_base, virt_base + size,
                             entry->addr);
                exit(-1);
            }

            while (entry < dma->memmap + dma->memmap_size &&
                            entry->addr <= virt_base)
                entry ++;
        }

        /* Keep the memmap sorted: open a slot at the insertion point. */
        memmove(entry + 1, entry,
                        (uint8_t *) (dma->memmap + dma->memmap_size ++) -
                        (uint8_t *) entry);
    } else
        dma->memmap_size ++;

    entry->addr = virt_base;
    entry->type = soc_dma_port_mem;
    entry->u.mem.base = phys_base;
    entry->u.mem.size = size;
}
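/* Illustrative usage, not part of this file: a board model exposing its
 * SDRAM to the DMA engine.  memory_region_get_ram_ptr() is the usual way
 * to obtain the host pointer backing a RAM MemoryRegion; the base
 * address and size here are hypothetical. */
soc_dma_port_add_mem(dma, memory_region_get_ram_ptr(sdram),
                     0x10000000, 0x02000000 /* 32 MiB */);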