xref: /openbmc/qemu/hw/dma/omap_dma.c (revision ebe15582)
1 /*
2  * TI OMAP DMA gigacell.
3  *
4  * Copyright (C) 2006-2008 Andrzej Zaborowski  <balrog@zabor.org>
5  * Copyright (C) 2007-2008 Lauro Ramos Venancio  <lauro.venancio@indt.org.br>
6  *
7  * This program is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU General Public License as
9  * published by the Free Software Foundation; either version 2 of
10  * the License, or (at your option) any later version.
11  *
12  * This program is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15  * GNU General Public License for more details.
16  *
17  * You should have received a copy of the GNU General Public License along
18  * with this program; if not, see <http://www.gnu.org/licenses/>.
19  */
20 #include "qemu/osdep.h"
21 #include "qemu/log.h"
22 #include "qemu/timer.h"
23 #include "hw/arm/omap.h"
24 #include "hw/irq.h"
25 #include "hw/arm/soc_dma.h"
26 
/* Per-channel state of the OMAP DMA controller.  Two-element arrays are
 * indexed by direction: [0] = source port, [1] = destination port.  */
struct omap_dma_channel_s {
    /* transfer data */
    int burst[2];
    int pack[2];
    int endian[2];
    int endian_lock[2];
    int translate[2];
    enum omap_dma_port port[2];
    hwaddr addr[2];                 /* programmed source/dest start address */
    omap_dma_addressing_t mode[2];
    uint32_t elements;              /* elements per frame (CEN) */
    uint16_t frames;                /* frames per block (CFN) */
    int32_t frame_index[2];
    int16_t element_index[2];
    int data_type;                  /* element size in bytes: 1, 2 or 4 */

    /* transfer type */
    int transparent_copy;
    int constant_fill;
    uint32_t color;                 /* fill value / transparent-copy colour key */
    int prefetch;

    /* auto init and linked channel data */
    int end_prog;
    int repeat;
    int auto_init;
    int link_enabled;
    int link_next_ch;               /* channel started when this block ends */

    /* interruption data */
    int interrupts;                 /* enabled interrupt mask (CICR) */
    int status;                     /* pending interrupt status (CSR) */
    int cstatus;

    /* state data */
    int active;                     /* transfer currently running */
    int enable;                     /* channel enabled by the guest */
    int sync;                       /* sync DMA request line; 0 = async */
    int src_sync;
    int pending_request;
    int waiting_end_prog;
    uint16_t cpc;                   /* progress counter: low 16 dest addr bits */
    int set_update;                 /* reload active_set on next activation */

    /* sync type */
    int fs;                         /* frame synchronization */
    int bs;                         /* block synchronization */

    /* compatibility */
    int omap_3_1_compatible_disable;

    qemu_irq irq;
    struct omap_dma_channel_s *sibling;

    /* Working copy of the programmed registers used while a transfer runs. */
    struct omap_dma_reg_set_s {
        hwaddr src, dest;           /* current element addresses */
        int frame;                  /* current frame number */
        int element;                /* current element within the frame */
        int pck_element;            /* current element within the packet */
        int frame_delta[2];         /* address step applied at frame end */
        int elem_delta[2];          /* address step applied per element */
        int frames;
        int elements;
        int pck_elements;
    } active_set;

    struct soc_dma_ch_s *dma;

    /* unused parameters */
    int write_mode;
    int priority;
    int interleave_disabled;
    int type;
    int suspend;
    int buf_disable;
};
103 
/* State of one OMAP DMA module: the generic soc_dma engine plus the
 * OMAP-specific register file and per-channel state.  */
struct omap_dma_s {
    struct soc_dma_s *dma;
    MemoryRegion iomem;

    struct omap_mpu_state_s *mpu;
    omap_clk clk;
    qemu_irq irq[4];
    /* Model-dependent hook raising IRQs from channel status (3.1 vs 3.2). */
    void (*intr_update)(struct omap_dma_s *s);
    enum omap_dma_model model;
    int omap_3_1_mapping_disabled;

    uint32_t gcr;
    uint32_t ocp;
    uint32_t caps[5];
    uint32_t irqen[4];
    uint32_t irqstat[4];

    int chans;                      /* number of channels in use */
    struct omap_dma_channel_s ch[32];
    struct omap_dma_lcd_channel_s lcd_ch;
};
125 
/* Interrupts — bit positions in ch->interrupts (enable) and ch->status.  */
#define TIMEOUT_INTR    (1 << 0)    /* bus time-out */
#define EVENT_DROP_INTR (1 << 1)    /* DMA request collision */
#define HALF_FRAME_INTR (1 << 2)
#define END_FRAME_INTR  (1 << 3)
#define LAST_FRAME_INTR (1 << 4)
#define END_BLOCK_INTR  (1 << 5)
#define SYNC            (1 << 6)    /* synchronization event received */
#define END_PKT_INTR	(1 << 7)
#define TRANS_ERR_INTR	(1 << 8)
#define MISALIGN_INTR	(1 << 11)
137 
/* Propagate pending channel status bits to the model-specific IRQ lines. */
static inline void omap_dma_interrupts_update(struct omap_dma_s *s)
{
    s->intr_update(s);
}
142 
143 static void omap_dma_channel_load(struct omap_dma_channel_s *ch)
144 {
145     struct omap_dma_reg_set_s *a = &ch->active_set;
146     int i, normal;
147     int omap_3_1 = !ch->omap_3_1_compatible_disable;
148 
149     /*
150      * TODO: verify address ranges and alignment
151      * TODO: port endianness
152      */
153 
154     a->src = ch->addr[0];
155     a->dest = ch->addr[1];
156     a->frames = ch->frames;
157     a->elements = ch->elements;
158     a->pck_elements = ch->frame_index[!ch->src_sync];
159     a->frame = 0;
160     a->element = 0;
161     a->pck_element = 0;
162 
163     if (unlikely(!ch->elements || !ch->frames)) {
164         printf("%s: bad DMA request\n", __func__);
165         return;
166     }
167 
168     for (i = 0; i < 2; i ++)
169         switch (ch->mode[i]) {
170         case constant:
171             a->elem_delta[i] = 0;
172             a->frame_delta[i] = 0;
173             break;
174         case post_incremented:
175             a->elem_delta[i] = ch->data_type;
176             a->frame_delta[i] = 0;
177             break;
178         case single_index:
179             a->elem_delta[i] = ch->data_type +
180                     ch->element_index[omap_3_1 ? 0 : i] - 1;
181             a->frame_delta[i] = 0;
182             break;
183         case double_index:
184             a->elem_delta[i] = ch->data_type +
185                     ch->element_index[omap_3_1 ? 0 : i] - 1;
186             a->frame_delta[i] = ch->frame_index[omap_3_1 ? 0 : i] -
187                     ch->element_index[omap_3_1 ? 0 : i];
188             break;
189         default:
190             break;
191         }
192 
193     normal = !ch->transparent_copy && !ch->constant_fill &&
194             /* FIFO is big-endian so either (ch->endian[n] == 1) OR
195              * (ch->endian_lock[n] == 1) mean no endianism conversion.  */
196             (ch->endian[0] | ch->endian_lock[0]) ==
197             (ch->endian[1] | ch->endian_lock[1]);
198     for (i = 0; i < 2; i ++) {
199         /* TODO: for a->frame_delta[i] > 0 still use the fast path, just
200          * limit min_elems in omap_dma_transfer_setup to the nearest frame
201          * end.  */
202         if (!a->elem_delta[i] && normal &&
203                         (a->frames == 1 || !a->frame_delta[i]))
204             ch->dma->type[i] = soc_dma_access_const;
205         else if (a->elem_delta[i] == ch->data_type && normal &&
206                         (a->frames == 1 || !a->frame_delta[i]))
207             ch->dma->type[i] = soc_dma_access_linear;
208         else
209             ch->dma->type[i] = soc_dma_access_other;
210 
211         ch->dma->vaddr[i] = ch->addr[i];
212     }
213     soc_dma_ch_update(ch->dma);
214 }
215 
216 static void omap_dma_activate_channel(struct omap_dma_s *s,
217                 struct omap_dma_channel_s *ch)
218 {
219     if (!ch->active) {
220         if (ch->set_update) {
221             /* It's not clear when the active set is supposed to be
222              * loaded from registers.  We're already loading it when the
223              * channel is enabled, and for some guests this is not enough
224              * but that may be also because of a race condition (no
225              * delays in qemu) in the guest code, which we're just
226              * working around here.  */
227             omap_dma_channel_load(ch);
228             ch->set_update = 0;
229         }
230 
231         ch->active = 1;
232         soc_dma_set_request(ch->dma, 1);
233         if (ch->sync)
234             ch->status |= SYNC;
235     }
236 }
237 
238 static void omap_dma_deactivate_channel(struct omap_dma_s *s,
239                 struct omap_dma_channel_s *ch)
240 {
241     /* Update cpc */
242     ch->cpc = ch->active_set.dest & 0xffff;
243 
244     if (ch->pending_request && !ch->waiting_end_prog && ch->enable) {
245         /* Don't deactivate the channel */
246         ch->pending_request = 0;
247         return;
248     }
249 
250     /* Don't deactive the channel if it is synchronized and the DMA request is
251        active */
252     if (ch->sync && ch->enable && (s->dma->drqbmp & (1ULL << ch->sync)))
253         return;
254 
255     if (ch->active) {
256         ch->active = 0;
257         ch->status &= ~SYNC;
258         soc_dma_set_request(ch->dma, 0);
259     }
260 }
261 
262 static void omap_dma_enable_channel(struct omap_dma_s *s,
263                 struct omap_dma_channel_s *ch)
264 {
265     if (!ch->enable) {
266         ch->enable = 1;
267         ch->waiting_end_prog = 0;
268         omap_dma_channel_load(ch);
269         /* TODO: theoretically if ch->sync && ch->prefetch &&
270          * !s->dma->drqbmp[ch->sync], we should also activate and fetch
271          * from source and then stall until signalled.  */
272         if ((!ch->sync) || (s->dma->drqbmp & (1ULL << ch->sync))) {
273             omap_dma_activate_channel(s, ch);
274         }
275     }
276 }
277 
278 static void omap_dma_disable_channel(struct omap_dma_s *s,
279                 struct omap_dma_channel_s *ch)
280 {
281     if (ch->enable) {
282         ch->enable = 0;
283         /* Discard any pending request */
284         ch->pending_request = 0;
285         omap_dma_deactivate_channel(s, ch);
286     }
287 }
288 
289 static void omap_dma_channel_end_prog(struct omap_dma_s *s,
290                 struct omap_dma_channel_s *ch)
291 {
292     if (ch->waiting_end_prog) {
293         ch->waiting_end_prog = 0;
294         if (!ch->sync || ch->pending_request) {
295             ch->pending_request = 0;
296             omap_dma_activate_channel(s, ch);
297         }
298     }
299 }
300 
301 static void omap_dma_interrupts_3_1_update(struct omap_dma_s *s)
302 {
303     struct omap_dma_channel_s *ch = s->ch;
304 
305     /* First three interrupts are shared between two channels each. */
306     if (ch[0].status | ch[6].status)
307         qemu_irq_raise(ch[0].irq);
308     if (ch[1].status | ch[7].status)
309         qemu_irq_raise(ch[1].irq);
310     if (ch[2].status | ch[8].status)
311         qemu_irq_raise(ch[2].irq);
312     if (ch[3].status)
313         qemu_irq_raise(ch[3].irq);
314     if (ch[4].status)
315         qemu_irq_raise(ch[4].irq);
316     if (ch[5].status)
317         qemu_irq_raise(ch[5].irq);
318 }
319 
320 static void omap_dma_interrupts_3_2_update(struct omap_dma_s *s)
321 {
322     struct omap_dma_channel_s *ch = s->ch;
323     int i;
324 
325     for (i = s->chans; i; ch ++, i --)
326         if (ch->status)
327             qemu_irq_raise(ch->irq);
328 }
329 
/* Select the OMAP 3.1 compatibility layout: 9 channels, shared IRQ lines. */
static void omap_dma_enable_3_1_mapping(struct omap_dma_s *s)
{
    s->omap_3_1_mapping_disabled = 0;
    s->chans = 9;
    s->intr_update = omap_dma_interrupts_3_1_update;
}
336 
/* Select the OMAP 3.2 layout: 16 channels, one IRQ line per channel. */
static void omap_dma_disable_3_1_mapping(struct omap_dma_s *s)
{
    s->omap_3_1_mapping_disabled = 1;
    s->chans = 16;
    s->intr_update = omap_dma_interrupts_3_2_update;
}
343 
344 static void omap_dma_process_request(struct omap_dma_s *s, int request)
345 {
346     int channel;
347     int drop_event = 0;
348     struct omap_dma_channel_s *ch = s->ch;
349 
350     for (channel = 0; channel < s->chans; channel ++, ch ++) {
351         if (ch->enable && ch->sync == request) {
352             if (!ch->active)
353                 omap_dma_activate_channel(s, ch);
354             else if (!ch->pending_request)
355                 ch->pending_request = 1;
356             else {
357                 /* Request collision */
358                 /* Second request received while processing other request */
359                 ch->status |= EVENT_DROP_INTR;
360                 drop_event = 1;
361             }
362         }
363     }
364 
365     if (drop_event)
366         omap_dma_interrupts_update(s);
367 }
368 
/*
 * soc_dma transfer callback (slow path): move dma->bytes of data one
 * element at a time, applying the element/frame address deltas from the
 * active set.  In the default (non-MULTI_REQ) build only frame and cpc
 * bookkeeping happens here; interrupt and deactivation logic is handled
 * up front in omap_dma_transfer_setup().
 */
static void omap_dma_transfer_generic(struct soc_dma_ch_s *dma)
{
    uint8_t value[4];
    struct omap_dma_channel_s *ch = dma->opaque;
    struct omap_dma_reg_set_s *a = &ch->active_set;
    int bytes = dma->bytes;
#ifdef MULTI_REQ
    /* Snapshot so the loop can stop as soon as any new status bit is set. */
    uint16_t status = ch->status;
#endif

    do {
        /* Transfer a single element */
        /* FIXME: check the endianness */
        if (!ch->constant_fill)
            cpu_physical_memory_read(a->src, value, ch->data_type);
        else
            *(uint32_t *) value = ch->color;

        /* Transparent copy skips elements equal to the colour key. */
        if (!ch->transparent_copy || *(uint32_t *) value != ch->color)
            cpu_physical_memory_write(a->dest, value, ch->data_type);

        a->src += a->elem_delta[0];
        a->dest += a->elem_delta[1];
        a->element ++;

#ifndef MULTI_REQ
        if (a->element == a->elements) {
            /* End of Frame */
            a->element = 0;
            a->src += a->frame_delta[0];
            a->dest += a->frame_delta[1];
            a->frame ++;

            /* If the channel is async, update cpc */
            if (!ch->sync)
                ch->cpc = a->dest & 0xffff;
        }
    } while ((bytes -= ch->data_type));
#else
        /* NOTE(review): this MULTI_REQ path references an 's' that is not
         * declared in this function, so it cannot compile as written —
         * verify before ever enabling MULTI_REQ.  */
        /* If the channel is element synchronized, deactivate it */
        if (ch->sync && !ch->fs && !ch->bs)
            omap_dma_deactivate_channel(s, ch);

        /* If it is the last frame, set the LAST_FRAME interrupt */
        if (a->element == 1 && a->frame == a->frames - 1)
            if (ch->interrupts & LAST_FRAME_INTR)
                ch->status |= LAST_FRAME_INTR;

        /* If the half of the frame was reached, set the HALF_FRAME
           interrupt */
        if (a->element == (a->elements >> 1))
            if (ch->interrupts & HALF_FRAME_INTR)
                ch->status |= HALF_FRAME_INTR;

        if (ch->fs && ch->bs) {
            a->pck_element ++;
            /* Check if a full packet has beed transferred.  */
            if (a->pck_element == a->pck_elements) {
                a->pck_element = 0;

                /* Set the END_PKT interrupt */
                if ((ch->interrupts & END_PKT_INTR) && !ch->src_sync)
                    ch->status |= END_PKT_INTR;

                /* If the channel is packet-synchronized, deactivate it */
                if (ch->sync)
                    omap_dma_deactivate_channel(s, ch);
            }
        }

        if (a->element == a->elements) {
            /* End of Frame */
            a->element = 0;
            a->src += a->frame_delta[0];
            a->dest += a->frame_delta[1];
            a->frame ++;

            /* If the channel is frame synchronized, deactivate it */
            if (ch->sync && ch->fs && !ch->bs)
                omap_dma_deactivate_channel(s, ch);

            /* If the channel is async, update cpc */
            if (!ch->sync)
                ch->cpc = a->dest & 0xffff;

            /* Set the END_FRAME interrupt */
            if (ch->interrupts & END_FRAME_INTR)
                ch->status |= END_FRAME_INTR;

            if (a->frame == a->frames) {
                /* End of Block */
                /* Disable the channel */

                if (ch->omap_3_1_compatible_disable) {
                    omap_dma_disable_channel(s, ch);
                    if (ch->link_enabled)
                        omap_dma_enable_channel(s,
                                        &s->ch[ch->link_next_ch]);
                } else {
                    if (!ch->auto_init)
                        omap_dma_disable_channel(s, ch);
                    else if (ch->repeat || ch->end_prog)
                        omap_dma_channel_load(ch);
                    else {
                        ch->waiting_end_prog = 1;
                        omap_dma_deactivate_channel(s, ch);
                    }
                }

                if (ch->interrupts & END_BLOCK_INTR)
                    ch->status |= END_BLOCK_INTR;
            }
        }
    } while (status == ch->status && ch->active);

    omap_dma_interrupts_update(s);
#endif
}
487 
/* Indices into the elements[] array in omap_dma_transfer_setup(): each
 * slot holds the number of elements remaining until that event fires.  */
enum {
    omap_dma_intr_element_sync,
    omap_dma_intr_last_frame,
    omap_dma_intr_half_frame,
    omap_dma_intr_frame,
    omap_dma_intr_frame_sync,
    omap_dma_intr_packet,
    omap_dma_intr_packet_sync,
    omap_dma_intr_block,
    __omap_dma_intr_last,
};
499 
500 static void omap_dma_transfer_setup(struct soc_dma_ch_s *dma)
501 {
502     struct omap_dma_port_if_s *src_p, *dest_p;
503     struct omap_dma_reg_set_s *a;
504     struct omap_dma_channel_s *ch = dma->opaque;
505     struct omap_dma_s *s = dma->dma->opaque;
506     int frames, min_elems, elements[__omap_dma_intr_last];
507 
508     a = &ch->active_set;
509 
510     src_p = &s->mpu->port[ch->port[0]];
511     dest_p = &s->mpu->port[ch->port[1]];
512     if ((!ch->constant_fill && !src_p->addr_valid(s->mpu, a->src)) ||
513                     (!dest_p->addr_valid(s->mpu, a->dest))) {
514 #if 0
515         /* Bus time-out */
516         if (ch->interrupts & TIMEOUT_INTR)
517             ch->status |= TIMEOUT_INTR;
518         omap_dma_deactivate_channel(s, ch);
519         continue;
520 #endif
521         printf("%s: Bus time-out in DMA%i operation\n",
522                         __func__, dma->num);
523     }
524 
525     min_elems = INT_MAX;
526 
527     /* Check all the conditions that terminate the transfer starting
528      * with those that can occur the soonest.  */
529 #define INTR_CHECK(cond, id, nelements)	\
530     if (cond) {			\
531         elements[id] = nelements;	\
532         if (elements[id] < min_elems)	\
533             min_elems = elements[id];	\
534     } else				\
535         elements[id] = INT_MAX;
536 
537     /* Elements */
538     INTR_CHECK(
539                     ch->sync && !ch->fs && !ch->bs,
540                     omap_dma_intr_element_sync,
541                     1)
542 
543     /* Frames */
544     /* TODO: for transfers where entire frames can be read and written
545      * using memcpy() but a->frame_delta is non-zero, try to still do
546      * transfers using soc_dma but limit min_elems to a->elements - ...
547      * See also the TODO in omap_dma_channel_load.  */
548     INTR_CHECK(
549                     (ch->interrupts & LAST_FRAME_INTR) &&
550                     ((a->frame < a->frames - 1) || !a->element),
551                     omap_dma_intr_last_frame,
552                     (a->frames - a->frame - 2) * a->elements +
553                     (a->elements - a->element + 1))
554     INTR_CHECK(
555                     ch->interrupts & HALF_FRAME_INTR,
556                     omap_dma_intr_half_frame,
557                     (a->elements >> 1) +
558                     (a->element >= (a->elements >> 1) ? a->elements : 0) -
559                     a->element)
560     INTR_CHECK(
561                     ch->sync && ch->fs && (ch->interrupts & END_FRAME_INTR),
562                     omap_dma_intr_frame,
563                     a->elements - a->element)
564     INTR_CHECK(
565                     ch->sync && ch->fs && !ch->bs,
566                     omap_dma_intr_frame_sync,
567                     a->elements - a->element)
568 
569     /* Packets */
570     INTR_CHECK(
571                     ch->fs && ch->bs &&
572                     (ch->interrupts & END_PKT_INTR) && !ch->src_sync,
573                     omap_dma_intr_packet,
574                     a->pck_elements - a->pck_element)
575     INTR_CHECK(
576                     ch->fs && ch->bs && ch->sync,
577                     omap_dma_intr_packet_sync,
578                     a->pck_elements - a->pck_element)
579 
580     /* Blocks */
581     INTR_CHECK(
582                     1,
583                     omap_dma_intr_block,
584                     (a->frames - a->frame - 1) * a->elements +
585                     (a->elements - a->element))
586 
587     dma->bytes = min_elems * ch->data_type;
588 
589     /* Set appropriate interrupts and/or deactivate channels */
590 
591 #ifdef MULTI_REQ
592     /* TODO: should all of this only be done if dma->update, and otherwise
593      * inside omap_dma_transfer_generic below - check what's faster.  */
594     if (dma->update) {
595 #endif
596 
597         /* If the channel is element synchronized, deactivate it */
598         if (min_elems == elements[omap_dma_intr_element_sync])
599             omap_dma_deactivate_channel(s, ch);
600 
601         /* If it is the last frame, set the LAST_FRAME interrupt */
602         if (min_elems == elements[omap_dma_intr_last_frame])
603             ch->status |= LAST_FRAME_INTR;
604 
605         /* If exactly half of the frame was reached, set the HALF_FRAME
606            interrupt */
607         if (min_elems == elements[omap_dma_intr_half_frame])
608             ch->status |= HALF_FRAME_INTR;
609 
610         /* If a full packet has been transferred, set the END_PKT interrupt */
611         if (min_elems == elements[omap_dma_intr_packet])
612             ch->status |= END_PKT_INTR;
613 
614         /* If the channel is packet-synchronized, deactivate it */
615         if (min_elems == elements[omap_dma_intr_packet_sync])
616             omap_dma_deactivate_channel(s, ch);
617 
618         /* If the channel is frame synchronized, deactivate it */
619         if (min_elems == elements[omap_dma_intr_frame_sync])
620             omap_dma_deactivate_channel(s, ch);
621 
622         /* Set the END_FRAME interrupt */
623         if (min_elems == elements[omap_dma_intr_frame])
624             ch->status |= END_FRAME_INTR;
625 
626         if (min_elems == elements[omap_dma_intr_block]) {
627             /* End of Block */
628             /* Disable the channel */
629 
630             if (ch->omap_3_1_compatible_disable) {
631                 omap_dma_disable_channel(s, ch);
632                 if (ch->link_enabled)
633                     omap_dma_enable_channel(s, &s->ch[ch->link_next_ch]);
634             } else {
635                 if (!ch->auto_init)
636                     omap_dma_disable_channel(s, ch);
637                 else if (ch->repeat || ch->end_prog)
638                     omap_dma_channel_load(ch);
639                 else {
640                     ch->waiting_end_prog = 1;
641                     omap_dma_deactivate_channel(s, ch);
642                 }
643             }
644 
645             if (ch->interrupts & END_BLOCK_INTR)
646                 ch->status |= END_BLOCK_INTR;
647         }
648 
649         /* Update packet number */
650         if (ch->fs && ch->bs) {
651             a->pck_element += min_elems;
652             a->pck_element %= a->pck_elements;
653         }
654 
655         /* TODO: check if we really need to update anything here or perhaps we
656          * can skip part of this.  */
657 #ifndef MULTI_REQ
658         if (dma->update) {
659 #endif
660             a->element += min_elems;
661 
662             frames = a->element / a->elements;
663             a->element = a->element % a->elements;
664             a->frame += frames;
665             a->src += min_elems * a->elem_delta[0] + frames * a->frame_delta[0];
666             a->dest += min_elems * a->elem_delta[1] + frames * a->frame_delta[1];
667 
668             /* If the channel is async, update cpc */
669             if (!ch->sync && frames)
670                 ch->cpc = a->dest & 0xffff;
671 
672             /* TODO: if the destination port is IMIF or EMIFF, set the dirty
673              * bits on it.  */
674 #ifndef MULTI_REQ
675         }
676 #else
677     }
678 #endif
679 
680     omap_dma_interrupts_update(s);
681 }
682 
683 void omap_dma_reset(struct soc_dma_s *dma)
684 {
685     int i;
686     struct omap_dma_s *s = dma->opaque;
687 
688     soc_dma_reset(s->dma);
689     if (s->model < omap_dma_4)
690         s->gcr = 0x0004;
691     else
692         s->gcr = 0x00010010;
693     s->ocp = 0x00000000;
694     memset(&s->irqstat, 0, sizeof(s->irqstat));
695     memset(&s->irqen, 0, sizeof(s->irqen));
696     s->lcd_ch.src = emiff;
697     s->lcd_ch.condition = 0;
698     s->lcd_ch.interrupts = 0;
699     s->lcd_ch.dual = 0;
700     if (s->model < omap_dma_4)
701         omap_dma_enable_3_1_mapping(s);
702     for (i = 0; i < s->chans; i ++) {
703         s->ch[i].suspend = 0;
704         s->ch[i].prefetch = 0;
705         s->ch[i].buf_disable = 0;
706         s->ch[i].src_sync = 0;
707         memset(&s->ch[i].burst, 0, sizeof(s->ch[i].burst));
708         memset(&s->ch[i].port, 0, sizeof(s->ch[i].port));
709         memset(&s->ch[i].mode, 0, sizeof(s->ch[i].mode));
710         memset(&s->ch[i].frame_index, 0, sizeof(s->ch[i].frame_index));
711         memset(&s->ch[i].element_index, 0, sizeof(s->ch[i].element_index));
712         memset(&s->ch[i].endian, 0, sizeof(s->ch[i].endian));
713         memset(&s->ch[i].endian_lock, 0, sizeof(s->ch[i].endian_lock));
714         memset(&s->ch[i].translate, 0, sizeof(s->ch[i].translate));
715         s->ch[i].write_mode = 0;
716         s->ch[i].data_type = 0;
717         s->ch[i].transparent_copy = 0;
718         s->ch[i].constant_fill = 0;
719         s->ch[i].color = 0x00000000;
720         s->ch[i].end_prog = 0;
721         s->ch[i].repeat = 0;
722         s->ch[i].auto_init = 0;
723         s->ch[i].link_enabled = 0;
724         if (s->model < omap_dma_4)
725             s->ch[i].interrupts = 0x0003;
726         else
727             s->ch[i].interrupts = 0x0000;
728         s->ch[i].status = 0;
729         s->ch[i].cstatus = 0;
730         s->ch[i].active = 0;
731         s->ch[i].enable = 0;
732         s->ch[i].sync = 0;
733         s->ch[i].pending_request = 0;
734         s->ch[i].waiting_end_prog = 0;
735         s->ch[i].cpc = 0x0000;
736         s->ch[i].fs = 0;
737         s->ch[i].bs = 0;
738         s->ch[i].omap_3_1_compatible_disable = 0;
739         memset(&s->ch[i].active_set, 0, sizeof(s->ch[i].active_set));
740         s->ch[i].priority = 0;
741         s->ch[i].interleave_disabled = 0;
742         s->ch[i].type = 0;
743     }
744 }
745 
/*
 * Read a 16-bit channel register at offset @reg into *@value.
 * Returns 0 on success, 1 when the offset does not map to a register
 * (the caller is expected to flag a bad access).
 */
static int omap_dma_ch_reg_read(struct omap_dma_s *s,
                struct omap_dma_channel_s *ch, int reg, uint16_t *value)
{
    switch (reg) {
    case 0x00:	/* SYS_DMA_CSDP_CH0 */
        *value = (ch->burst[1] << 14) |
                (ch->pack[1] << 13) |
                (ch->port[1] << 9) |
                (ch->burst[0] << 7) |
                (ch->pack[0] << 6) |
                (ch->port[0] << 2) |
                (ch->data_type >> 1);
        break;

    case 0x02:	/* SYS_DMA_CCR_CH0 */
        if (s->model <= omap_dma_3_1)
            *value = 0 << 10;			/* FIFO_FLUSH reads as 0 */
        else
            *value = ch->omap_3_1_compatible_disable << 10;
        *value |= (ch->mode[1] << 14) |
                (ch->mode[0] << 12) |
                (ch->end_prog << 11) |
                (ch->repeat << 9) |
                (ch->auto_init << 8) |
                (ch->enable << 7) |
                (ch->priority << 6) |
                (ch->fs << 5) | ch->sync;
        break;

    case 0x04:	/* SYS_DMA_CICR_CH0 */
        *value = ch->interrupts;
        break;

    case 0x06:	/* SYS_DMA_CSR_CH0 */
        /* Read-to-clear: everything except the SYNC bit is cleared, the
         * sibling channel's status is folded into the upper bits in
         * 3.1-compatible mode, and the IRQ line is lowered.  */
        *value = ch->status;
        ch->status &= SYNC;
        if (!ch->omap_3_1_compatible_disable && ch->sibling) {
            *value |= (ch->sibling->status & 0x3f) << 6;
            ch->sibling->status &= SYNC;
        }
        qemu_irq_lower(ch->irq);
        break;

    case 0x08:	/* SYS_DMA_CSSA_L_CH0 */
        *value = ch->addr[0] & 0x0000ffff;
        break;

    case 0x0a:	/* SYS_DMA_CSSA_U_CH0 */
        *value = ch->addr[0] >> 16;
        break;

    case 0x0c:	/* SYS_DMA_CDSA_L_CH0 */
        *value = ch->addr[1] & 0x0000ffff;
        break;

    case 0x0e:	/* SYS_DMA_CDSA_U_CH0 */
        *value = ch->addr[1] >> 16;
        break;

    case 0x10:	/* SYS_DMA_CEN_CH0 */
        *value = ch->elements;
        break;

    case 0x12:	/* SYS_DMA_CFN_CH0 */
        *value = ch->frames;
        break;

    case 0x14:	/* SYS_DMA_CFI_CH0 */
        *value = ch->frame_index[0];
        break;

    case 0x16:	/* SYS_DMA_CEI_CH0 */
        *value = ch->element_index[0];
        break;

    case 0x18:	/* SYS_DMA_CPC_CH0 or DMA_CSAC */
        if (ch->omap_3_1_compatible_disable)
            *value = ch->active_set.src & 0xffff;	/* CSAC */
        else
            *value = ch->cpc;
        break;

    case 0x1a:	/* DMA_CDAC */
        *value = ch->active_set.dest & 0xffff;	/* CDAC */
        break;

    case 0x1c:	/* DMA_CDEI */
        *value = ch->element_index[1];
        break;

    case 0x1e:	/* DMA_CDFI */
        *value = ch->frame_index[1];
        break;

    case 0x20:	/* DMA_COLOR_L */
        *value = ch->color & 0xffff;
        break;

    case 0x22:	/* DMA_COLOR_U */
        *value = ch->color >> 16;
        break;

    case 0x24:	/* DMA_CCR2 */
        *value = (ch->bs << 2) |
                (ch->transparent_copy << 1) |
                ch->constant_fill;
        break;

    case 0x28:	/* DMA_CLNK_CTRL */
        *value = (ch->link_enabled << 15) |
                (ch->link_next_ch & 0xf);
        break;

    case 0x2a:	/* DMA_LCH_CTRL */
        *value = (ch->interleave_disabled << 15) |
                ch->type;
        break;

    default:
        return 1;
    }
    return 0;
}
869 
/*
 * Handle a 16-bit guest write to one OMAP1 logical DMA channel register.
 * @reg is the register offset inside the channel's 0x40-byte window.
 * Returns 0 when the register was handled, nonzero for an unknown offset
 * (the caller then reports a bad register access).
 *
 * Index 0 of the paired per-channel arrays refers to the source port and
 * index 1 to the destination port (see the CSSA/CDSA address cases below).
 */
static int omap_dma_ch_reg_write(struct omap_dma_s *s,
                struct omap_dma_channel_s *ch, int reg, uint16_t value)
{
    switch (reg) {
    case 0x00:	/* SYS_DMA_CSDP_CH0 */
        ch->burst[1] = (value & 0xc000) >> 14;
        ch->pack[1] = (value & 0x2000) >> 13;
        ch->port[1] = (enum omap_dma_port) ((value & 0x1e00) >> 9);
        ch->burst[0] = (value & 0x0180) >> 7;
        ch->pack[0] = (value & 0x0040) >> 6;
        ch->port[0] = (enum omap_dma_port) ((value & 0x003c) >> 2);
        /* Out-of-range port selections are only logged, not rejected. */
        if (ch->port[0] >= __omap_dma_port_last) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid DMA port %i\n",
                          __func__, ch->port[0]);
        }
        if (ch->port[1] >= __omap_dma_port_last) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid DMA port %i\n",
                          __func__, ch->port[1]);
        }
        /* data_type is stored as the element size in bytes (1, 2 or 4);
         * the encoding 3 is reserved and demoted to a 4-byte size here. */
        ch->data_type = 1 << (value & 3);
        if ((value & 3) == 3) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: bad data_type for DMA channel\n", __func__);
            ch->data_type >>= 1;
        }
        break;

    case 0x02:	/* SYS_DMA_CCR_CH0 */
        ch->mode[1] = (omap_dma_addressing_t) ((value & 0xc000) >> 14);
        ch->mode[0] = (omap_dma_addressing_t) ((value & 0x3000) >> 12);
        ch->end_prog = (value & 0x0800) >> 11;
        /* Bit 10 only exists on DMA 3.2 and later. */
        if (s->model >= omap_dma_3_2)
            ch->omap_3_1_compatible_disable  = (value >> 10) & 0x1;
        ch->repeat = (value & 0x0200) >> 9;
        ch->auto_init = (value & 0x0100) >> 8;
        ch->priority = (value & 0x0040) >> 6;
        ch->fs = (value & 0x0020) >> 5;
        ch->sync = value & 0x001f;

        /* Bit 7 is the channel enable bit. */
        if (value & 0x0080)
            omap_dma_enable_channel(s, ch);
        else
            omap_dma_disable_channel(s, ch);

        if (ch->end_prog)
            omap_dma_channel_end_prog(s, ch);

        break;

    case 0x04:	/* SYS_DMA_CICR_CH0 */
        /* Interrupt enable mask for this channel's events. */
        ch->interrupts = value & 0x3f;
        break;

    case 0x06:	/* SYS_DMA_CSR_CH0 */
        /* Status register is read-only; report the attempted write. */
        OMAP_RO_REG((hwaddr) reg);
        break;

    case 0x08:	/* SYS_DMA_CSSA_L_CH0 */
        /* Source start address, low half. */
        ch->addr[0] &= 0xffff0000;
        ch->addr[0] |= value;
        break;

    case 0x0a:	/* SYS_DMA_CSSA_U_CH0 */
        /* Source start address, high half. */
        ch->addr[0] &= 0x0000ffff;
        ch->addr[0] |= (uint32_t) value << 16;
        break;

    case 0x0c:	/* SYS_DMA_CDSA_L_CH0 */
        /* Destination start address, low half. */
        ch->addr[1] &= 0xffff0000;
        ch->addr[1] |= value;
        break;

    case 0x0e:	/* SYS_DMA_CDSA_U_CH0 */
        /* Destination start address, high half. */
        ch->addr[1] &= 0x0000ffff;
        ch->addr[1] |= (uint32_t) value << 16;
        break;

    case 0x10:	/* SYS_DMA_CEN_CH0 */
        ch->elements = value;
        break;

    case 0x12:	/* SYS_DMA_CFN_CH0 */
        ch->frames = value;
        break;

    case 0x14:	/* SYS_DMA_CFI_CH0 */
        /* Index registers are signed 16-bit offsets. */
        ch->frame_index[0] = (int16_t) value;
        break;

    case 0x16:	/* SYS_DMA_CEI_CH0 */
        ch->element_index[0] = (int16_t) value;
        break;

    case 0x18:	/* SYS_DMA_CPC_CH0 or DMA_CSAC */
        /* Progress counter is read-only; report the attempted write. */
        OMAP_RO_REG((hwaddr) reg);
        break;

    case 0x1c:	/* DMA_CDEI */
        ch->element_index[1] = (int16_t) value;
        break;

    case 0x1e:	/* DMA_CDFI */
        ch->frame_index[1] = (int16_t) value;
        break;

    case 0x20:	/* DMA_COLOR_L */
        ch->color &= 0xffff0000;
        ch->color |= value;
        break;

    case 0x22:	/* DMA_COLOR_U */
        ch->color &= 0xffff;
        ch->color |= (uint32_t)value << 16;
        break;

    case 0x24:	/* DMA_CCR2 */
        ch->bs = (value >> 2) & 0x1;
        ch->transparent_copy = (value >> 1) & 0x1;
        ch->constant_fill = value & 0x1;
        break;

    case 0x28:	/* DMA_CLNK_CTRL */
        ch->link_enabled = (value >> 15) & 0x1;
        if (value & (1 << 14)) {			/* Stop_Lnk */
            ch->link_enabled = 0;
            omap_dma_disable_channel(s, ch);
        }
        ch->link_next_ch = value & 0x1f;
        break;

    case 0x2a:	/* DMA_LCH_CTRL */
        ch->interleave_disabled = (value >> 15) & 0x1;
        ch->type = value & 0xf;
        break;

    default:
        /* Unknown channel register. */
        return 1;
    }
    return 0;
}
1010 
/*
 * Handle a 16-bit guest write to a DMA 3.2 LCD channel register
 * (offsets 0xbc0..0xbea).  Returns 0 when the offset was handled,
 * nonzero otherwise.  The _f1/_f2 suffixes refer to the two LCD
 * frame buffers; "brust" [sic] is the existing field name for the
 * burst configuration.
 */
static int omap_dma_3_2_lcd_write(struct omap_dma_lcd_channel_s *s, int offset,
                uint16_t value)
{
    switch (offset) {
    case 0xbc0:	/* DMA_LCD_CSDP */
        s->brust_f2 = (value >> 14) & 0x3;
        s->pack_f2 = (value >> 13) & 0x1;
        /* Data type is stored as an element size in bytes. */
        s->data_type_f2 = (1 << ((value >> 11) & 0x3));
        s->brust_f1 = (value >> 7) & 0x3;
        s->pack_f1 = (value >> 6) & 0x1;
        s->data_type_f1 = (1 << ((value >> 0) & 0x3));
        break;

    case 0xbc2:	/* DMA_LCD_CCR */
        s->mode_f2 = (value >> 14) & 0x3;
        s->mode_f1 = (value >> 12) & 0x3;
        s->end_prog = (value >> 11) & 0x1;
        s->omap_3_1_compatible_disable = (value >> 10) & 0x1;
        s->repeat = (value >> 9) & 0x1;
        s->auto_init = (value >> 8) & 0x1;
        s->running = (value >> 7) & 0x1;
        s->priority = (value >> 6) & 0x1;
        s->bs = (value >> 4) & 0x1;
        break;

    case 0xbc4:	/* DMA_LCD_CTRL */
        s->dst = (value >> 8) & 0x1;
        s->src = ((value >> 6) & 0x3) << 1;
        s->condition = 0;
        /* Assume no bus errors and thus no BUS_ERROR irq bits.  */
        s->interrupts = (value >> 1) & 1;
        s->dual = value & 1;
        break;

    /* Frame buffer top/bottom addresses, written as 16-bit halves. */
    case 0xbc8:	/* TOP_B1_L */
        s->src_f1_top &= 0xffff0000;
        s->src_f1_top |= 0x0000ffff & value;
        break;

    case 0xbca:	/* TOP_B1_U */
        s->src_f1_top &= 0x0000ffff;
        s->src_f1_top |= (uint32_t)value << 16;
        break;

    case 0xbcc:	/* BOT_B1_L */
        s->src_f1_bottom &= 0xffff0000;
        s->src_f1_bottom |= 0x0000ffff & value;
        break;

    case 0xbce:	/* BOT_B1_U */
        s->src_f1_bottom &= 0x0000ffff;
        s->src_f1_bottom |= (uint32_t) value << 16;
        break;

    case 0xbd0:	/* TOP_B2_L */
        s->src_f2_top &= 0xffff0000;
        s->src_f2_top |= 0x0000ffff & value;
        break;

    case 0xbd2:	/* TOP_B2_U */
        s->src_f2_top &= 0x0000ffff;
        s->src_f2_top |= (uint32_t) value << 16;
        break;

    case 0xbd4:	/* BOT_B2_L */
        s->src_f2_bottom &= 0xffff0000;
        s->src_f2_bottom |= 0x0000ffff & value;
        break;

    case 0xbd6:	/* BOT_B2_U */
        s->src_f2_bottom &= 0x0000ffff;
        s->src_f2_bottom |= (uint32_t) value << 16;
        break;

    case 0xbd8:	/* DMA_LCD_SRC_EI_B1 */
        s->element_index_f1 = value;
        break;

    case 0xbda:	/* DMA_LCD_SRC_FI_B1_L */
        s->frame_index_f1 &= 0xffff0000;
        s->frame_index_f1 |= 0x0000ffff & value;
        break;

    case 0xbf4:	/* DMA_LCD_SRC_FI_B1_U */
        s->frame_index_f1 &= 0x0000ffff;
        s->frame_index_f1 |= (uint32_t) value << 16;
        break;

    case 0xbdc:	/* DMA_LCD_SRC_EI_B2 */
        s->element_index_f2 = value;
        break;

    case 0xbde:	/* DMA_LCD_SRC_FI_B2_L */
        s->frame_index_f2 &= 0xffff0000;
        s->frame_index_f2 |= 0x0000ffff & value;
        break;

    case 0xbf6:	/* DMA_LCD_SRC_FI_B2_U */
        s->frame_index_f2 &= 0x0000ffff;
        s->frame_index_f2 |= (uint32_t) value << 16;
        break;

    case 0xbe0:	/* DMA_LCD_SRC_EN_B1 */
        s->elements_f1 = value;
        break;

    case 0xbe4:	/* DMA_LCD_SRC_FN_B1 */
        s->frames_f1 = value;
        break;

    case 0xbe2:	/* DMA_LCD_SRC_EN_B2 */
        s->elements_f2 = value;
        break;

    case 0xbe6:	/* DMA_LCD_SRC_FN_B2 */
        s->frames_f2 = value;
        break;

    case 0xbea:	/* DMA_LCD_LCH_CTRL */
        s->lch_type = value & 0xf;
        break;

    default:
        /* Unknown LCD channel register. */
        return 1;
    }
    return 0;
}
1138 
/*
 * Handle a 16-bit guest read of a DMA 3.2 LCD channel register,
 * reassembling the register image from the decoded fields stored in
 * the channel state.  Returns 0 and stores the value in *ret when the
 * offset was handled, nonzero otherwise.
 */
static int omap_dma_3_2_lcd_read(struct omap_dma_lcd_channel_s *s, int offset,
                uint16_t *ret)
{
    switch (offset) {
    case 0xbc0:	/* DMA_LCD_CSDP */
        /* data_type_* hold byte sizes; >> 1 maps 1/2/4 back to the
         * 0/1/2 register encoding. */
        *ret = (s->brust_f2 << 14) |
            (s->pack_f2 << 13) |
            ((s->data_type_f2 >> 1) << 11) |
            (s->brust_f1 << 7) |
            (s->pack_f1 << 6) |
            ((s->data_type_f1 >> 1) << 0);
        break;

    case 0xbc2:	/* DMA_LCD_CCR */
        *ret = (s->mode_f2 << 14) |
            (s->mode_f1 << 12) |
            (s->end_prog << 11) |
            (s->omap_3_1_compatible_disable << 10) |
            (s->repeat << 9) |
            (s->auto_init << 8) |
            (s->running << 7) |
            (s->priority << 6) |
            (s->bs << 4);
        break;

    case 0xbc4:	/* DMA_LCD_CTRL */
        /* Reading the control register deasserts the LCD interrupt. */
        qemu_irq_lower(s->irq);
        *ret = (s->dst << 8) |
            ((s->src & 0x6) << 5) |
            (s->condition << 3) |
            (s->interrupts << 1) |
            s->dual;
        break;

    /* Frame buffer top/bottom addresses, read back in 16-bit halves. */
    case 0xbc8:	/* TOP_B1_L */
        *ret = s->src_f1_top & 0xffff;
        break;

    case 0xbca:	/* TOP_B1_U */
        *ret = s->src_f1_top >> 16;
        break;

    case 0xbcc:	/* BOT_B1_L */
        *ret = s->src_f1_bottom & 0xffff;
        break;

    case 0xbce:	/* BOT_B1_U */
        *ret = s->src_f1_bottom >> 16;
        break;

    case 0xbd0:	/* TOP_B2_L */
        *ret = s->src_f2_top & 0xffff;
        break;

    case 0xbd2:	/* TOP_B2_U */
        *ret = s->src_f2_top >> 16;
        break;

    case 0xbd4:	/* BOT_B2_L */
        *ret = s->src_f2_bottom & 0xffff;
        break;

    case 0xbd6:	/* BOT_B2_U */
        *ret = s->src_f2_bottom >> 16;
        break;

    case 0xbd8:	/* DMA_LCD_SRC_EI_B1 */
        *ret = s->element_index_f1;
        break;

    case 0xbda:	/* DMA_LCD_SRC_FI_B1_L */
        *ret = s->frame_index_f1 & 0xffff;
        break;

    case 0xbf4:	/* DMA_LCD_SRC_FI_B1_U */
        *ret = s->frame_index_f1 >> 16;
        break;

    case 0xbdc:	/* DMA_LCD_SRC_EI_B2 */
        *ret = s->element_index_f2;
        break;

    case 0xbde:	/* DMA_LCD_SRC_FI_B2_L */
        *ret = s->frame_index_f2 & 0xffff;
        break;

    case 0xbf6:	/* DMA_LCD_SRC_FI_B2_U */
        *ret = s->frame_index_f2 >> 16;
        break;

    case 0xbe0:	/* DMA_LCD_SRC_EN_B1 */
        *ret = s->elements_f1;
        break;

    case 0xbe4:	/* DMA_LCD_SRC_FN_B1 */
        *ret = s->frames_f1;
        break;

    case 0xbe2:	/* DMA_LCD_SRC_EN_B2 */
        *ret = s->elements_f2;
        break;

    case 0xbe6:	/* DMA_LCD_SRC_FN_B2 */
        *ret = s->frames_f2;
        break;

    case 0xbea:	/* DMA_LCD_LCH_CTRL */
        *ret = s->lch_type;
        break;

    default:
        /* Unknown LCD channel register. */
        return 1;
    }
    return 0;
}
1254 
/*
 * Handle a 16-bit guest write to a DMA 3.1 compatibility-mode LCD
 * channel register (offsets 0x300..0x310).  Returns 0 when the offset
 * was handled, nonzero otherwise.
 */
static int omap_dma_3_1_lcd_write(struct omap_dma_lcd_channel_s *s, int offset,
                uint16_t value)
{
    switch (offset) {
    case 0x300:	/* SYS_DMA_LCD_CTRL */
        /* Bit 6 selects the source port: internal imif or external emiff. */
        s->src = (value & 0x40) ? imif : emiff;
        s->condition = 0;
        /* Assume no bus errors and thus no BUS_ERROR irq bits.  */
        s->interrupts = (value >> 1) & 1;
        s->dual = value & 1;
        break;

    /* Frame buffer top/bottom addresses, written as 16-bit halves. */
    case 0x302:	/* SYS_DMA_LCD_TOP_F1_L */
        s->src_f1_top &= 0xffff0000;
        s->src_f1_top |= 0x0000ffff & value;
        break;

    case 0x304:	/* SYS_DMA_LCD_TOP_F1_U */
        s->src_f1_top &= 0x0000ffff;
        s->src_f1_top |= (uint32_t)value << 16;
        break;

    case 0x306:	/* SYS_DMA_LCD_BOT_F1_L */
        s->src_f1_bottom &= 0xffff0000;
        s->src_f1_bottom |= 0x0000ffff & value;
        break;

    case 0x308:	/* SYS_DMA_LCD_BOT_F1_U */
        s->src_f1_bottom &= 0x0000ffff;
        s->src_f1_bottom |= (uint32_t)value << 16;
        break;

    case 0x30a:	/* SYS_DMA_LCD_TOP_F2_L */
        s->src_f2_top &= 0xffff0000;
        s->src_f2_top |= 0x0000ffff & value;
        break;

    case 0x30c:	/* SYS_DMA_LCD_TOP_F2_U */
        s->src_f2_top &= 0x0000ffff;
        s->src_f2_top |= (uint32_t)value << 16;
        break;

    case 0x30e:	/* SYS_DMA_LCD_BOT_F2_L */
        s->src_f2_bottom &= 0xffff0000;
        s->src_f2_bottom |= 0x0000ffff & value;
        break;

    case 0x310:	/* SYS_DMA_LCD_BOT_F2_U */
        s->src_f2_bottom &= 0x0000ffff;
        s->src_f2_bottom |= (uint32_t)value << 16;
        break;

    default:
        /* Unknown LCD compatibility register. */
        return 1;
    }
    return 0;
}
1312 
/*
 * Handle a 16-bit guest read of a DMA 3.1 compatibility-mode LCD
 * channel register.  Returns 0 and stores the value in *ret when the
 * offset was handled, nonzero otherwise.
 */
static int omap_dma_3_1_lcd_read(struct omap_dma_lcd_channel_s *s, int offset,
                uint16_t *ret)
{
    int i;

    switch (offset) {
    case 0x300:	/* SYS_DMA_LCD_CTRL */
        /* Reading the control register returns and clears the latched
         * condition bits, and deasserts the LCD interrupt. */
        i = s->condition;
        s->condition = 0;
        qemu_irq_lower(s->irq);
        *ret = ((s->src == imif) << 6) | (i << 3) |
                (s->interrupts << 1) | s->dual;
        break;

    /* Frame buffer top/bottom addresses, read back in 16-bit halves. */
    case 0x302:	/* SYS_DMA_LCD_TOP_F1_L */
        *ret = s->src_f1_top & 0xffff;
        break;

    case 0x304:	/* SYS_DMA_LCD_TOP_F1_U */
        *ret = s->src_f1_top >> 16;
        break;

    case 0x306:	/* SYS_DMA_LCD_BOT_F1_L */
        *ret = s->src_f1_bottom & 0xffff;
        break;

    case 0x308:	/* SYS_DMA_LCD_BOT_F1_U */
        *ret = s->src_f1_bottom >> 16;
        break;

    case 0x30a:	/* SYS_DMA_LCD_TOP_F2_L */
        *ret = s->src_f2_top & 0xffff;
        break;

    case 0x30c:	/* SYS_DMA_LCD_TOP_F2_U */
        *ret = s->src_f2_top >> 16;
        break;

    case 0x30e:	/* SYS_DMA_LCD_BOT_F2_L */
        *ret = s->src_f2_bottom & 0xffff;
        break;

    case 0x310:	/* SYS_DMA_LCD_BOT_F2_U */
        *ret = s->src_f2_bottom >> 16;
        break;

    default:
        /* Unknown LCD compatibility register. */
        return 1;
    }
    return 0;
}
1364 
1365 static int omap_dma_sys_write(struct omap_dma_s *s, int offset, uint16_t value)
1366 {
1367     switch (offset) {
1368     case 0x400:	/* SYS_DMA_GCR */
1369         s->gcr = value;
1370         break;
1371 
1372     case 0x404:	/* DMA_GSCR */
1373         if (value & 0x8)
1374             omap_dma_disable_3_1_mapping(s);
1375         else
1376             omap_dma_enable_3_1_mapping(s);
1377         break;
1378 
1379     case 0x408:	/* DMA_GRST */
1380         if (value & 0x1)
1381             omap_dma_reset(s->dma);
1382         break;
1383 
1384     default:
1385         return 1;
1386     }
1387     return 0;
1388 }
1389 
/*
 * Handle a 16-bit guest read of one of the OMAP1 global (system) DMA
 * registers.  Returns 0 and stores the value in *ret when the offset
 * was recognised, nonzero otherwise.
 */
static int omap_dma_sys_read(struct omap_dma_s *s, int offset,
                uint16_t *ret)
{
    switch (offset) {
    case 0x400:	/* SYS_DMA_GCR */
        *ret = s->gcr;
        break;

    case 0x404:	/* DMA_GSCR */
        *ret = s->omap_3_1_mapping_disabled << 3;
        break;

    case 0x408:	/* DMA_GRST */
        /* Reset bit always reads back as 0. */
        *ret = 0;
        break;

    /* All hardware/physical-channel ID registers read as 1 here. */
    case 0x442:	/* DMA_HW_ID */
    case 0x444:	/* DMA_PCh2_ID */
    case 0x446:	/* DMA_PCh0_ID */
    case 0x448:	/* DMA_PCh1_ID */
    case 0x44a:	/* DMA_PChG_ID */
    case 0x44c:	/* DMA_PChD_ID */
        *ret = 1;
        break;

    /* Capability registers, populated by omap_dma_setcaps(). */
    case 0x44e:	/* DMA_CAPS_0_U */
        *ret = (s->caps[0] >> 16) & 0xffff;
        break;
    case 0x450:	/* DMA_CAPS_0_L */
        *ret = (s->caps[0] >>  0) & 0xffff;
        break;

    case 0x452:	/* DMA_CAPS_1_U */
        *ret = (s->caps[1] >> 16) & 0xffff;
        break;
    case 0x454:	/* DMA_CAPS_1_L */
        *ret = (s->caps[1] >>  0) & 0xffff;
        break;

    case 0x456:	/* DMA_CAPS_2 */
        *ret = s->caps[2];
        break;

    case 0x458:	/* DMA_CAPS_3 */
        *ret = s->caps[3];
        break;

    case 0x45a:	/* DMA_CAPS_4 */
        *ret = s->caps[4];
        break;

    case 0x460:	/* DMA_PCh2_SR */
    case 0x480:	/* DMA_PCh0_SR */
    case 0x482:	/* DMA_PCh1_SR */
    case 0x4c0:	/* DMA_PChD_SR_0 */
        qemu_log_mask(LOG_UNIMP,
                      "%s: Physical Channel Status Registers not implemented\n",
                      __func__);
        *ret = 0xff;
        break;

    default:
        /* Unknown global register. */
        return 1;
    }
    return 0;
}
1456 
/*
 * MMIO read dispatcher for the OMAP1 DMA controller.  Routes the access
 * to the LCD channel, per-channel or global register handlers depending
 * on the offset and the currently selected 3.1 compatibility mapping.
 */
static uint64_t omap_dma_read(void *opaque, hwaddr addr,
                              unsigned size)
{
    struct omap_dma_s *s = (struct omap_dma_s *) opaque;
    int reg, ch;
    uint16_t ret;

    /* All registers are 16 bits wide. */
    if (size != 2) {
        return omap_badwidth_read16(opaque, addr);
    }

    switch (addr) {
    case 0x300 ... 0x3fe:
        /* With the 3.1 mapping active this range holds the LCD channel;
         * otherwise it is part of the regular channel register space. */
        if (s->model <= omap_dma_3_1 || !s->omap_3_1_mapping_disabled) {
            if (omap_dma_3_1_lcd_read(&s->lcd_ch, addr, &ret))
                break;
            return ret;
        }
        /* Fall through. */
    case 0x000 ... 0x2fe:
        /* Logical channel registers: a 0x40-byte window per channel. */
        reg = addr & 0x3f;
        ch = (addr >> 6) & 0x0f;
        if (omap_dma_ch_reg_read(s, &s->ch[ch], reg, &ret))
            break;
        return ret;

    case 0x404 ... 0x4fe:
        /* Extended global registers only exist on DMA 3.2 and later. */
        if (s->model <= omap_dma_3_1)
            break;
        /* Fall through. */
    case 0x400:
        if (omap_dma_sys_read(s, addr, &ret))
            break;
        return ret;

    case 0xb00 ... 0xbfe:
        /* DMA 3.2 LCD channel registers, visible only when the 3.1
         * compatibility mapping is disabled. */
        if (s->model == omap_dma_3_2 && s->omap_3_1_mapping_disabled) {
            if (omap_dma_3_2_lcd_read(&s->lcd_ch, addr, &ret))
                break;
            return ret;
        }
        break;
    }

    /* Unhandled offset. */
    OMAP_BAD_REG(addr);
    return 0;
}
1504 
/*
 * MMIO write dispatcher for the OMAP1 DMA controller; mirror image of
 * omap_dma_read() with the same offset-to-handler routing.
 */
static void omap_dma_write(void *opaque, hwaddr addr,
                           uint64_t value, unsigned size)
{
    struct omap_dma_s *s = (struct omap_dma_s *) opaque;
    int reg, ch;

    /* All registers are 16 bits wide. */
    if (size != 2) {
        omap_badwidth_write16(opaque, addr, value);
        return;
    }

    switch (addr) {
    case 0x300 ... 0x3fe:
        /* With the 3.1 mapping active this range holds the LCD channel;
         * otherwise it is part of the regular channel register space. */
        if (s->model <= omap_dma_3_1 || !s->omap_3_1_mapping_disabled) {
            if (omap_dma_3_1_lcd_write(&s->lcd_ch, addr, value))
                break;
            return;
        }
        /* Fall through.  */
    case 0x000 ... 0x2fe:
        /* Logical channel registers: a 0x40-byte window per channel. */
        reg = addr & 0x3f;
        ch = (addr >> 6) & 0x0f;
        if (omap_dma_ch_reg_write(s, &s->ch[ch], reg, value))
            break;
        return;

    case 0x404 ... 0x4fe:
        /* Extended global registers only exist on DMA 3.2 and later. */
        if (s->model <= omap_dma_3_1)
            break;
        /* fall through */
    case 0x400:
        if (omap_dma_sys_write(s, addr, value))
            break;
        return;

    case 0xb00 ... 0xbfe:
        /* DMA 3.2 LCD channel registers, visible only when the 3.1
         * compatibility mapping is disabled. */
        if (s->model == omap_dma_3_2 && s->omap_3_1_mapping_disabled) {
            if (omap_dma_3_2_lcd_write(&s->lcd_ch, addr, value))
                break;
            return;
        }
        break;
    }

    /* Unhandled offset. */
    OMAP_BAD_REG(addr);
}
1551 
/* Memory-mapped register interface of the OMAP1 (DMA 3.x) controller. */
static const MemoryRegionOps omap_dma_ops = {
    .read = omap_dma_read,
    .write = omap_dma_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
1557 
1558 static void omap_dma_request(void *opaque, int drq, int req)
1559 {
1560     struct omap_dma_s *s = (struct omap_dma_s *) opaque;
1561     /* The request pins are level triggered in QEMU.  */
1562     if (req) {
1563         if (~s->dma->drqbmp & (1ULL << drq)) {
1564             s->dma->drqbmp |= 1ULL << drq;
1565             omap_dma_process_request(s, drq);
1566         }
1567     } else
1568         s->dma->drqbmp &= ~(1ULL << drq);
1569 }
1570 
1571 /* XXX: this won't be needed once soc_dma knows about clocks.  */
1572 static void omap_dma_clk_update(void *opaque, int line, int on)
1573 {
1574     struct omap_dma_s *s = (struct omap_dma_s *) opaque;
1575     int i;
1576 
1577     s->dma->freq = omap_clk_getrate(s->clk);
1578 
1579     for (i = 0; i < s->chans; i ++)
1580         if (s->ch[i].active)
1581             soc_dma_set_request(s->ch[i].dma, on);
1582 }
1583 
1584 static void omap_dma_setcaps(struct omap_dma_s *s)
1585 {
1586     switch (s->model) {
1587     default:
1588     case omap_dma_3_1:
1589         break;
1590     case omap_dma_3_2:
1591     case omap_dma_4:
1592         /* XXX Only available for sDMA */
1593         s->caps[0] =
1594                 (1 << 19) |	/* Constant Fill Capability */
1595                 (1 << 18);	/* Transparent BLT Capability */
1596         s->caps[1] =
1597                 (1 << 1);	/* 1-bit palettized capability (DMA 3.2 only) */
1598         s->caps[2] =
1599                 (1 << 8) |	/* SEPARATE_SRC_AND_DST_INDEX_CPBLTY */
1600                 (1 << 7) |	/* DST_DOUBLE_INDEX_ADRS_CPBLTY */
1601                 (1 << 6) |	/* DST_SINGLE_INDEX_ADRS_CPBLTY */
1602                 (1 << 5) |	/* DST_POST_INCRMNT_ADRS_CPBLTY */
1603                 (1 << 4) |	/* DST_CONST_ADRS_CPBLTY */
1604                 (1 << 3) |	/* SRC_DOUBLE_INDEX_ADRS_CPBLTY */
1605                 (1 << 2) |	/* SRC_SINGLE_INDEX_ADRS_CPBLTY */
1606                 (1 << 1) |	/* SRC_POST_INCRMNT_ADRS_CPBLTY */
1607                 (1 << 0);	/* SRC_CONST_ADRS_CPBLTY */
1608         s->caps[3] =
1609                 (1 << 6) |	/* BLOCK_SYNCHR_CPBLTY (DMA 4 only) */
1610                 (1 << 7) |	/* PKT_SYNCHR_CPBLTY (DMA 4 only) */
1611                 (1 << 5) |	/* CHANNEL_CHAINING_CPBLTY */
1612                 (1 << 4) |	/* LCh_INTERLEAVE_CPBLTY */
1613                 (1 << 3) |	/* AUTOINIT_REPEAT_CPBLTY (DMA 3.2 only) */
1614                 (1 << 2) |	/* AUTOINIT_ENDPROG_CPBLTY (DMA 3.2 only) */
1615                 (1 << 1) |	/* FRAME_SYNCHR_CPBLTY */
1616                 (1 << 0);	/* ELMNT_SYNCHR_CPBLTY */
1617         s->caps[4] =
1618                 (1 << 7) |	/* PKT_INTERRUPT_CPBLTY (DMA 4 only) */
1619                 (1 << 6) |	/* SYNC_STATUS_CPBLTY */
1620                 (1 << 5) |	/* BLOCK_INTERRUPT_CPBLTY */
1621                 (1 << 4) |	/* LAST_FRAME_INTERRUPT_CPBLTY */
1622                 (1 << 3) |	/* FRAME_INTERRUPT_CPBLTY */
1623                 (1 << 2) |	/* HALF_FRAME_INTERRUPT_CPBLTY */
1624                 (1 << 1) |	/* EVENT_DROP_INTERRUPT_CPBLTY */
1625                 (1 << 0);	/* TIMEOUT_INTERRUPT_CPBLTY (DMA 3.2 only) */
1626         break;
1627     }
1628 }
1629 
/*
 * Instantiate and wire up an OMAP1 DMA controller of the given @model
 * at bus address @base.  @irqs supplies the per-channel interrupt
 * lines, @lcd_irq the dedicated LCD channel interrupt.  Returns the
 * underlying generic soc_dma engine.
 */
struct soc_dma_s *omap_dma_init(hwaddr base, qemu_irq *irqs,
                MemoryRegion *sysmem,
                qemu_irq lcd_irq, struct omap_mpu_state_s *mpu, omap_clk clk,
                enum omap_dma_model model)
{
    int num_irqs, memsize, i;
    struct omap_dma_s *s = g_new0(struct omap_dma_s, 1);

    /* Older (3.1) controllers have fewer interrupt lines and a smaller
     * register window than 3.2/4. */
    if (model <= omap_dma_3_1) {
        num_irqs = 6;
        memsize = 0x800;
    } else {
        num_irqs = 16;
        memsize = 0xc00;
    }
    s->model = model;
    s->mpu = mpu;
    s->clk = clk;
    s->lcd_ch.irq = lcd_irq;
    s->lcd_ch.mpu = mpu;

    /* 9 generic channels on 3.1, 16 on later models. */
    s->dma = soc_dma_init((model <= omap_dma_3_1) ? 9 : 16);
    s->dma->freq = omap_clk_getrate(clk);
    s->dma->transfer_fn = omap_dma_transfer_generic;
    s->dma->setup_fn = omap_dma_transfer_setup;
    s->dma->drq = qemu_allocate_irqs(omap_dma_request, s, 32);
    s->dma->opaque = s;

    while (num_irqs --)
        s->ch[num_irqs].irq = irqs[num_irqs];
    /* Channels i and i + 6 are paired as siblings for i = 0..2;
     * presumably they share a physical resource -- TODO confirm against
     * the OMAP TRM. */
    for (i = 0; i < 3; i ++) {
        s->ch[i].sibling = &s->ch[i + 6];
        s->ch[i + 6].sibling = &s->ch[i];
    }
    /* Cross-link our channel state with the generic engine's channels. */
    for (i = (model <= omap_dma_3_1) ? 8 : 15; i >= 0; i --) {
        s->ch[i].dma = &s->dma->ch[i];
        s->dma->ch[i].opaque = &s->ch[i];
    }

    omap_dma_setcaps(s);
    omap_clk_adduser(s->clk, qemu_allocate_irq(omap_dma_clk_update, s, 0));
    omap_dma_reset(s->dma);
    omap_dma_clk_update(s, 0, 1);

    memory_region_init_io(&s->iomem, NULL, &omap_dma_ops, s, "omap.dma", memsize);
    memory_region_add_subregion(sysmem, base, &s->iomem);

    /* Expose the request lines to the MPU model. */
    mpu->drq = s->dma->drq;

    return s->dma;
}
1681 
1682 static void omap_dma_interrupts_4_update(struct omap_dma_s *s)
1683 {
1684     struct omap_dma_channel_s *ch = s->ch;
1685     uint32_t bmp, bit;
1686 
1687     for (bmp = 0, bit = 1; bit; ch ++, bit <<= 1)
1688         if (ch->status) {
1689             bmp |= bit;
1690             ch->cstatus |= ch->status;
1691             ch->status = 0;
1692         }
1693     if ((s->irqstat[0] |= s->irqen[0] & bmp))
1694         qemu_irq_raise(s->irq[0]);
1695     if ((s->irqstat[1] |= s->irqen[1] & bmp))
1696         qemu_irq_raise(s->irq[1]);
1697     if ((s->irqstat[2] |= s->irqen[2] & bmp))
1698         qemu_irq_raise(s->irq[2]);
1699     if ((s->irqstat[3] |= s->irqen[3] & bmp))
1700         qemu_irq_raise(s->irq[3]);
1701 }
1702 
/*
 * MMIO read handler for the OMAP2+ (DMA4) controller.  Global registers
 * are handled in the first switch; offsets 0x80 and up address the
 * per-channel register windows (0x60 bytes per channel), handled by the
 * second switch after the channel and relative offset are computed.
 */
static uint64_t omap_dma4_read(void *opaque, hwaddr addr,
                               unsigned size)
{
    struct omap_dma_s *s = (struct omap_dma_s *) opaque;
    int irqn = 0, chnum;
    struct omap_dma_channel_s *ch;

    /* Byte accesses are rejected; 16- and 32-bit reads are accepted. */
    if (size == 1) {
        return omap_badwidth_read16(opaque, addr);
    }

    switch (addr) {
    case 0x00:	/* DMA4_REVISION */
        return 0x40;

    /* The IRQSTATUS/IRQENABLE cases cascade so that irqn counts up to
     * the index of the interrupt line being addressed. */
    case 0x14:	/* DMA4_IRQSTATUS_L3 */
        irqn ++;
        /* fall through */
    case 0x10:	/* DMA4_IRQSTATUS_L2 */
        irqn ++;
        /* fall through */
    case 0x0c:	/* DMA4_IRQSTATUS_L1 */
        irqn ++;
        /* fall through */
    case 0x08:	/* DMA4_IRQSTATUS_L0 */
        return s->irqstat[irqn];

    case 0x24:	/* DMA4_IRQENABLE_L3 */
        irqn ++;
        /* fall through */
    case 0x20:	/* DMA4_IRQENABLE_L2 */
        irqn ++;
        /* fall through */
    case 0x1c:	/* DMA4_IRQENABLE_L1 */
        irqn ++;
        /* fall through */
    case 0x18:	/* DMA4_IRQENABLE_L0 */
        return s->irqen[irqn];

    case 0x28:	/* DMA4_SYSSTATUS */
        return 1;						/* RESETDONE */

    case 0x2c:	/* DMA4_OCP_SYSCONFIG */
        return s->ocp;

    case 0x64:	/* DMA4_CAPS_0 */
        return s->caps[0];
    case 0x6c:	/* DMA4_CAPS_2 */
        return s->caps[2];
    case 0x70:	/* DMA4_CAPS_3 */
        return s->caps[3];
    case 0x74:	/* DMA4_CAPS_4 */
        return s->caps[4];

    case 0x78:	/* DMA4_GCR */
        return s->gcr;

    case 0x80 ... 0xfff:
        /* Per-channel window: select the channel and rebase the offset. */
        addr -= 0x80;
        chnum = addr / 0x60;
        ch = s->ch + chnum;
        addr -= chnum * 0x60;
        break;

    default:
        OMAP_BAD_REG(addr);
        return 0;
    }

    /* Per-channel registers */
    switch (addr) {
    case 0x00:	/* DMA4_CCR */
        return (ch->buf_disable << 25) |
                (ch->src_sync << 24) |
                (ch->prefetch << 23) |
                ((ch->sync & 0x60) << 14) |
                (ch->bs << 18) |
                (ch->transparent_copy << 17) |
                (ch->constant_fill << 16) |
                (ch->mode[1] << 14) |
                (ch->mode[0] << 12) |
                (0 << 10) | (0 << 9) |
                (ch->suspend << 8) |
                (ch->enable << 7) |
                (ch->priority << 6) |
                (ch->fs << 5) | (ch->sync & 0x1f);

    case 0x04:	/* DMA4_CLNK_CTRL */
        return (ch->link_enabled << 15) | ch->link_next_ch;

    case 0x08:	/* DMA4_CICR */
        return ch->interrupts;

    case 0x0c:	/* DMA4_CSR */
        return ch->cstatus;

    case 0x10:	/* DMA4_CSDP */
        /* data_type is stored as a byte size; >> 1 restores the
         * register encoding. */
        return (ch->endian[0] << 21) |
                (ch->endian_lock[0] << 20) |
                (ch->endian[1] << 19) |
                (ch->endian_lock[1] << 18) |
                (ch->write_mode << 16) |
                (ch->burst[1] << 14) |
                (ch->pack[1] << 13) |
                (ch->translate[1] << 9) |
                (ch->burst[0] << 7) |
                (ch->pack[0] << 6) |
                (ch->translate[0] << 2) |
                (ch->data_type >> 1);

    case 0x14:	/* DMA4_CEN */
        return ch->elements;

    case 0x18:	/* DMA4_CFN */
        return ch->frames;

    case 0x1c:	/* DMA4_CSSA */
        return ch->addr[0];

    case 0x20:	/* DMA4_CDSA */
        return ch->addr[1];

    case 0x24:	/* DMA4_CSEI */
        return ch->element_index[0];

    case 0x28:	/* DMA4_CSFI */
        return ch->frame_index[0];

    case 0x2c:	/* DMA4_CDEI */
        return ch->element_index[1];

    case 0x30:	/* DMA4_CDFI */
        return ch->frame_index[1];

    /* Progress registers reflect the currently active transfer set. */
    case 0x34:	/* DMA4_CSAC */
        return ch->active_set.src & 0xffff;

    case 0x38:	/* DMA4_CDAC */
        return ch->active_set.dest & 0xffff;

    case 0x3c:	/* DMA4_CCEN */
        return ch->active_set.element;

    case 0x40:	/* DMA4_CCFN */
        return ch->active_set.frame;

    case 0x44:	/* DMA4_COLOR */
        /* XXX only in sDMA */
        return ch->color;

    default:
        OMAP_BAD_REG(addr);
        return 0;
    }
}
1858 
1859 static void omap_dma4_write(void *opaque, hwaddr addr,
1860                             uint64_t value, unsigned size)
1861 {
1862     struct omap_dma_s *s = (struct omap_dma_s *) opaque;
1863     int chnum, irqn = 0;
1864     struct omap_dma_channel_s *ch;
1865 
1866     if (size == 1) {
1867         omap_badwidth_write16(opaque, addr, value);
1868         return;
1869     }
1870 
1871     switch (addr) {
1872     case 0x14:	/* DMA4_IRQSTATUS_L3 */
1873         irqn ++;
1874         /* fall through */
1875     case 0x10:	/* DMA4_IRQSTATUS_L2 */
1876         irqn ++;
1877         /* fall through */
1878     case 0x0c:	/* DMA4_IRQSTATUS_L1 */
1879         irqn ++;
1880         /* fall through */
1881     case 0x08:	/* DMA4_IRQSTATUS_L0 */
1882         s->irqstat[irqn] &= ~value;
1883         if (!s->irqstat[irqn])
1884             qemu_irq_lower(s->irq[irqn]);
1885         return;
1886 
1887     case 0x24:	/* DMA4_IRQENABLE_L3 */
1888         irqn ++;
1889         /* fall through */
1890     case 0x20:	/* DMA4_IRQENABLE_L2 */
1891         irqn ++;
1892         /* fall through */
1893     case 0x1c:	/* DMA4_IRQENABLE_L1 */
1894         irqn ++;
1895         /* fall through */
1896     case 0x18:	/* DMA4_IRQENABLE_L0 */
1897         s->irqen[irqn] = value;
1898         return;
1899 
1900     case 0x2c:	/* DMA4_OCP_SYSCONFIG */
1901         if (value & 2)						/* SOFTRESET */
1902             omap_dma_reset(s->dma);
1903         s->ocp = value & 0x3321;
1904         if (((s->ocp >> 12) & 3) == 3) { /* MIDLEMODE */
1905             qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid DMA power mode\n",
1906                           __func__);
1907         }
1908         return;
1909 
1910     case 0x78:	/* DMA4_GCR */
1911         s->gcr = value & 0x00ff00ff;
1912         if ((value & 0xff) == 0x00) { /* MAX_CHANNEL_FIFO_DEPTH */
1913             qemu_log_mask(LOG_GUEST_ERROR, "%s: wrong FIFO depth in GCR\n",
1914                           __func__);
1915         }
1916         return;
1917 
1918     case 0x80 ... 0xfff:
1919         addr -= 0x80;
1920         chnum = addr / 0x60;
1921         ch = s->ch + chnum;
1922         addr -= chnum * 0x60;
1923         break;
1924 
1925     case 0x00:	/* DMA4_REVISION */
1926     case 0x28:	/* DMA4_SYSSTATUS */
1927     case 0x64:	/* DMA4_CAPS_0 */
1928     case 0x6c:	/* DMA4_CAPS_2 */
1929     case 0x70:	/* DMA4_CAPS_3 */
1930     case 0x74:	/* DMA4_CAPS_4 */
1931         OMAP_RO_REG(addr);
1932         return;
1933 
1934     default:
1935         OMAP_BAD_REG(addr);
1936         return;
1937     }
1938 
1939     /* Per-channel registers */
1940     switch (addr) {
1941     case 0x00:	/* DMA4_CCR */
1942         ch->buf_disable = (value >> 25) & 1;
1943         ch->src_sync = (value >> 24) & 1;	/* XXX For CamDMA must be 1 */
1944         if (ch->buf_disable && !ch->src_sync) {
1945             qemu_log_mask(LOG_GUEST_ERROR,
1946                           "%s: Buffering disable is not allowed in "
1947                           "destination synchronised mode\n", __func__);
1948         }
1949         ch->prefetch = (value >> 23) & 1;
1950         ch->bs = (value >> 18) & 1;
1951         ch->transparent_copy = (value >> 17) & 1;
1952         ch->constant_fill = (value >> 16) & 1;
1953         ch->mode[1] = (omap_dma_addressing_t) ((value & 0xc000) >> 14);
1954         ch->mode[0] = (omap_dma_addressing_t) ((value & 0x3000) >> 12);
1955         ch->suspend = (value & 0x0100) >> 8;
1956         ch->priority = (value & 0x0040) >> 6;
1957         ch->fs = (value & 0x0020) >> 5;
1958         if (ch->fs && ch->bs && ch->mode[0] && ch->mode[1]) {
1959             qemu_log_mask(LOG_GUEST_ERROR,
1960                           "%s: For a packet transfer at least one port "
1961                           "must be constant-addressed\n", __func__);
1962         }
1963         ch->sync = (value & 0x001f) | ((value >> 14) & 0x0060);
1964         /* XXX must be 0x01 for CamDMA */
1965 
1966         if (value & 0x0080)
1967             omap_dma_enable_channel(s, ch);
1968         else
1969             omap_dma_disable_channel(s, ch);
1970 
1971         break;
1972 
1973     case 0x04:	/* DMA4_CLNK_CTRL */
1974         ch->link_enabled = (value >> 15) & 0x1;
1975         ch->link_next_ch = value & 0x1f;
1976         break;
1977 
1978     case 0x08:	/* DMA4_CICR */
1979         ch->interrupts = value & 0x09be;
1980         break;
1981 
1982     case 0x0c:	/* DMA4_CSR */
1983         ch->cstatus &= ~value;
1984         break;
1985 
1986     case 0x10:	/* DMA4_CSDP */
1987         ch->endian[0] =(value >> 21) & 1;
1988         ch->endian_lock[0] =(value >> 20) & 1;
1989         ch->endian[1] =(value >> 19) & 1;
1990         ch->endian_lock[1] =(value >> 18) & 1;
1991         if (ch->endian[0] != ch->endian[1]) {
1992             qemu_log_mask(LOG_GUEST_ERROR,
1993                           "%s: DMA endianness conversion enable attempt\n",
1994                           __func__);
1995         }
1996         ch->write_mode = (value >> 16) & 3;
1997         ch->burst[1] = (value & 0xc000) >> 14;
1998         ch->pack[1] = (value & 0x2000) >> 13;
1999         ch->translate[1] = (value & 0x1e00) >> 9;
2000         ch->burst[0] = (value & 0x0180) >> 7;
2001         ch->pack[0] = (value & 0x0040) >> 6;
2002         ch->translate[0] = (value & 0x003c) >> 2;
2003         if (ch->translate[0] | ch->translate[1]) {
2004             qemu_log_mask(LOG_GUEST_ERROR,
2005                           "%s: bad MReqAddressTranslate sideband signal\n",
2006                           __func__);
2007         }
2008         ch->data_type = 1 << (value & 3);
2009         if ((value & 3) == 3) {
2010             qemu_log_mask(LOG_GUEST_ERROR,
2011                           "%s: bad data_type for DMA channel\n", __func__);
2012             ch->data_type >>= 1;
2013         }
2014         break;
2015 
2016     case 0x14:	/* DMA4_CEN */
2017         ch->set_update = 1;
2018         ch->elements = value & 0xffffff;
2019         break;
2020 
2021     case 0x18:	/* DMA4_CFN */
2022         ch->frames = value & 0xffff;
2023         ch->set_update = 1;
2024         break;
2025 
2026     case 0x1c:	/* DMA4_CSSA */
2027         ch->addr[0] = (hwaddr) (uint32_t) value;
2028         ch->set_update = 1;
2029         break;
2030 
2031     case 0x20:	/* DMA4_CDSA */
2032         ch->addr[1] = (hwaddr) (uint32_t) value;
2033         ch->set_update = 1;
2034         break;
2035 
2036     case 0x24:	/* DMA4_CSEI */
2037         ch->element_index[0] = (int16_t) value;
2038         ch->set_update = 1;
2039         break;
2040 
2041     case 0x28:	/* DMA4_CSFI */
2042         ch->frame_index[0] = (int32_t) value;
2043         ch->set_update = 1;
2044         break;
2045 
2046     case 0x2c:	/* DMA4_CDEI */
2047         ch->element_index[1] = (int16_t) value;
2048         ch->set_update = 1;
2049         break;
2050 
2051     case 0x30:	/* DMA4_CDFI */
2052         ch->frame_index[1] = (int32_t) value;
2053         ch->set_update = 1;
2054         break;
2055 
2056     case 0x44:	/* DMA4_COLOR */
2057         /* XXX only in sDMA */
2058         ch->color = value;
2059         break;
2060 
2061     case 0x34:	/* DMA4_CSAC */
2062     case 0x38:	/* DMA4_CDAC */
2063     case 0x3c:	/* DMA4_CCEN */
2064     case 0x40:	/* DMA4_CCFN */
2065         OMAP_RO_REG(addr);
2066         break;
2067 
2068     default:
2069         OMAP_BAD_REG(addr);
2070     }
2071 }
2072 
/*
 * Memory-region callbacks for the DMA4 (OMAP2+) register block;
 * all guest accesses to the 0x1000-byte window mapped in
 * omap_dma4_init() are routed through these handlers.
 */
static const MemoryRegionOps omap_dma4_ops = {
    .read = omap_dma4_read,
    .write = omap_dma4_write,
    /* Registers are presented in the CPU's native byte order. */
    .endianness = DEVICE_NATIVE_ENDIAN,
};
2078 
2079 struct soc_dma_s *omap_dma4_init(hwaddr base, qemu_irq *irqs,
2080                 MemoryRegion *sysmem,
2081                 struct omap_mpu_state_s *mpu, int fifo,
2082                 int chans, omap_clk iclk, omap_clk fclk)
2083 {
2084     int i;
2085     struct omap_dma_s *s = g_new0(struct omap_dma_s, 1);
2086 
2087     s->model = omap_dma_4;
2088     s->chans = chans;
2089     s->mpu = mpu;
2090     s->clk = fclk;
2091 
2092     s->dma = soc_dma_init(s->chans);
2093     s->dma->freq = omap_clk_getrate(fclk);
2094     s->dma->transfer_fn = omap_dma_transfer_generic;
2095     s->dma->setup_fn = omap_dma_transfer_setup;
2096     s->dma->drq = qemu_allocate_irqs(omap_dma_request, s, 64);
2097     s->dma->opaque = s;
2098     for (i = 0; i < s->chans; i ++) {
2099         s->ch[i].dma = &s->dma->ch[i];
2100         s->dma->ch[i].opaque = &s->ch[i];
2101     }
2102 
2103     memcpy(&s->irq, irqs, sizeof(s->irq));
2104     s->intr_update = omap_dma_interrupts_4_update;
2105 
2106     omap_dma_setcaps(s);
2107     omap_clk_adduser(s->clk, qemu_allocate_irq(omap_dma_clk_update, s, 0));
2108     omap_dma_reset(s->dma);
2109     omap_dma_clk_update(s, 0, !!s->dma->freq);
2110 
2111     memory_region_init_io(&s->iomem, NULL, &omap_dma4_ops, s, "omap.dma4", 0x1000);
2112     memory_region_add_subregion(sysmem, base, &s->iomem);
2113 
2114     mpu->drq = s->dma->drq;
2115 
2116     return s->dma;
2117 }
2118 
2119 struct omap_dma_lcd_channel_s *omap_dma_get_lcdch(struct soc_dma_s *dma)
2120 {
2121     struct omap_dma_s *s = dma->opaque;
2122 
2123     return &s->lcd_ch;
2124 }
2125