/*
 * On-chip DMA controller framework.
 *
 * Copyright (C) 2008 Nokia Corporation
 * Written by Andrzej Zaborowski <andrew@openedhand.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 or
 * (at your option) version 3 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "qemu/timer.h"
#include "hw/arm/soc_dma.h"

static void transfer_mem2mem(struct soc_dma_ch_s *ch)
{
    memcpy(ch->paddr[0], ch->paddr[1], ch->bytes);
    ch->paddr[0] += ch->bytes;
    ch->paddr[1] += ch->bytes;
}

static void transfer_mem2fifo(struct soc_dma_ch_s *ch)
{
    ch->io_fn[1](ch->io_opaque[1], ch->paddr[0], ch->bytes);
    ch->paddr[0] += ch->bytes;
}

static void transfer_fifo2mem(struct soc_dma_ch_s *ch)
{
    ch->io_fn[0](ch->io_opaque[0], ch->paddr[1], ch->bytes);
    ch->paddr[1] += ch->bytes;
}

/* This is further optimisable but isn't very important because often
 * DMA peripherals forbid these kinds of transfers and even when they don't,
 * operating systems may not need to use them.  */
static void *fifo_buf;
static int fifo_size;
static void transfer_fifo2fifo(struct soc_dma_ch_s *ch)
{
    if (ch->bytes > fifo_size)
        fifo_buf = g_realloc(fifo_buf, fifo_size = ch->bytes);

    /* Implement as transfer_fifo2linear + transfer_linear2fifo.  */
    ch->io_fn[0](ch->io_opaque[0], fifo_buf, ch->bytes);
    ch->io_fn[1](ch->io_opaque[1], fifo_buf, ch->bytes);
}
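/* Illustrative sketch (not part of the framework): a device-side FIFO
 * callback of the kind invoked by the transfer helpers above.  They call it
 * as fn(opaque, linear_buffer, byte_count); a callback registered as an
 * "out" FIFO consumes bytes from the buffer, an "in" FIFO fills it.  The
 * my_uart_* names are hypothetical and the parameter types are shown
 * schematically; the authoritative prototype is soc_dma_io_t in
 * hw/arm/soc_dma.h.
 *
 *     static void my_uart_tx_fifo(void *opaque, void *buf, int len)
 *     {
 *         struct my_uart_s *s = opaque;
 *
 *         // "out" direction: drain exactly 'len' bytes from 'buf'
 *         // into the device.
 *         my_uart_push_tx_bytes(s, buf, len);
 *     }
 */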
struct dma_s {
    struct soc_dma_s soc;
    int chnum;
    uint64_t ch_enable_mask;
    int64_t channel_freq;
    int enabled_count;

    struct memmap_entry_s {
        enum soc_dma_port_type type;
        hwaddr addr;
        union {
            struct {
                void *opaque;
                soc_dma_io_t fn;
                int out;
            } fifo;
            struct {
                void *base;
                size_t size;
            } mem;
        } u;
    } *memmap;
    int memmap_size;

    struct soc_dma_ch_s ch[0];
};

static void soc_dma_ch_schedule(struct soc_dma_ch_s *ch, int delay_bytes)
{
    int64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    struct dma_s *dma = (struct dma_s *) ch->dma;

    timer_mod(ch->timer, now + delay_bytes / dma->channel_freq);
}

static void soc_dma_ch_run(void *opaque)
{
    struct soc_dma_ch_s *ch = (struct soc_dma_ch_s *) opaque;

    ch->running = 1;
    ch->dma->setup_fn(ch);
    ch->transfer_fn(ch);
    ch->running = 0;

    if (ch->enable)
        soc_dma_ch_schedule(ch, ch->bytes);
    ch->bytes = 0;
}

static inline struct memmap_entry_s *soc_dma_lookup(struct dma_s *dma,
                hwaddr addr)
{
    struct memmap_entry_s *lo;
    int hi;

    lo = dma->memmap;
    hi = dma->memmap_size;

    while (hi > 1) {
        hi /= 2;
        if (lo[hi].addr <= addr)
            lo += hi;
    }

    return lo;
}

static inline enum soc_dma_port_type soc_dma_ch_update_type(
                struct soc_dma_ch_s *ch, int port)
{
    struct dma_s *dma = (struct dma_s *) ch->dma;
    struct memmap_entry_s *entry = soc_dma_lookup(dma, ch->vaddr[port]);

    if (entry->type == soc_dma_port_fifo) {
        while (entry < dma->memmap + dma->memmap_size &&
                        entry->u.fifo.out != port)
            entry ++;
        if (entry->addr != ch->vaddr[port] || entry->u.fifo.out != port)
            return soc_dma_port_other;

        if (ch->type[port] != soc_dma_access_const)
            return soc_dma_port_other;

        ch->io_fn[port] = entry->u.fifo.fn;
        ch->io_opaque[port] = entry->u.fifo.opaque;
        return soc_dma_port_fifo;
    } else if (entry->type == soc_dma_port_mem) {
        if (entry->addr > ch->vaddr[port] ||
                        entry->addr + entry->u.mem.size <= ch->vaddr[port])
            return soc_dma_port_other;

        /* TODO: support constant memory address for source port as used for
         * drawing solid rectangles by PalmOS(R).  */
        if (ch->type[port] != soc_dma_access_linear)
            return soc_dma_port_other;

        ch->paddr[port] = (uint8_t *) entry->u.mem.base +
                (ch->vaddr[port] - entry->addr);
        /* TODO: save bytes left to the end of the mapping somewhere so we
         * can check we're not reading beyond it.  */
        return soc_dma_port_mem;
    } else
        return soc_dma_port_other;
}

void soc_dma_ch_update(struct soc_dma_ch_s *ch)
{
    enum soc_dma_port_type src, dst;

    src = soc_dma_ch_update_type(ch, 0);
    if (src == soc_dma_port_other) {
        ch->update = 0;
        ch->transfer_fn = ch->dma->transfer_fn;
        return;
    }
    dst = soc_dma_ch_update_type(ch, 1);

    /* TODO: use src and dst as array indices.  */
    if (src == soc_dma_port_mem && dst == soc_dma_port_mem)
        ch->transfer_fn = transfer_mem2mem;
    else if (src == soc_dma_port_mem && dst == soc_dma_port_fifo)
        ch->transfer_fn = transfer_mem2fifo;
    else if (src == soc_dma_port_fifo && dst == soc_dma_port_mem)
        ch->transfer_fn = transfer_fifo2mem;
    else if (src == soc_dma_port_fifo && dst == soc_dma_port_fifo)
        ch->transfer_fn = transfer_fifo2fifo;
    else
        ch->transfer_fn = ch->dma->transfer_fn;

    ch->update = (dst != soc_dma_port_other);
}
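/* Illustrative sketch (hypothetical register-write handler, not part of this
 * file): a SoC DMA model is expected to fill in ch->vaddr[] and ch->type[]
 * for both ports and then call soc_dma_ch_update(), so that one of the
 * specialised transfer functions above can be picked; anything unresolved
 * falls back to the model's own dma->transfer_fn.  Port 0 is treated as the
 * source and port 1 as the destination by soc_dma_ch_update().
 *
 *     static void my_dmac_latch_descriptor(struct soc_dma_ch_s *ch,
 *                     hwaddr src, hwaddr dst)
 *     {
 *         ch->vaddr[0] = src;
 *         ch->vaddr[1] = dst;
 *         ch->type[0] = soc_dma_access_linear;
 *         ch->type[1] = soc_dma_access_const;   // e.g. a device FIFO
 *         soc_dma_ch_update(ch);
 *     }
 */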
static void soc_dma_ch_freq_update(struct dma_s *s)
{
    if (s->enabled_count)
        /* We completely ignore channel priorities and arbitration here */
        s->channel_freq = s->soc.freq / s->enabled_count;
    else {
        /* TODO: Signal that we want to disable the functional clock and let
         * the platform code decide what to do with it, i.e. check that
         * auto-idle is enabled in the clock controller and if we are stopping
         * the clock, do the same with any parent clocks that had only one
         * user keeping them on and auto-idle enabled.  */
    }
}

void soc_dma_set_request(struct soc_dma_ch_s *ch, int level)
{
    struct dma_s *dma = (struct dma_s *) ch->dma;

    dma->enabled_count += level - ch->enable;

    if (level)
        dma->ch_enable_mask |= 1ULL << ch->num;
    else
        dma->ch_enable_mask &= ~(1ULL << ch->num);

    if (level != ch->enable) {
        soc_dma_ch_freq_update(dma);
        ch->enable = level;

        if (!ch->enable)
            timer_del(ch->timer);
        else if (!ch->running)
            soc_dma_ch_run(ch);
        else
            soc_dma_ch_schedule(ch, 1);
    }
}

void soc_dma_reset(struct soc_dma_s *soc)
{
    struct dma_s *s = (struct dma_s *) soc;

    s->soc.drqbmp = 0;
    s->ch_enable_mask = 0;
    s->enabled_count = 0;
    soc_dma_ch_freq_update(s);
}

/* TODO: take a functional-clock argument */
struct soc_dma_s *soc_dma_init(int n)
{
    int i;
    struct dma_s *s = g_malloc0(sizeof(*s) + n * sizeof(*s->ch));

    s->chnum = n;
    s->soc.ch = s->ch;
    for (i = 0; i < n; i ++) {
        s->ch[i].dma = &s->soc;
        s->ch[i].num = i;
        s->ch[i].timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                        soc_dma_ch_run, &s->ch[i]);
    }

    soc_dma_reset(&s->soc);
    fifo_size = 0;

    return &s->soc;
}
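/* Illustrative sketch (hypothetical SoC model code): how a DMA controller
 * model might bring the framework up.  Only fields actually referenced by
 * this file (freq, setup_fn, transfer_fn, ch) are shown; the my_dmac_* names
 * and the channel count are made up.
 *
 *     struct soc_dma_s *dma = soc_dma_init(MY_DMAC_NCHANNELS);
 *
 *     dma->freq = 100000000;                    // functional clock, in Hz
 *     dma->setup_fn = my_dmac_setup_channel;    // sets ch->bytes per run
 *     dma->transfer_fn = my_dmac_slow_transfer; // fallback for "other" ports
 *
 *     // Later, when the guest raises a DMA request on channel 5:
 *     soc_dma_set_request(&dma->ch[5], 1);
 */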
void soc_dma_port_add_fifo(struct soc_dma_s *soc, hwaddr virt_base,
                soc_dma_io_t fn, void *opaque, int out)
{
    struct memmap_entry_s *entry;
    struct dma_s *dma = (struct dma_s *) soc;

    dma->memmap = g_realloc(dma->memmap, sizeof(*entry) *
                    (dma->memmap_size + 1));
    entry = soc_dma_lookup(dma, virt_base);

    if (dma->memmap_size) {
        if (entry->type == soc_dma_port_mem) {
            if (entry->addr <= virt_base &&
                            entry->addr + entry->u.mem.size > virt_base) {
                error_report("%s: FIFO at %"PRIx64
                             " collides with RAM region at %"PRIx64
                             "-%"PRIx64, __func__,
                             virt_base, entry->addr,
                             entry->addr + entry->u.mem.size);
                exit(-1);
            }

            if (entry->addr <= virt_base)
                entry ++;
        } else
            while (entry < dma->memmap + dma->memmap_size &&
                            entry->addr <= virt_base) {
                if (entry->addr == virt_base && entry->u.fifo.out == out) {
                    error_report("%s: FIFO at %"PRIx64
                                 " collides with FIFO at %"PRIx64,
                                 __func__, virt_base, entry->addr);
                    exit(-1);
                }

                entry ++;
            }

        memmove(entry + 1, entry,
                (uint8_t *) (dma->memmap + dma->memmap_size ++) -
                (uint8_t *) entry);
    } else
        dma->memmap_size ++;

    entry->addr = virt_base;
    entry->type = soc_dma_port_fifo;
    entry->u.fifo.fn = fn;
    entry->u.fifo.opaque = opaque;
    entry->u.fifo.out = out;
}

void soc_dma_port_add_mem(struct soc_dma_s *soc, uint8_t *phys_base,
                hwaddr virt_base, size_t size)
{
    struct memmap_entry_s *entry;
    struct dma_s *dma = (struct dma_s *) soc;

    dma->memmap = g_realloc(dma->memmap, sizeof(*entry) *
                    (dma->memmap_size + 1));
    entry = soc_dma_lookup(dma, virt_base);

    if (dma->memmap_size) {
        if (entry->type == soc_dma_port_mem) {
            if ((entry->addr >= virt_base && entry->addr < virt_base + size) ||
                            (entry->addr <= virt_base &&
                             entry->addr + entry->u.mem.size > virt_base)) {
                error_report("%s: RAM at %"PRIx64 "-%"PRIx64
                             " collides with RAM region at %"PRIx64
                             "-%"PRIx64, __func__,
                             virt_base, virt_base + size,
                             entry->addr, entry->addr + entry->u.mem.size);
                exit(-1);
            }

            if (entry->addr <= virt_base)
                entry ++;
        } else {
            if (entry->addr >= virt_base &&
                            entry->addr < virt_base + size) {
                error_report("%s: RAM at %"PRIx64 "-%"PRIx64
                             " collides with FIFO at %"PRIx64,
                             __func__, virt_base, virt_base + size,
                             entry->addr);
                exit(-1);
            }

            while (entry < dma->memmap + dma->memmap_size &&
                            entry->addr <= virt_base)
                entry ++;
        }

        memmove(entry + 1, entry,
                (uint8_t *) (dma->memmap + dma->memmap_size ++) -
                (uint8_t *) entry);
    } else
        dma->memmap_size ++;

    entry->addr = virt_base;
    entry->type = soc_dma_port_mem;
    entry->u.mem.base = phys_base;
    entry->u.mem.size = size;
}

/* TODO: port removal for ports like PCMCIA memory */
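/* Illustrative sketch (hypothetical board/SoC wiring, documentation only):
 * registering the address ranges channels may target.  RAM is added together
 * with its host pointer so the fast linear paths above can memcpy directly;
 * device FIFOs are added per direction, where out = 1 marks the FIFO as a
 * transfer destination and out = 0 as a source.  The my_uart_* names,
 * variables and addresses are made up.
 *
 *     soc_dma_port_add_mem(dma, memory_region_get_ram_ptr(sdram),
 *                     0x10000000, sdram_size);
 *     soc_dma_port_add_fifo(dma, 0x4806c014,
 *                     my_uart_tx_fifo, uart_state, 1);  // destination FIFO
 *     soc_dma_port_add_fifo(dma, 0x4806c010,
 *                     my_uart_rx_fifo, uart_state, 0);  // source FIFO
 */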