/*
 * On-chip DMA controller framework.
 *
 * Copyright (C) 2008 Nokia Corporation
 * Written by Andrzej Zaborowski <andrew@openedhand.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 or
 * (at your option) version 3 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "qemu-common.h"
#include "qemu/timer.h"
#include "hw/arm/soc_dma.h"

static void transfer_mem2mem(struct soc_dma_ch_s *ch)
{
    memcpy(ch->paddr[0], ch->paddr[1], ch->bytes);
    ch->paddr[0] += ch->bytes;
    ch->paddr[1] += ch->bytes;
}

static void transfer_mem2fifo(struct soc_dma_ch_s *ch)
{
    ch->io_fn[1](ch->io_opaque[1], ch->paddr[0], ch->bytes);
    ch->paddr[0] += ch->bytes;
}

static void transfer_fifo2mem(struct soc_dma_ch_s *ch)
{
    ch->io_fn[0](ch->io_opaque[0], ch->paddr[1], ch->bytes);
    ch->paddr[1] += ch->bytes;
}

/* This is further optimisable but isn't very important because often
 * DMA peripherals forbid this kind of transfer and even when they don't,
 * operating systems may not need to use them.  */
static void *fifo_buf;
static int fifo_size;
static void transfer_fifo2fifo(struct soc_dma_ch_s *ch)
{
    if (ch->bytes > fifo_size)
        fifo_buf = g_realloc(fifo_buf, fifo_size = ch->bytes);

    /* Implement as transfer_fifo2linear + transfer_linear2fifo.  */
    ch->io_fn[0](ch->io_opaque[0], fifo_buf, ch->bytes);
    ch->io_fn[1](ch->io_opaque[1], fifo_buf, ch->bytes);
}

struct dma_s {
    struct soc_dma_s soc;
    int chnum;
    uint64_t ch_enable_mask;
    int64_t channel_freq;
    int enabled_count;

    struct memmap_entry_s {
        enum soc_dma_port_type type;
        hwaddr addr;
        union {
            struct {
                void *opaque;
                soc_dma_io_t fn;
                int out;
            } fifo;
            struct {
                void *base;
                size_t size;
            } mem;
        } u;
    } *memmap;
    int memmap_size;

    struct soc_dma_ch_s ch[0];
};

static void soc_dma_ch_schedule(struct soc_dma_ch_s *ch, int delay_bytes)
{
    int64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    struct dma_s *dma = (struct dma_s *) ch->dma;

    timer_mod(ch->timer, now + delay_bytes / dma->channel_freq);
}

static void soc_dma_ch_run(void *opaque)
{
    struct soc_dma_ch_s *ch = (struct soc_dma_ch_s *) opaque;

    ch->running = 1;
    ch->dma->setup_fn(ch);
    ch->transfer_fn(ch);
    ch->running = 0;

    if (ch->enable)
        soc_dma_ch_schedule(ch, ch->bytes);
    ch->bytes = 0;
}

/* Search the address-sorted memmap for the entry that addr falls in.  */
static inline struct memmap_entry_s *soc_dma_lookup(struct dma_s *dma,
                hwaddr addr)
{
    struct memmap_entry_s *lo;
    int hi;

    lo = dma->memmap;
    hi = dma->memmap_size;

    while (hi > 1) {
        hi /= 2;
        if (lo[hi].addr <= addr)
            lo += hi;
    }

    return lo;
}

static inline enum soc_dma_port_type soc_dma_ch_update_type(
                struct soc_dma_ch_s *ch, int port)
{
    struct dma_s *dma = (struct dma_s *) ch->dma;
    struct memmap_entry_s *entry = soc_dma_lookup(dma, ch->vaddr[port]);

    if (entry->type == soc_dma_port_fifo) {
        while (entry < dma->memmap + dma->memmap_size &&
                        entry->u.fifo.out != port)
            entry ++;
        if (entry->addr != ch->vaddr[port] || entry->u.fifo.out != port)
            return soc_dma_port_other;

        if (ch->type[port] != soc_dma_access_const)
            return soc_dma_port_other;

        ch->io_fn[port] = entry->u.fifo.fn;
        ch->io_opaque[port] = entry->u.fifo.opaque;
        return soc_dma_port_fifo;
    } else if (entry->type == soc_dma_port_mem) {
        if (entry->addr > ch->vaddr[port] ||
                        entry->addr + entry->u.mem.size <= ch->vaddr[port])
            return soc_dma_port_other;

        /* TODO: support constant memory address for source port as used for
         * drawing solid rectangles by PalmOS(R).  */
        if (ch->type[port] != soc_dma_access_linear)
            return soc_dma_port_other;

        ch->paddr[port] = (uint8_t *) entry->u.mem.base +
                (ch->vaddr[port] - entry->addr);
        /* TODO: save bytes left to the end of the mapping somewhere so we
         * can check we're not reading beyond it.  */
        return soc_dma_port_mem;
    } else
        return soc_dma_port_other;
}

/* Pick a specialised transfer function for the channel based on what its
 * two port addresses map to; call this after vaddr[] or type[] change.  */
void soc_dma_ch_update(struct soc_dma_ch_s *ch)
{
    enum soc_dma_port_type src, dst;

    src = soc_dma_ch_update_type(ch, 0);
    if (src == soc_dma_port_other) {
        ch->update = 0;
        ch->transfer_fn = ch->dma->transfer_fn;
        return;
    }
    dst = soc_dma_ch_update_type(ch, 1);

    /* TODO: use src and dst as array indices.  */
    if (src == soc_dma_port_mem && dst == soc_dma_port_mem)
        ch->transfer_fn = transfer_mem2mem;
    else if (src == soc_dma_port_mem && dst == soc_dma_port_fifo)
        ch->transfer_fn = transfer_mem2fifo;
    else if (src == soc_dma_port_fifo && dst == soc_dma_port_mem)
        ch->transfer_fn = transfer_fifo2mem;
    else if (src == soc_dma_port_fifo && dst == soc_dma_port_fifo)
        ch->transfer_fn = transfer_fifo2fifo;
    else
        ch->transfer_fn = ch->dma->transfer_fn;

    ch->update = (dst != soc_dma_port_other);
}

static void soc_dma_ch_freq_update(struct dma_s *s)
{
    if (s->enabled_count)
        /* We completely ignore channel priorities and stuff */
        s->channel_freq = s->soc.freq / s->enabled_count;
    else {
        /* TODO: Signal that we want to disable the functional clock and let
         * the platform code decide what to do with it, i.e. check that
         * auto-idle is enabled in the clock controller and if we are stopping
         * the clock, do the same with any parent clocks that had only one
         * user keeping them on and auto-idle enabled.  */
    }
}

void soc_dma_set_request(struct soc_dma_ch_s *ch, int level)
{
    struct dma_s *dma = (struct dma_s *) ch->dma;

    dma->enabled_count += level - ch->enable;

    if (level)
        dma->ch_enable_mask |= 1ULL << ch->num;
    else
        dma->ch_enable_mask &= ~(1ULL << ch->num);

    if (level != ch->enable) {
        soc_dma_ch_freq_update(dma);
        ch->enable = level;

        if (!ch->enable)
            timer_del(ch->timer);
        else if (!ch->running)
            soc_dma_ch_run(ch);
        else
            soc_dma_ch_schedule(ch, 1);
    }
}

void soc_dma_reset(struct soc_dma_s *soc)
{
    struct dma_s *s = (struct dma_s *) soc;

    s->soc.drqbmp = 0;
    s->ch_enable_mask = 0;
    s->enabled_count = 0;
    soc_dma_ch_freq_update(s);
}

/* TODO: take a functional-clock argument */
struct soc_dma_s *soc_dma_init(int n)
{
    int i;
    struct dma_s *s = g_malloc0(sizeof(*s) + n * sizeof(*s->ch));

    s->chnum = n;
    s->soc.ch = s->ch;
    for (i = 0; i < n; i ++) {
        s->ch[i].dma = &s->soc;
        s->ch[i].num = i;
        s->ch[i].timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                        soc_dma_ch_run, &s->ch[i]);
    }

    soc_dma_reset(&s->soc);
    fifo_size = 0;

    return &s->soc;
}

void soc_dma_port_add_fifo(struct soc_dma_s *soc, hwaddr virt_base,
                soc_dma_io_t fn, void *opaque, int out)
{
    struct memmap_entry_s *entry;
    struct dma_s *dma = (struct dma_s *) soc;

    dma->memmap = g_realloc(dma->memmap, sizeof(*entry) *
                    (dma->memmap_size + 1));
    entry = soc_dma_lookup(dma, virt_base);

    if (dma->memmap_size) {
        if (entry->type == soc_dma_port_mem) {
            if (entry->addr <= virt_base &&
                    entry->addr + entry->u.mem.size > virt_base) {
                error_report("%s: FIFO at %"PRIx64
                                " collides with RAM region at %"PRIx64
                                "-%"PRIx64, __func__,
                                virt_base, entry->addr,
                                (entry->addr + entry->u.mem.size));
                exit(-1);
            }

            if (entry->addr <= virt_base)
                entry ++;
        } else
            while (entry < dma->memmap + dma->memmap_size &&
                            entry->addr <= virt_base) {
                if (entry->addr == virt_base && entry->u.fifo.out == out) {
                    error_report("%s: FIFO at %"PRIx64
                                    " collides with FIFO at %"PRIx64,
                                    __func__, virt_base, entry->addr);
                    exit(-1);
                }

                entry ++;
            }

        memmove(entry + 1, entry,
                (uint8_t *) (dma->memmap + dma->memmap_size ++) -
                (uint8_t *) entry);
    } else
        dma->memmap_size ++;

    entry->addr = virt_base;
    entry->type = soc_dma_port_fifo;
    entry->u.fifo.fn = fn;
    entry->u.fifo.opaque = opaque;
    entry->u.fifo.out = out;
}

void soc_dma_port_add_mem(struct soc_dma_s *soc, uint8_t *phys_base,
                hwaddr virt_base, size_t size)
{
    struct memmap_entry_s *entry;
    struct dma_s *dma = (struct dma_s *) soc;

    dma->memmap = g_realloc(dma->memmap, sizeof(*entry) *
                    (dma->memmap_size + 1));
    entry = soc_dma_lookup(dma, virt_base);

    if (dma->memmap_size) {
        if (entry->type == soc_dma_port_mem) {
            if ((entry->addr >= virt_base && entry->addr < virt_base + size) ||
                    (entry->addr <= virt_base &&
                     entry->addr + entry->u.mem.size > virt_base)) {
                error_report("%s: RAM at %"PRIx64 "-%"PRIx64
                                " collides with RAM region at %"PRIx64
                                "-%"PRIx64, __func__,
                                virt_base, virt_base + size,
                                entry->addr, entry->addr + entry->u.mem.size);
                exit(-1);
            }

            if (entry->addr <= virt_base)
                entry ++;
        } else {
            if (entry->addr >= virt_base &&
                    entry->addr < virt_base + size) {
                error_report("%s: RAM at %"PRIx64 "-%"PRIx64
                                " collides with FIFO at %"PRIx64,
                                __func__, virt_base, virt_base + size,
                                entry->addr);
                exit(-1);
            }

            while (entry < dma->memmap + dma->memmap_size &&
                    entry->addr <= virt_base)
                entry ++;
        }

        memmove(entry + 1, entry,
                (uint8_t *) (dma->memmap + dma->memmap_size ++) -
                (uint8_t *) entry);
    } else
        dma->memmap_size ++;

    entry->addr = virt_base;
    entry->type = soc_dma_port_mem;
    entry->u.mem.base = phys_base;
    entry->u.mem.size = size;
}

/* TODO: port removal for ports like PCMCIA memory */
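
/* Usage sketch (not compiled): an illustration of how an SoC DMA model might
 * drive this framework.  In-tree, the OMAP DMA model wraps these calls in its
 * own register emulation; the "example_*" names, the channel count, the burst
 * size and the RAM -> FIFO scenario below are hypothetical and only show the
 * intended call sequence.  */
#if 0
/* Called before every transfer run: the DMA model decides how many bytes
 * this run should move by setting ch->bytes.  */
static void example_setup_fn(struct soc_dma_ch_s *ch)
{
    ch->bytes = 256;
}

/* Fallback used when a port is neither registered RAM nor a known FIFO.  */
static void example_transfer_fn(struct soc_dma_ch_s *ch)
{
}

static void example_init(uint8_t *ram_ptr, hwaddr ram_base, size_t ram_size,
                hwaddr fifo_addr, soc_dma_io_t fifo_fn, void *fifo_dev,
                int64_t clock_rate)
{
    struct soc_dma_s *dma = soc_dma_init(4);    /* four channels */

    dma->freq = clock_rate;
    dma->setup_fn = example_setup_fn;
    dma->transfer_fn = example_transfer_fn;

    /* Describe what the engine can reach directly.  */
    soc_dma_port_add_mem(dma, ram_ptr, ram_base, ram_size);
    soc_dma_port_add_fifo(dma, fifo_addr, fifo_fn, fifo_dev, 1 /* out */);

    /* The guest programs channel 0 for a RAM -> FIFO transfer ...  */
    dma->ch[0].type[0] = soc_dma_access_linear;  /* source walks RAM */
    dma->ch[0].type[1] = soc_dma_access_const;   /* destination FIFO */
    dma->ch[0].vaddr[0] = ram_base;
    dma->ch[0].vaddr[1] = fifo_addr;
    soc_dma_ch_update(&dma->ch[0]);              /* resolve both ports */

    /* ... and raises its request line; the channel starts running and keeps
     * rescheduling itself until the request is dropped again with
     * soc_dma_set_request(ch, 0).  */
    soc_dma_set_request(&dma->ch[0], 1);
}
#endif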