Lines Matching +full:grant +full:- +full:dma
5 * the COPYING file in the top-level directory.
7 * Contributions after 2012-01-13 are licensed under the terms of the
13 #include "qemu/error-report.h"
17 #include "hw/xen/xen-hvm-common.h"
22 #include "system/xen-mapcache.h"
58 bool dma; member
84 qemu_mutex_lock(&mc->lock); in mapcache_lock()
89 qemu_mutex_unlock(&mc->lock); in mapcache_unlock()
113 mc->phys_offset_to_gaddr = f; in xen_map_cache_init_single()
114 mc->opaque = opaque; in xen_map_cache_init_single()
115 qemu_mutex_init(&mc->lock); in xen_map_cache_init_single()
117 QTAILQ_INIT(&mc->locked_entries); in xen_map_cache_init_single()
119 mc->bucket_shift = bucket_shift; in xen_map_cache_init_single()
120 mc->bucket_size = 1UL << bucket_shift; in xen_map_cache_init_single()
121 mc->max_mcache_size = max_size; in xen_map_cache_init_single()
123 mc->nr_buckets = in xen_map_cache_init_single()
124 (((mc->max_mcache_size >> XC_PAGE_SHIFT) + in xen_map_cache_init_single()
125 (1UL << (bucket_shift - XC_PAGE_SHIFT)) - 1) >> in xen_map_cache_init_single()
126 (bucket_shift - XC_PAGE_SHIFT)); in xen_map_cache_init_single()
128 size = mc->nr_buckets * sizeof(MapCacheEntry); in xen_map_cache_init_single()
129 size = (size + XC_PAGE_SIZE - 1) & ~(XC_PAGE_SIZE - 1); in xen_map_cache_init_single()
130 trace_xen_map_cache_init(mc->nr_buckets, size); in xen_map_cache_init_single()
131 mc->entry = g_malloc0(size); in xen_map_cache_init_single()
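The xen_map_cache_init_single() lines above round the configured cache size up to a whole number of buckets, then round the entry array up to a page multiple before allocating it. A minimal standalone sketch of that arithmetic, using example values in place of QEMU's XC_PAGE_SHIFT, bucket shift, cache size and MapCacheEntry:

    /* Standalone sketch of the bucket sizing shown above; XC_PAGE_SHIFT, the
     * bucket shift, the cache size and FakeEntry are example stand-ins, not
     * QEMU's definitions. */
    #include <stdio.h>

    #define XC_PAGE_SHIFT 12
    #define XC_PAGE_SIZE  (1UL << XC_PAGE_SHIFT)

    typedef struct { void *vaddr_base; } FakeEntry;   /* placeholder for MapCacheEntry */

    int main(void)
    {
        unsigned long bucket_shift = 20;               /* e.g. 1 MiB buckets */
        unsigned long max_mcache_size = 100UL << 20;   /* e.g. 100 MiB cache */

        /* Round the cache size up to a whole number of buckets. */
        unsigned long nr_buckets =
            (((max_mcache_size >> XC_PAGE_SHIFT) +
              (1UL << (bucket_shift - XC_PAGE_SHIFT)) - 1) >>
             (bucket_shift - XC_PAGE_SHIFT));

        /* Round the entry array up to a page multiple before allocating it. */
        size_t size = nr_buckets * sizeof(FakeEntry);
        size = (size + XC_PAGE_SIZE - 1) & ~(XC_PAGE_SIZE - 1);

        printf("nr_buckets=%lu array_bytes=%zu\n", nr_buckets, size);
        return 0;
    }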
166 max_mcache_size = rlimit_as.rlim_max - NON_MCACHE_MEMORY_SIZE; in xen_map_cache_init()
177 * Grant mappings must use XC_PAGE_SIZE granularity since we can't in xen_map_cache_init()
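The truncated comment above concerns mapping granularity: a grant mapping cannot cover more pages than were actually granted, so a grant-backed cache presumably has to use XC_PAGE_SIZE buckets rather than the larger default used for foreign mappings. A purely hypothetical illustration of such a choice (DEFAULT_BUCKET_SHIFT and the helper are invented; only XC_PAGE_SHIFT relates to the listing):

    /* Hypothetical: pick the bucket granularity for a cache instance.
     * DEFAULT_BUCKET_SHIFT is an invented stand-in for the regular default. */
    #define XC_PAGE_SHIFT        12
    #define DEFAULT_BUCKET_SHIFT 16

    unsigned int pick_bucket_shift(int for_grants)
    {
        /* Grants are issued per page, so a grant cache cannot batch pages
         * into larger buckets; a foreign-memory cache can. */
        return for_grants ? XC_PAGE_SHIFT : DEFAULT_BUCKET_SHIFT;
    }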
196 bool grant, in xen_remap_bucket() argument
209 if (grant) { in xen_remap_bucket()
216 if (entry->vaddr_base != NULL) { in xen_remap_bucket()
217 if (!(entry->flags & XEN_MAPCACHE_ENTRY_DUMMY)) { in xen_remap_bucket()
218 ram_block_notify_remove(entry->vaddr_base, entry->size, in xen_remap_bucket()
219 entry->size); in xen_remap_bucket()
224 * MAP_FIXED flag for it - there is possibility of a race for vaddr in xen_remap_bucket()
230 * Non-identical replacements are not allowed therefore. in xen_remap_bucket()
232 assert(!vaddr || (entry->vaddr_base == vaddr && entry->size == size)); in xen_remap_bucket()
234 if (!vaddr && munmap(entry->vaddr_base, entry->size) != 0) { in xen_remap_bucket()
236 exit(-1); in xen_remap_bucket()
239 g_free(entry->valid_mapping); in xen_remap_bucket()
240 entry->valid_mapping = NULL; in xen_remap_bucket()
242 if (grant) { in xen_remap_bucket()
243 hwaddr grant_base = address_index - (ram_offset >> XC_PAGE_SHIFT); in xen_remap_bucket()
250 pfns[i] = (address_index << (mc->bucket_shift - XC_PAGE_SHIFT)) + i; in xen_remap_bucket()
254 entry->flags &= ~XEN_MAPCACHE_ENTRY_GRANT; in xen_remap_bucket()
257 if (grant) { in xen_remap_bucket()
264 entry->flags |= XEN_MAPCACHE_ENTRY_GRANT; in xen_remap_bucket()
285 perror(grant ? "xengnttab_map_domain_grant_refs" in xen_remap_bucket()
287 exit(-1); in xen_remap_bucket()
296 -1, 0); in xen_remap_bucket()
299 exit(-1); in xen_remap_bucket()
303 if (!(entry->flags & XEN_MAPCACHE_ENTRY_DUMMY)) { in xen_remap_bucket()
307 entry->vaddr_base = vaddr_base; in xen_remap_bucket()
308 entry->paddr_index = address_index; in xen_remap_bucket()
309 entry->size = size; in xen_remap_bucket()
310 entry->valid_mapping = g_new0(unsigned long, in xen_remap_bucket()
314 entry->flags |= XEN_MAPCACHE_ENTRY_DUMMY; in xen_remap_bucket()
316 entry->flags &= ~(XEN_MAPCACHE_ENTRY_DUMMY); in xen_remap_bucket()
319 bitmap_zero(entry->valid_mapping, nb_pfn); in xen_remap_bucket()
322 bitmap_set(entry->valid_mapping, i, 1); in xen_remap_bucket()
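xen_remap_bucket() fills one frame-list slot per page: grant references when the bucket is grant-backed (and the entry gets XEN_MAPCACHE_ENTRY_GRANT), guest frame numbers otherwise, before calling the matching map primitive. The sketch below reproduces just that arithmetic; the two inner expressions come from the listing, while the wrapper function, types and names are illustrative:

    #include <stdint.h>
    #include <stddef.h>

    #define XC_PAGE_SHIFT 12
    typedef uint64_t hwaddr;

    void fill_frames(uint64_t *frames, size_t nb_pfn,
                     hwaddr address_index, hwaddr ram_offset,
                     unsigned int bucket_shift, int grant)
    {
        size_t i;

        if (grant) {
            /* Grant refs count from the start of the grant region, so the
             * region offset (in pages) is subtracted first. */
            hwaddr grant_base = address_index - (ram_offset >> XC_PAGE_SHIFT);
            for (i = 0; i < nb_pfn; i++) {
                frames[i] = grant_base + i;
            }
        } else {
            /* Foreign mappings use guest frame numbers; one bucket spans
             * 2^(bucket_shift - XC_PAGE_SHIFT) consecutive pages. */
            for (i = 0; i < nb_pfn; i++) {
                frames[i] = (address_index << (bucket_shift - XC_PAGE_SHIFT)) + i;
            }
        }
    }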
330 uint8_t lock, bool dma, in xen_map_cache_unlocked() argument
331 bool grant, bool is_write) in xen_map_cache_unlocked() argument
343 address_index = phys_addr >> mc->bucket_shift; in xen_map_cache_unlocked()
344 address_offset = phys_addr & (mc->bucket_size - 1); in xen_map_cache_unlocked()
350 test_bit_size = size + (phys_addr & (XC_PAGE_SIZE - 1)); in xen_map_cache_unlocked()
353 test_bit_size += XC_PAGE_SIZE - (test_bit_size % XC_PAGE_SIZE); in xen_map_cache_unlocked()
359 if (mc->last_entry != NULL && in xen_map_cache_unlocked()
360 mc->last_entry->paddr_index == address_index && in xen_map_cache_unlocked()
364 mc->last_entry->valid_mapping)) { in xen_map_cache_unlocked()
366 mc->last_entry->vaddr_base + address_offset in xen_map_cache_unlocked()
368 return mc->last_entry->vaddr_base + address_offset; in xen_map_cache_unlocked()
371 /* size is always a multiple of mc->bucket_size */ in xen_map_cache_unlocked()
374 if (cache_size % mc->bucket_size) { in xen_map_cache_unlocked()
375 cache_size += mc->bucket_size - (cache_size % mc->bucket_size); in xen_map_cache_unlocked()
378 cache_size = mc->bucket_size; in xen_map_cache_unlocked()
381 entry = &mc->entry[address_index % mc->nr_buckets]; in xen_map_cache_unlocked()
383 while (entry && (!entry->vaddr_base || in xen_map_cache_unlocked()
384 entry->paddr_index != address_index || entry->size != cache_size || in xen_map_cache_unlocked()
387 entry->valid_mapping))) { in xen_map_cache_unlocked()
388 if (!free_entry && (!entry->lock || !entry->vaddr_base)) { in xen_map_cache_unlocked()
393 entry = entry->next; in xen_map_cache_unlocked()
401 pentry->next = entry; in xen_map_cache_unlocked()
403 grant, is_write, ram_offset); in xen_map_cache_unlocked()
404 } else if (!entry->lock) { in xen_map_cache_unlocked()
405 if (!entry->vaddr_base || entry->paddr_index != address_index || in xen_map_cache_unlocked()
406 entry->size != cache_size || in xen_map_cache_unlocked()
409 entry->valid_mapping)) { in xen_map_cache_unlocked()
411 grant, is_write, ram_offset); in xen_map_cache_unlocked()
417 entry->valid_mapping)) { in xen_map_cache_unlocked()
418 mc->last_entry = NULL; in xen_map_cache_unlocked()
420 if (!translated && mc->phys_offset_to_gaddr) { in xen_map_cache_unlocked()
421 phys_addr = mc->phys_offset_to_gaddr(phys_addr, size); in xen_map_cache_unlocked()
434 mc->last_entry = entry; in xen_map_cache_unlocked()
437 entry->lock++; in xen_map_cache_unlocked()
438 if (entry->lock == 0) { in xen_map_cache_unlocked()
439 error_report("mapcache entry lock overflow: "HWADDR_FMT_plx" -> %p", in xen_map_cache_unlocked()
440 entry->paddr_index, entry->vaddr_base); in xen_map_cache_unlocked()
443 reventry->dma = dma; in xen_map_cache_unlocked()
444 reventry->vaddr_req = mc->last_entry->vaddr_base + address_offset; in xen_map_cache_unlocked()
445 reventry->paddr_index = mc->last_entry->paddr_index; in xen_map_cache_unlocked()
446 reventry->size = entry->size; in xen_map_cache_unlocked()
447 QTAILQ_INSERT_HEAD(&mc->locked_entries, reventry, next); in xen_map_cache_unlocked()
451 mc->last_entry->vaddr_base + address_offset in xen_map_cache_unlocked()
453 return mc->last_entry->vaddr_base + address_offset; in xen_map_cache_unlocked()
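xen_map_cache_unlocked() splits the guest physical address into a bucket index plus an in-bucket offset, and rounds the request up to whole pages before testing the entry's valid_mapping bitmap; on a hit it returns vaddr_base plus the offset, and when the caller asked for a locked mapping it bumps entry->lock and records a reverse entry carrying the dma flag. A standalone illustration of the address arithmetic only (the constants are example values, not QEMU's):

    #include <stdint.h>
    #include <stdio.h>

    #define XC_PAGE_SIZE 4096UL

    int main(void)
    {
        unsigned int bucket_shift = 20;
        uint64_t bucket_size = 1UL << bucket_shift;
        uint64_t phys_addr = 0x12345678;
        uint64_t size = 0x300;

        uint64_t address_index  = phys_addr >> bucket_shift;      /* which bucket */
        uint64_t address_offset = phys_addr & (bucket_size - 1);  /* offset inside it */

        /* The bitmap test covers every page the request touches, so the length
         * is grown by the sub-page start offset and rounded up to a page. */
        uint64_t test_bit_size = size + (phys_addr & (XC_PAGE_SIZE - 1));
        if (test_bit_size % XC_PAGE_SIZE) {
            test_bit_size += XC_PAGE_SIZE - (test_bit_size % XC_PAGE_SIZE);
        }

        printf("index=%#lx offset=%#lx test_bit_size=%#lx\n",
               (unsigned long)address_index, (unsigned long)address_offset,
               (unsigned long)test_bit_size);
        return 0;
    }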
459 uint8_t lock, bool dma, in xen_map_cache() argument
462 bool grant = xen_mr_is_grants(mr); in xen_map_cache() local
466 if (grant) { in xen_map_cache()
470 if (grant && !lock) { in xen_map_cache()
478 error_report("Tried to access a grant reference without mapping it."); in xen_map_cache()
484 lock, dma, grant, is_write); in xen_map_cache()
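The xen_map_cache() wrapper above routes grant-backed MemoryRegions to the grant mapcache and, per the error message in the listing, refuses unlocked grant accesses. A hypothetical guard expressing that rule; only the message text and the grant/lock condition come from the listing, the helper itself is invented:

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical pre-check: grant references may only be touched
     * through a locked mapping. */
    bool check_grant_access(bool grant, bool lock)
    {
        if (grant && !lock) {
            fprintf(stderr,
                    "Tried to access a grant reference without mapping it.\n");
            return false;
        }
        return true;
    }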
499 QTAILQ_FOREACH(reventry, &mc->locked_entries, next) { in xen_ram_addr_from_mapcache_single()
500 if (reventry->vaddr_req == ptr) { in xen_ram_addr_from_mapcache_single()
501 paddr_index = reventry->paddr_index; in xen_ram_addr_from_mapcache_single()
502 size = reventry->size; in xen_ram_addr_from_mapcache_single()
513 entry = &mc->entry[paddr_index % mc->nr_buckets]; in xen_ram_addr_from_mapcache_single()
514 while (entry && (entry->paddr_index != paddr_index || entry->size != size)) { in xen_ram_addr_from_mapcache_single()
515 entry = entry->next; in xen_ram_addr_from_mapcache_single()
521 raddr = (reventry->paddr_index << mc->bucket_shift) + in xen_ram_addr_from_mapcache_single()
522 ((unsigned long) ptr - (unsigned long) entry->vaddr_base); in xen_ram_addr_from_mapcache_single()
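The reverse lookup above scans the locked-entries list for the bucket a host pointer belongs to, then rebuilds the RAM address from the bucket index plus the pointer's offset inside the mapping. A minimal model of that last step; the struct is a simplified stand-in and only the expression mirrors the listing:

    #include <stdint.h>

    typedef uint64_t ram_addr_t;

    struct rev_entry {                 /* simplified stand-in for the cache entry */
        uint64_t paddr_index;
        uint8_t *vaddr_base;
    };

    ram_addr_t ptr_to_ram_addr(const struct rev_entry *e, const void *ptr,
                               unsigned int bucket_shift)
    {
        /* bucket start in guest RAM + byte offset of ptr inside the mapping */
        return (e->paddr_index << bucket_shift) +
               ((uintptr_t)ptr - (uintptr_t)e->vaddr_base);
    }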
553 QTAILQ_FOREACH(reventry, &mc->locked_entries, next) { in xen_invalidate_map_cache_entry_unlocked()
554 if (reventry->vaddr_req == buffer) { in xen_invalidate_map_cache_entry_unlocked()
555 paddr_index = reventry->paddr_index; in xen_invalidate_map_cache_entry_unlocked()
556 size = reventry->size; in xen_invalidate_map_cache_entry_unlocked()
563 QTAILQ_FOREACH(reventry, &mc->locked_entries, next) { in xen_invalidate_map_cache_entry_unlocked()
565 reventry->paddr_index, in xen_invalidate_map_cache_entry_unlocked()
566 reventry->vaddr_req in xen_invalidate_map_cache_entry_unlocked()
571 QTAILQ_REMOVE(&mc->locked_entries, reventry, next); in xen_invalidate_map_cache_entry_unlocked()
574 if (mc->last_entry != NULL && in xen_invalidate_map_cache_entry_unlocked()
575 mc->last_entry->paddr_index == paddr_index) { in xen_invalidate_map_cache_entry_unlocked()
576 mc->last_entry = NULL; in xen_invalidate_map_cache_entry_unlocked()
579 entry = &mc->entry[paddr_index % mc->nr_buckets]; in xen_invalidate_map_cache_entry_unlocked()
580 while (entry && (entry->paddr_index != paddr_index || entry->size != size)) { in xen_invalidate_map_cache_entry_unlocked()
582 entry = entry->next; in xen_invalidate_map_cache_entry_unlocked()
588 entry->lock--; in xen_invalidate_map_cache_entry_unlocked()
589 if (entry->lock > 0) { in xen_invalidate_map_cache_entry_unlocked()
593 ram_block_notify_remove(entry->vaddr_base, entry->size, entry->size); in xen_invalidate_map_cache_entry_unlocked()
594 if (entry->flags & XEN_MAPCACHE_ENTRY_GRANT) { in xen_invalidate_map_cache_entry_unlocked()
595 rc = xengnttab_unmap(xen_region_gnttabdev, entry->vaddr_base, in xen_invalidate_map_cache_entry_unlocked()
596 entry->size >> mc->bucket_shift); in xen_invalidate_map_cache_entry_unlocked()
598 rc = munmap(entry->vaddr_base, entry->size); in xen_invalidate_map_cache_entry_unlocked()
603 exit(-1); in xen_invalidate_map_cache_entry_unlocked()
606 g_free(entry->valid_mapping); in xen_invalidate_map_cache_entry_unlocked()
608 pentry->next = entry->next; in xen_invalidate_map_cache_entry_unlocked()
612 * Invalidate mapping but keep entry->next pointing to the rest in xen_invalidate_map_cache_entry_unlocked()
617 entry->paddr_index = 0; in xen_invalidate_map_cache_entry_unlocked()
618 entry->vaddr_base = NULL; in xen_invalidate_map_cache_entry_unlocked()
619 entry->valid_mapping = NULL; in xen_invalidate_map_cache_entry_unlocked()
620 entry->flags = 0; in xen_invalidate_map_cache_entry_unlocked()
621 entry->size = 0; in xen_invalidate_map_cache_entry_unlocked()
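Entry teardown above depends on how the bucket was mapped: a grant-backed entry is released through the grant-table device with a frame count derived from entry->size, everything else is munmap'ed, and a failure in either path is fatal. A hedged sketch of that branch, with a stub in place of the real xengnttab_unmap() call and invented names elsewhere:

    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/mman.h>

    #define ENTRY_GRANT 0x1            /* stand-in for XEN_MAPCACHE_ENTRY_GRANT */

    /* Stub standing in for the grant-table unmap call. */
    int stub_gnttab_unmap(void *vaddr, unsigned int nr_frames)
    {
        (void)vaddr; (void)nr_frames;
        return 0;
    }

    void teardown_mapping(void *vaddr, size_t size,
                          unsigned int bucket_shift, unsigned int flags)
    {
        int rc;

        if (flags & ENTRY_GRANT) {
            /* Convert bytes to the count of bucket-sized frames expected by
             * the grant unmap (one page per bucket for a grant cache). */
            rc = stub_gnttab_unmap(vaddr, size >> bucket_shift);
        } else {
            rc = munmap(vaddr, size);
        }
        if (rc) {
            perror("unmap failed");
            exit(EXIT_FAILURE);
        }
    }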
648 xen_invalidate_map_cache_entry_all(data->buffer); in xen_invalidate_map_cache_entry_bh()
649 aio_co_wake(data->co); in xen_invalidate_map_cache_entry_bh()
674 QTAILQ_FOREACH(reventry, &mc->locked_entries, next) { in xen_invalidate_map_cache_single()
675 if (!reventry->dma) { in xen_invalidate_map_cache_single()
678 trace_xen_invalidate_map_cache(reventry->paddr_index, in xen_invalidate_map_cache_single()
679 reventry->vaddr_req); in xen_invalidate_map_cache_single()
682 for (i = 0; i < mc->nr_buckets; i++) { in xen_invalidate_map_cache_single()
683 MapCacheEntry *entry = &mc->entry[i]; in xen_invalidate_map_cache_single()
685 if (entry->vaddr_base == NULL) { in xen_invalidate_map_cache_single()
688 if (entry->lock > 0) { in xen_invalidate_map_cache_single()
692 if (munmap(entry->vaddr_base, entry->size) != 0) { in xen_invalidate_map_cache_single()
694 exit(-1); in xen_invalidate_map_cache_single()
697 entry->paddr_index = 0; in xen_invalidate_map_cache_single()
698 entry->vaddr_base = NULL; in xen_invalidate_map_cache_single()
699 entry->size = 0; in xen_invalidate_map_cache_single()
700 g_free(entry->valid_mapping); in xen_invalidate_map_cache_single()
701 entry->valid_mapping = NULL; in xen_invalidate_map_cache_single()
704 mc->last_entry = NULL; in xen_invalidate_map_cache_single()
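The full flush above first walks the locked-entries list and flags any mapping that was taken for DMA and is still outstanding, then tears down only the buckets that are mapped and not locked, clearing mc->last_entry at the end. A compact model of the two rules (the types are invented, and the fprintf stands in for the trace point used in the listing):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct rev { bool dma; uint64_t paddr_index; };   /* invented stand-in for MapCacheRev */
    struct ent { uint32_t lock; void *vaddr_base; };  /* ...and for MapCacheEntry          */

    /* Report mappings taken for DMA that are still live at flush time. */
    void report_outstanding_dma(const struct rev *r)
    {
        if (r->dma) {
            fprintf(stderr, "mapcache: DMA mapping for index %#llx still live\n",
                    (unsigned long long)r->paddr_index);
        }
    }

    /* A bucket is torn down only if something is mapped and nobody holds it. */
    bool entry_flushable(const struct ent *e)
    {
        return e->vaddr_base != NULL && e->lock == 0;
    }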
726 address_index = old_phys_addr >> mc->bucket_shift; in xen_replace_cache_entry_unlocked()
727 address_offset = old_phys_addr & (mc->bucket_size - 1); in xen_replace_cache_entry_unlocked()
731 test_bit_size = size + (old_phys_addr & (XC_PAGE_SIZE - 1)); in xen_replace_cache_entry_unlocked()
733 test_bit_size += XC_PAGE_SIZE - (test_bit_size % XC_PAGE_SIZE); in xen_replace_cache_entry_unlocked()
736 if (cache_size % mc->bucket_size) { in xen_replace_cache_entry_unlocked()
737 cache_size += mc->bucket_size - (cache_size % mc->bucket_size); in xen_replace_cache_entry_unlocked()
740 entry = &mc->entry[address_index % mc->nr_buckets]; in xen_replace_cache_entry_unlocked()
741 while (entry && !(entry->paddr_index == address_index && in xen_replace_cache_entry_unlocked()
742 entry->size == cache_size)) { in xen_replace_cache_entry_unlocked()
743 entry = entry->next; in xen_replace_cache_entry_unlocked()
750 assert((entry->flags & XEN_MAPCACHE_ENTRY_GRANT) == 0); in xen_replace_cache_entry_unlocked()
752 address_index = new_phys_addr >> mc->bucket_shift; in xen_replace_cache_entry_unlocked()
753 address_offset = new_phys_addr & (mc->bucket_size - 1); in xen_replace_cache_entry_unlocked()
757 xen_remap_bucket(mc, entry, entry->vaddr_base, in xen_replace_cache_entry_unlocked()
762 entry->valid_mapping)) { in xen_replace_cache_entry_unlocked()
769 return entry->vaddr_base + address_offset; in xen_replace_cache_entry_unlocked()
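Replacement above is deliberately narrow: the target entry must already exist with the same rounded cache size, and grant-backed entries are ruled out by the assert, presumably because a grant mapping cannot simply be re-pointed at a different guest address. A small model of those preconditions; the struct and helper are illustrative only:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define ENTRY_GRANT 0x1            /* stand-in for XEN_MAPCACHE_ENTRY_GRANT */

    struct ent {                       /* simplified stand-in for MapCacheEntry */
        uint64_t paddr_index;
        uint64_t size;
        unsigned int flags;
    };

    /* Check that 'e' is a legal target for in-place replacement. */
    bool can_replace(const struct ent *e, uint64_t old_index,
                     uint64_t cache_size)
    {
        if (!e || e->paddr_index != old_index || e->size != cache_size) {
            return false;              /* no matching mapping to replace */
        }
        assert((e->flags & ENTRY_GRANT) == 0);   /* grants are never re-pointed */
        return true;
    }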