Lines Matching full:entry

62     MapCacheEntry *entry;  member
130 mc->entry = g_malloc0(size); in xen_map_cache_init_single()
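
The member definition at line 62 and the g_malloc0() call in xen_map_cache_init_single() together give the overall shape of the cache: a MapCache owns an array of bucket heads, and each bucket is a chain of MapCacheEntry records linked through next. The sketch below is a reconstruction from the fields that appear in this listing only (paddr_index, vaddr_base, valid_mapping, lock, flags, size, next); the real QEMU declarations may differ in field order, widths and flag bit values.

/* Illustrative reconstruction -- not copied from the QEMU sources. */
#include <stdint.h>

typedef uint64_t hwaddr;                  /* stand-in for QEMU's hwaddr */

#define XEN_MAPCACHE_ENTRY_DUMMY (1 << 0) /* flag names taken from this listing; */
#define XEN_MAPCACHE_ENTRY_GRANT (1 << 1) /* the bit positions are assumptions   */

typedef struct MapCacheEntry {
    hwaddr paddr_index;                   /* guest address >> bucket_shift */
    uint8_t *vaddr_base;                  /* host mapping of the bucket, NULL if unmapped */
    unsigned long *valid_mapping;         /* per-page validity bitmap */
    uint32_t lock;                        /* pin count taken by locked lookups */
    uint8_t flags;                        /* DUMMY / GRANT bits above */
    hwaddr size;                          /* size of this mapping */
    struct MapCacheEntry *next;           /* collision chain within one bucket */
} MapCacheEntry;

typedef struct MapCache {
    MapCacheEntry *entry;                 /* array of nr_buckets chain heads (line 130) */
    unsigned long nr_buckets;
    unsigned int bucket_shift;            /* log2 of the bucket size (line 585) */
    MapCacheEntry *last_entry;            /* last hit, see line 430 */
    /* further fields are not visible in this listing and are omitted */
} MapCache;
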
187 MapCacheEntry *entry, in xen_remap_bucket() argument
212 if (entry->vaddr_base != NULL) { in xen_remap_bucket()
213 if (!(entry->flags & XEN_MAPCACHE_ENTRY_DUMMY)) { in xen_remap_bucket()
214 ram_block_notify_remove(entry->vaddr_base, entry->size, in xen_remap_bucket()
215 entry->size); in xen_remap_bucket()
219 * If an entry is being replaced by another mapping and we're using in xen_remap_bucket()
228 assert(!vaddr || (entry->vaddr_base == vaddr && entry->size == size)); in xen_remap_bucket()
230 if (!vaddr && munmap(entry->vaddr_base, entry->size) != 0) { in xen_remap_bucket()
235 g_free(entry->valid_mapping); in xen_remap_bucket()
236 entry->valid_mapping = NULL; in xen_remap_bucket()
250 entry->flags &= ~XEN_MAPCACHE_ENTRY_GRANT; in xen_remap_bucket()
260 entry->flags |= XEN_MAPCACHE_ENTRY_GRANT; in xen_remap_bucket()
299 if (!(entry->flags & XEN_MAPCACHE_ENTRY_DUMMY)) { in xen_remap_bucket()
303 entry->vaddr_base = vaddr_base; in xen_remap_bucket()
304 entry->paddr_index = address_index; in xen_remap_bucket()
305 entry->size = size; in xen_remap_bucket()
306 entry->valid_mapping = g_new0(unsigned long, in xen_remap_bucket()
310 entry->flags |= XEN_MAPCACHE_ENTRY_DUMMY; in xen_remap_bucket()
312 entry->flags &= ~(XEN_MAPCACHE_ENTRY_DUMMY); in xen_remap_bucket()
315 bitmap_zero(entry->valid_mapping, nb_pfn); in xen_remap_bucket()
318 bitmap_set(entry->valid_mapping, i, 1); in xen_remap_bucket()
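
The xen_remap_bucket() references above trace one (re)mapping pass over an entry: notify and unmap any previous non-dummy mapping, free the old valid_mapping bitmap, update the GRANT and DUMMY flags, record the new vaddr_base/paddr_index/size, and finally rebuild the bitmap with one bit per successfully mapped page. The fragment below paraphrases only that last, bitmap-rebuild step (lines 306-318); the err[] array standing for the per-page mapping results is a hypothetical placeholder for whatever the mapping call reports, and QEMU's bitmap.h helpers are spelled out by hand.

/* Condensed paraphrase of the valid_mapping rebuild; illustrative only. */
#include <limits.h>
#include <stddef.h>

#define BITS_PER_LONG    (sizeof(unsigned long) * CHAR_BIT)
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

static void rebuild_valid_mapping(unsigned long *valid_mapping,
                                  const int *err, size_t nb_pfn)
{
    size_t i;

    /* bitmap_zero(entry->valid_mapping, nb_pfn); */
    for (i = 0; i < BITS_TO_LONGS(nb_pfn); i++) {
        valid_mapping[i] = 0;
    }

    /* bitmap_set(entry->valid_mapping, i, 1) for each page that mapped */
    for (i = 0; i < nb_pfn; i++) {
        if (err[i] == 0) {
            valid_mapping[i / BITS_PER_LONG] |= 1UL << (i % BITS_PER_LONG);
        }
    }
}
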
329 MapCacheEntry *entry, *pentry = NULL, in xen_map_cache_unlocked() local
377 entry = &mc->entry[address_index % mc->nr_buckets]; in xen_map_cache_unlocked()
379 while (entry && (lock || entry->lock) && entry->vaddr_base && in xen_map_cache_unlocked()
380 (entry->paddr_index != address_index || entry->size != cache_size || in xen_map_cache_unlocked()
383 entry->valid_mapping))) { in xen_map_cache_unlocked()
384 if (!free_entry && !entry->lock) { in xen_map_cache_unlocked()
385 free_entry = entry; in xen_map_cache_unlocked()
388 pentry = entry; in xen_map_cache_unlocked()
389 entry = entry->next; in xen_map_cache_unlocked()
391 if (!entry && free_entry) { in xen_map_cache_unlocked()
392 entry = free_entry; in xen_map_cache_unlocked()
395 if (!entry) { in xen_map_cache_unlocked()
396 entry = g_new0(MapCacheEntry, 1); in xen_map_cache_unlocked()
397 pentry->next = entry; in xen_map_cache_unlocked()
398 xen_remap_bucket(mc, entry, NULL, cache_size, address_index, dummy, in xen_map_cache_unlocked()
400 } else if (!entry->lock) { in xen_map_cache_unlocked()
401 if (!entry->vaddr_base || entry->paddr_index != address_index || in xen_map_cache_unlocked()
402 entry->size != cache_size || in xen_map_cache_unlocked()
405 entry->valid_mapping)) { in xen_map_cache_unlocked()
406 xen_remap_bucket(mc, entry, NULL, cache_size, address_index, dummy, in xen_map_cache_unlocked()
413 entry->valid_mapping)) { in xen_map_cache_unlocked()
430 mc->last_entry = entry; in xen_map_cache_unlocked()
433 entry->lock++; in xen_map_cache_unlocked()
434 if (entry->lock == 0) { in xen_map_cache_unlocked()
435 error_report("mapcache entry lock overflow: "HWADDR_FMT_plx" -> %p", in xen_map_cache_unlocked()
436 entry->paddr_index, entry->vaddr_base); in xen_map_cache_unlocked()
442 reventry->size = entry->size; in xen_map_cache_unlocked()
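
The xen_map_cache_unlocked() references show the lookup strategy: hash the address index into the bucket array (line 377), walk the next chain for an entry whose paddr_index, size and validity bitmap match, remember the first unlocked entry as a reuse candidate, and only append a freshly allocated entry when neither is found; new or mismatching entries are then (re)mapped via xen_remap_bucket() (lines 398 and 406), and a locked lookup pins the entry (entry->lock++) and records it in a reverse-mapping record for later translation (line 442). Below is a minimal sketch of the chain walk using the illustrative structures above; lookup_bucket() and the entry_matches() predicate are hypothetical names that fold the paddr_index/size/valid_mapping test of lines 380-383 into one call, and g_new0() is glib's zeroing allocator as used in the original.

/* Illustrative only: the reuse-or-append walk seen at lines 377-398. */
static MapCacheEntry *lookup_bucket(MapCache *mc, hwaddr address_index,
                                    int (*entry_matches)(MapCacheEntry *),
                                    int lock)
{
    MapCacheEntry *entry = &mc->entry[address_index % mc->nr_buckets];
    MapCacheEntry *pentry = NULL, *free_entry = NULL;

    /* Skip over entries that must not be disturbed or do not match, while
     * remembering the first unlocked one as a reuse candidate. */
    while (entry && (lock || entry->lock) && entry->vaddr_base &&
           !entry_matches(entry)) {
        if (!free_entry && !entry->lock) {
            free_entry = entry;
        }
        pentry = entry;
        entry = entry->next;
    }

    if (!entry && free_entry) {
        entry = free_entry;                /* reuse an unlocked, mismatching entry */
    }
    if (!entry) {
        entry = g_new0(MapCacheEntry, 1);  /* append a new element to the chain */
        pentry->next = entry;
    }

    /* The caller remaps entries that are new or no longer match, as at
     * lines 398 and 406:
     * xen_remap_bucket(mc, entry, NULL, cache_size, address_index, dummy, ...);
     */
    return entry;
}
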
483 MapCacheEntry *entry = NULL; in xen_ram_addr_from_mapcache_single() local
505 entry = &mc->entry[paddr_index % mc->nr_buckets]; in xen_ram_addr_from_mapcache_single()
506 while (entry && (entry->paddr_index != paddr_index || entry->size != size)) { in xen_ram_addr_from_mapcache_single()
507 entry = entry->next; in xen_ram_addr_from_mapcache_single()
509 if (!entry) { in xen_ram_addr_from_mapcache_single()
514 ((unsigned long) ptr - (unsigned long) entry->vaddr_base); in xen_ram_addr_from_mapcache_single()
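
xen_ram_addr_from_mapcache_single() performs the reverse translation: it walks the bucket chain for the entry whose paddr_index and size match, then recovers the guest address from the pointer's offset inside that entry's mapping. Only the offset subtraction is visible at line 514; combining it with the bucket base, as sketched below, is an assumption based on the bucket_shift scaling used elsewhere in this listing (line 585).

/* Hypothetical completion of the address recovery around line 514. */
static hwaddr ram_addr_from_entry(const MapCache *mc,
                                  const MapCacheEntry *entry,
                                  hwaddr paddr_index, const void *ptr)
{
    hwaddr offset = (unsigned long) ptr - (unsigned long) entry->vaddr_base;

    return (paddr_index << mc->bucket_shift) + offset;
}
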
535 MapCacheEntry *entry = NULL, *pentry = NULL; in xen_invalidate_map_cache_entry_unlocked() local
568 entry = &mc->entry[paddr_index % mc->nr_buckets]; in xen_invalidate_map_cache_entry_unlocked()
569 while (entry && (entry->paddr_index != paddr_index || entry->size != size)) { in xen_invalidate_map_cache_entry_unlocked()
570 pentry = entry; in xen_invalidate_map_cache_entry_unlocked()
571 entry = entry->next; in xen_invalidate_map_cache_entry_unlocked()
573 if (!entry) { in xen_invalidate_map_cache_entry_unlocked()
577 entry->lock--; in xen_invalidate_map_cache_entry_unlocked()
578 if (entry->lock > 0) { in xen_invalidate_map_cache_entry_unlocked()
582 ram_block_notify_remove(entry->vaddr_base, entry->size, entry->size); in xen_invalidate_map_cache_entry_unlocked()
583 if (entry->flags & XEN_MAPCACHE_ENTRY_GRANT) { in xen_invalidate_map_cache_entry_unlocked()
584 rc = xengnttab_unmap(xen_region_gnttabdev, entry->vaddr_base, in xen_invalidate_map_cache_entry_unlocked()
585 entry->size >> mc->bucket_shift); in xen_invalidate_map_cache_entry_unlocked()
587 rc = munmap(entry->vaddr_base, entry->size); in xen_invalidate_map_cache_entry_unlocked()
595 g_free(entry->valid_mapping); in xen_invalidate_map_cache_entry_unlocked()
597 pentry->next = entry->next; in xen_invalidate_map_cache_entry_unlocked()
598 g_free(entry); in xen_invalidate_map_cache_entry_unlocked()
601 * Invalidate mapping but keep entry->next pointing to the rest in xen_invalidate_map_cache_entry_unlocked()
606 entry->paddr_index = 0; in xen_invalidate_map_cache_entry_unlocked()
607 entry->vaddr_base = NULL; in xen_invalidate_map_cache_entry_unlocked()
608 entry->valid_mapping = NULL; in xen_invalidate_map_cache_entry_unlocked()
609 entry->flags = 0; in xen_invalidate_map_cache_entry_unlocked()
610 entry->size = 0; in xen_invalidate_map_cache_entry_unlocked()
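
xen_invalidate_map_cache_entry_unlocked() releases one pinned mapping: it finds the matching entry, drops one lock reference and returns early if the entry is still held, then notifies, unmaps (grant-table unmap or munmap depending on XEN_MAPCACHE_ENTRY_GRANT) and removes it. The removal has two shapes, both visible above: a chained element is unlinked from its predecessor and freed, while a bucket-head element, which lives inside the mc->entry array, is only reset in place so that entry->next keeps pointing at the rest of the chain (comment at line 601). A condensed paraphrase, with the unmapping reduced to a hypothetical unmap_entry() helper:

/* Illustrative paraphrase of lines 577-610. */
static void unmap_entry(MapCacheEntry *entry);  /* hypothetical: xengnttab_unmap()
                                                   or munmap(), per the GRANT flag */

static void invalidate_entry(MapCacheEntry *entry, MapCacheEntry *pentry)
{
    entry->lock--;
    if (entry->lock > 0) {
        return;                          /* still pinned by another caller */
    }

    /* ram_block_notify_remove(entry->vaddr_base, entry->size, entry->size); */
    unmap_entry(entry);
    g_free(entry->valid_mapping);

    if (pentry) {
        /* Chained element: unlink and free the node itself. */
        pentry->next = entry->next;
        g_free(entry);
    } else {
        /* Bucket head inside the mc->entry array: reset in place, keeping
         * entry->next intact for the rest of the chain. */
        entry->paddr_index = 0;
        entry->vaddr_base = NULL;
        entry->valid_mapping = NULL;
        entry->flags = 0;
        entry->size = 0;
    }
}
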
671 MapCacheEntry *entry = &mc->entry[i]; in xen_invalidate_map_cache_single() local
673 if (entry->vaddr_base == NULL) { in xen_invalidate_map_cache_single()
676 if (entry->lock > 0) { in xen_invalidate_map_cache_single()
680 if (munmap(entry->vaddr_base, entry->size) != 0) { in xen_invalidate_map_cache_single()
685 entry->paddr_index = 0; in xen_invalidate_map_cache_single()
686 entry->vaddr_base = NULL; in xen_invalidate_map_cache_single()
687 entry->size = 0; in xen_invalidate_map_cache_single()
688 g_free(entry->valid_mapping); in xen_invalidate_map_cache_single()
689 entry->valid_mapping = NULL; in xen_invalidate_map_cache_single()
711 MapCacheEntry *entry; in xen_replace_cache_entry_unlocked() local
729 entry = &mc->entry[address_index % mc->nr_buckets]; in xen_replace_cache_entry_unlocked()
730 while (entry && !(entry->paddr_index == address_index && in xen_replace_cache_entry_unlocked()
731 entry->size == cache_size)) { in xen_replace_cache_entry_unlocked()
732 entry = entry->next; in xen_replace_cache_entry_unlocked()
734 if (!entry) { in xen_replace_cache_entry_unlocked()
739 assert((entry->flags & XEN_MAPCACHE_ENTRY_GRANT) == 0); in xen_replace_cache_entry_unlocked()
746 xen_remap_bucket(mc, entry, entry->vaddr_base, in xen_replace_cache_entry_unlocked()
751 entry->valid_mapping)) { in xen_replace_cache_entry_unlocked()
758 return entry->vaddr_base + address_offset; in xen_replace_cache_entry_unlocked()
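
Finally, xen_replace_cache_entry_unlocked() remaps an existing, non-grant entry in place: it locates the entry by address_index and cache_size, asserts that XEN_MAPCACHE_ENTRY_GRANT is not set, calls xen_remap_bucket() with the entry's current vaddr_base so the replacement lands at the same host address, and returns vaddr_base plus the offset within the bucket. A minimal sketch of that lookup and return path, reusing the structures sketched earlier; find_and_replace() is a hypothetical name, and the validity re-check of line 751 and error reporting are omitted.

/* Illustrative only: the match test of lines 729-734 plus the final return. */
#include <assert.h>

static uint8_t *find_and_replace(MapCache *mc, hwaddr address_index,
                                 hwaddr cache_size, hwaddr address_offset)
{
    MapCacheEntry *entry = &mc->entry[address_index % mc->nr_buckets];

    while (entry && !(entry->paddr_index == address_index &&
                      entry->size == cache_size)) {
        entry = entry->next;
    }
    if (!entry) {
        return NULL;                       /* nothing to replace */
    }
    assert((entry->flags & XEN_MAPCACHE_ENTRY_GRANT) == 0);

    /* xen_remap_bucket(mc, entry, entry->vaddr_base, cache_size,
     *                  address_index, ...);   -- remap at the same vaddr */
    return entry->vaddr_base + address_offset;
}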