Lines matching defs:va — definitions and uses of the `struct vmap_area *va` cursor in the Linux kernel's mm/vmalloc.c (the leading numbers are line numbers in that file).

758  * All vmap_area objects in this tree are sorted by va->va_start
776 va_size(struct vmap_area *va)
778 return (va->va_end - va->va_start);
784 struct vmap_area *va;
786 va = rb_entry_safe(node, struct vmap_area, rb_node);
787 return va ? va->subtree_max_size : 0;
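
va_size() (line 776) and get_subtree_max_size() (784-787) are the basic accessors of the free-space tree: the byte length of a [va_start, va_end) range, and the cached subtree_max_size of an rb-tree child, with a missing child contributing 0. A minimal userspace sketch with simplified fields (not the kernel structures):

#include <stdio.h>

/* Simplified stand-in for struct vmap_area; illustrative only. */
struct vmap_area {
	unsigned long va_start;          /* inclusive start address */
	unsigned long va_end;            /* exclusive end address */
	unsigned long subtree_max_size;  /* largest free block below this node */
};

/* Same computation as va_size(): length of [va_start, va_end). */
static unsigned long va_size(const struct vmap_area *va)
{
	return va->va_end - va->va_start;
}

/* Mirrors get_subtree_max_size(): a missing child contributes 0. */
static unsigned long subtree_max(const struct vmap_area *va)
{
	return va ? va->subtree_max_size : 0;
}

int main(void)
{
	struct vmap_area va = { .va_start = 0x1000, .va_end = 0x5000 };
	printf("%lu %lu\n", va_size(&va), subtree_max(NULL)); /* 16384 0 */
	return 0;
}
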
808 struct vmap_area *va = NULL;
818 va = tmp;
827 return va;
837 struct vmap_area *va;
839 va = rb_entry(n, struct vmap_area, rb_node);
840 if (addr < va->va_start)
842 else if (addr >= va->va_end)
845 return va;
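
Lines 837-845 are the descent step of __find_vmap_area(): go left while addr is below va_start, right while it is at or beyond va_end, and stop when the node covers addr. The same comparison over a sorted array of non-overlapping ranges, as an illustrative stand-in for the rb-tree walk:

#include <stddef.h>

struct vmap_area { unsigned long va_start, va_end; };

/*
 * Binary search with the comparison used by __find_vmap_area().
 * Array sorted by va_start, ranges non-overlapping; illustrative only.
 */
struct vmap_area *find_area(struct vmap_area *vas, size_t n, unsigned long addr)
{
	size_t lo = 0, hi = n;

	while (lo < hi) {
		size_t mid = lo + (hi - lo) / 2;
		struct vmap_area *va = &vas[mid];

		if (addr < va->va_start)
			hi = mid;		/* descend "left" */
		else if (addr >= va->va_end)
			lo = mid + 1;		/* descend "right" */
		else
			return va;		/* va_start <= addr < va_end */
	}
	return NULL;
}
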
860 find_va_links(struct vmap_area *va,
880 * it link, where the new va->rb_node will be attached to.
890 if (va->va_end <= tmp_va->va_start)
892 else if (va->va_start >= tmp_va->va_end)
896 va->va_start, va->va_end, tmp_va->va_start, tmp_va->va_end);
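
find_va_links() (860-896) orders nodes by whole intervals: descend left if the new area ends at or before the current node's start, right if it begins at or after its end; any overlap is a bug and the kernel WARNs with both ranges (896). A hypothetical three-way comparator with the same rule:

struct vmap_area { unsigned long va_start, va_end; };

/* Interval order used to pick rb_left/rb_right; 0 means illegal overlap. */
int va_order(const struct vmap_area *va, const struct vmap_area *tmp_va)
{
	if (va->va_end <= tmp_va->va_start)
		return -1;	/* entirely left of tmp_va */
	if (va->va_start >= tmp_va->va_end)
		return 1;	/* entirely right of tmp_va */
	return 0;		/* overlap: never expected in these trees */
}
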
925 __link_va(struct vmap_area *va, struct rb_root *root,
940 rb_link_node(&va->rb_node, parent, link);
944 * to the tree. We do not set va->subtree_max_size to
953 rb_insert_augmented(&va->rb_node,
955 va->subtree_max_size = 0;
957 rb_insert_color(&va->rb_node, root);
961 list_add(&va->list, head);
965 link_va(struct vmap_area *va, struct rb_root *root,
969 __link_va(va, root, parent, link, head, false);
973 link_va_augment(struct vmap_area *va, struct rb_root *root,
977 __link_va(va, root, parent, link, head, true);
981 __unlink_va(struct vmap_area *va, struct rb_root *root, bool augment)
983 if (WARN_ON(RB_EMPTY_NODE(&va->rb_node)))
987 rb_erase_augmented(&va->rb_node,
990 rb_erase(&va->rb_node, root);
992 list_del_init(&va->list);
993 RB_CLEAR_NODE(&va->rb_node);
997 unlink_va(struct vmap_area *va, struct rb_root *root)
999 __unlink_va(va, root, false);
1003 unlink_va_augment(struct vmap_area *va, struct rb_root *root)
1005 __unlink_va(va, root, true);
1013 compute_subtree_max_size(struct vmap_area *va)
1015 return max3(va_size(va),
1016 get_subtree_max_size(va->rb_node.rb_left),
1017 get_subtree_max_size(va->rb_node.rb_right));
1023 struct vmap_area *va;
1026 list_for_each_entry(va, &free_vmap_area_list, list) {
1027 computed_size = compute_subtree_max_size(va);
1028 if (computed_size != va->subtree_max_size)
1030 va_size(va), va->subtree_max_size);
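
compute_subtree_max_size() (1013-1017) is the augmented-rbtree value: the maximum of a node's own size and its two children's cached maxima, and the debug walk at 1023-1030 recomputes it for every free area and reports any mismatch. A recursive sketch that recomputes the value from scratch (the kernel reads the children's cached values instead), which is what the debug check effectively validates:

struct vnode {
	unsigned long va_start, va_end;
	unsigned long subtree_max_size;
	struct vnode *left, *right;
};

static unsigned long max3(unsigned long a, unsigned long b, unsigned long c)
{
	unsigned long m = a > b ? a : b;
	return m > c ? m : c;
}

/* Largest free range anywhere in this subtree: own size vs. children. */
unsigned long subtree_max_size(const struct vnode *n)
{
	if (!n)
		return 0;
	return max3(n->va_end - n->va_start,
		    subtree_max_size(n->left),
		    subtree_max_size(n->right));
}
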
1063 augment_tree_propagate_from(struct vmap_area *va)
1070 free_vmap_area_rb_augment_cb_propagate(&va->rb_node, NULL);
1078 insert_vmap_area(struct vmap_area *va,
1084 link = find_va_links(va, root, NULL, &parent);
1086 link_va(va, root, parent, link, head);
1090 insert_vmap_area_augment(struct vmap_area *va,
1098 link = find_va_links(va, NULL, from, &parent);
1100 link = find_va_links(va, root, NULL, &parent);
1103 link_va_augment(va, root, parent, link, head);
1104 augment_tree_propagate_from(va);
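
augment_tree_propagate_from() (1063-1070) pushes a changed node's maximum up to the root so every ancestor's subtree_max_size stays valid; insert_vmap_area_augment() (1090-1104) calls it right after linking a node into the free tree. A hedged sketch of that bottom-up refresh using parent pointers:

struct vnode {
	unsigned long size;		/* va_end - va_start */
	unsigned long subtree_max_size;
	struct vnode *left, *right, *parent;
};

static unsigned long child_max(const struct vnode *n)
{
	return n ? n->subtree_max_size : 0;
}

/* Walk from the changed node to the root, refreshing the cached maxima. */
void propagate_up(struct vnode *n)
{
	for (; n; n = n->parent) {
		unsigned long m = n->size;

		if (child_max(n->left) > m)
			m = child_max(n->left);
		if (child_max(n->right) > m)
			m = child_max(n->right);
		n->subtree_max_size = m;
	}
}
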
1120 __merge_or_add_vmap_area(struct vmap_area *va,
1133 link = find_va_links(va, root, NULL, &parent);
1153 if (sibling->va_start == va->va_end) {
1154 sibling->va_start = va->va_start;
1157 kmem_cache_free(vmap_area_cachep, va);
1160 va = sibling;
1174 if (sibling->va_end == va->va_start) {
1183 __unlink_va(va, root, augment);
1185 sibling->va_end = va->va_end;
1188 kmem_cache_free(vmap_area_cachep, va);
1191 va = sibling;
1198 __link_va(va, root, parent, link, head, augment);
1200 return va;
1204 merge_or_add_vmap_area(struct vmap_area *va,
1207 return __merge_or_add_vmap_area(va, root, head, false);
1211 merge_or_add_vmap_area_augment(struct vmap_area *va,
1214 va = __merge_or_add_vmap_area(va, root, head, true);
1215 if (va)
1216 augment_tree_propagate_from(va);
1218 return va;
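
__merge_or_add_vmap_area() (1120-1200) returns a range to a tree and coalesces it with any neighbor it touches: a successor whose va_start equals va->va_end absorbs it by lowering its start (1153-1160), a predecessor whose va_end equals va->va_start absorbs it by raising its end (1174-1191), and the absorbed vmap_area is freed back to the slab cache (1157, 1188); otherwise the area is linked as a new node. A simplified sketch over a sorted doubly linked list, passing the range by value and ignoring the rb-tree and locking (the caller-supplied lo/hi neighbors stand in for what find_va_links() locates):

#include <stdlib.h>

struct area {
	unsigned long start, end;
	struct area *prev, *next;	/* sorted, non-overlapping free list */
};

/*
 * Insert [start, end) between lo and hi (either may be NULL at the list
 * ends), merging with any neighbor it touches. Returns the area that
 * now covers the range, or NULL on allocation failure. Illustrative only.
 */
struct area *merge_or_add(struct area **head, struct area *lo,
			  struct area *hi,
			  unsigned long start, unsigned long end)
{
	struct area *a;

	if (hi && hi->start == end) {		/* merge with next */
		hi->start = start;
		if (lo && lo->end == start) {	/* ...and with prev too */
			lo->end = hi->end;
			lo->next = hi->next;
			if (hi->next)
				hi->next->prev = lo;
			free(hi);
			return lo;
		}
		return hi;
	}
	if (lo && lo->end == start) {		/* merge with prev only */
		lo->end = end;
		return lo;
	}

	/* no touching neighbor: allocate and link a new node */
	a = malloc(sizeof(*a));
	if (!a)
		return NULL;
	a->start = start;
	a->end = end;
	a->prev = lo;
	a->next = hi;
	if (lo)
		lo->next = a;
	else
		*head = a;
	if (hi)
		hi->prev = a;
	return a;
}
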
1222 is_within_this_va(struct vmap_area *va, unsigned long size,
1227 if (va->va_start > vstart)
1228 nva_start_addr = ALIGN(va->va_start, align);
1237 return (nva_start_addr + size <= va->va_end);
1251 struct vmap_area *va;
1262 va = rb_entry(node, struct vmap_area, rb_node);
1265 vstart < va->va_start) {
1268 if (is_within_this_va(va, size, align, vstart))
1269 return va;
1288 va = rb_entry(node, struct vmap_area, rb_node);
1289 if (is_within_this_va(va, size, align, vstart))
1290 return va;
1293 vstart <= va->va_start) {
1300 vstart = va->va_start + 1;
1318 struct vmap_area *va;
1320 list_for_each_entry(va, head, list) {
1321 if (!is_within_this_va(va, size, align, vstart))
1324 return va;
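
is_within_this_va() (1222-1237) checks whether a request fits in one free area: take the larger of va_start and vstart, round it up to the alignment, and require that the aligned start plus size still ends at or before va_end. find_vmap_lowest_match() (1251-1300) drives this test down the tree, using subtree_max_size to skip subtrees too small for the request, and the linear variant (1318-1324) walks the list directly. A hedged sketch of the fit test, assuming a power-of-two align as the kernel's ALIGN() does:

#include <stdbool.h>

struct vmap_area { unsigned long va_start, va_end; };

/* Round x up to a power-of-two alignment (same formula as ALIGN()). */
static unsigned long align_up(unsigned long x, unsigned long align)
{
	return (x + align - 1) & ~(align - 1);
}

/* Same check as is_within_this_va(); the kernel also guards vstart. */
bool fits_in(const struct vmap_area *va, unsigned long size,
	     unsigned long align, unsigned long vstart)
{
	unsigned long start = va->va_start > vstart ? va->va_start : vstart;
	unsigned long nva_start_addr = align_up(start, align);

	/* guard against nva_start_addr + size wrapping around */
	if (nva_start_addr + size < nva_start_addr)
		return false;

	return nva_start_addr + size <= va->va_end;
}
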
1359 classify_va_fit_type(struct vmap_area *va,
1365 if (nva_start_addr < va->va_start ||
1366 nva_start_addr + size > va->va_end)
1370 if (va->va_start == nva_start_addr) {
1371 if (va->va_end == nva_start_addr + size)
1375 } else if (va->va_end == nva_start_addr + size) {
1386 struct vmap_area *va, unsigned long nva_start_addr,
1390 enum fit_type type = classify_va_fit_type(va, nva_start_addr, size);
1400 unlink_va_augment(va, root);
1401 kmem_cache_free(vmap_area_cachep, va);
1410 va->va_start += size;
1419 va->va_end = nva_start_addr;
1463 lva->va_start = va->va_start;
1469 va->va_start = nva_start_addr + size;
1475 augment_tree_propagate_from(va);
1478 insert_vmap_area_augment(lva, &va->rb_node, root, head);
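
classify_va_fit_type() (1359-1375) names how [nva_start_addr, nva_start_addr + size) sits inside a free area, and adjust_va_to_fit_type() (1386-1478) acts on it: a full fit unlinks and frees the area (1400-1401), an edge fit just moves va_start or va_end (1410, 1419), and a middle fit splits the area, inserting the new left-hand node lva (1463-1478). These correspond to the kernel's FL/LE/RE/NE fit types. A userspace sketch of the classification, using its own descriptive enum names:

enum fit { NO_FIT, FULL_FIT, LEFT_EDGE_FIT, RIGHT_EDGE_FIT, MIDDLE_FIT };

struct vmap_area { unsigned long va_start, va_end; };

/* Mirrors the comparisons in classify_va_fit_type(). */
enum fit classify(const struct vmap_area *va,
		  unsigned long nva_start_addr, unsigned long size)
{
	if (nva_start_addr < va->va_start ||
	    nva_start_addr + size > va->va_end)
		return NO_FIT;			/* request not inside va */

	if (va->va_start == nva_start_addr) {
		if (va->va_end == nva_start_addr + size)
			return FULL_FIT;	/* va is consumed entirely */
		return LEFT_EDGE_FIT;		/* trim va from the left */
	}
	if (va->va_end == nva_start_addr + size)
		return RIGHT_EDGE_FIT;		/* trim va from the right */

	return MIDDLE_FIT;			/* split va into two areas */
}
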
1495 struct vmap_area *va;
1510 va = find_vmap_lowest_match(root, size, align, vstart, adjust_search_size);
1511 if (unlikely(!va))
1514 if (va->va_start > vstart)
1515 nva_start_addr = ALIGN(va->va_start, align);
1524 ret = adjust_va_to_fit_type(root, head, va, nva_start_addr, size);
1538 static void free_vmap_area(struct vmap_area *va)
1544 unlink_va(va, &vmap_area_root);
1551 merge_or_add_vmap_area_augment(va, &free_vmap_area_root, &free_vmap_area_list);
1558 struct vmap_area *va = NULL;
1570 va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
1574 if (va && __this_cpu_cmpxchg(ne_fit_preload_node, NULL, va))
1575 kmem_cache_free(vmap_area_cachep, va);
1588 struct vmap_area *va;
1603 va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
1604 if (unlikely(!va))
1611 kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask);
1628 va->va_start = addr;
1629 va->va_end = addr + size;
1630 va->vm = NULL;
1631 va->flags = va_flags;
1634 insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
1637 BUG_ON(!IS_ALIGNED(va->va_start, align));
1638 BUG_ON(va->va_start < vstart);
1639 BUG_ON(va->va_end > vend);
1643 free_vmap_area(va);
1647 return va;
1668 kmem_cache_free(vmap_area_cachep, va);
1729 struct vmap_area *va, *n_va;
1753 list_for_each_entry_safe(va, n_va, &local_purge_list, list) {
1754 unsigned long nr = (va->va_end - va->va_start) >> PAGE_SHIFT;
1755 unsigned long orig_start = va->va_start;
1756 unsigned long orig_end = va->va_end;
1763 va = merge_or_add_vmap_area_augment(va, &free_vmap_area_root,
1766 if (!va)
1771 va->va_start, va->va_end);
1817 static void free_vmap_area_noflush(struct vmap_area *va)
1820 unsigned long va_start = va->va_start;
1823 if (WARN_ON_ONCE(!list_empty(&va->list)))
1826 nr_lazy = atomic_long_add_return((va->va_end - va->va_start) >>
1833 merge_or_add_vmap_area(va,
1839 /* After this point, we may free va at any time */
1847 static void free_unmap_vmap_area(struct vmap_area *va)
1849 flush_cache_vunmap(va->va_start, va->va_end);
1850 vunmap_range_noflush(va->va_start, va->va_end);
1852 flush_tlb_kernel_range(va->va_start, va->va_end);
1854 free_vmap_area_noflush(va);
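
Freeing is deferred: free_vmap_area_noflush() (1817-1839) parks the area on the purge tree and adds its page count to the lazy counter, __purge_vmap_area_lazy() (1729-1771) later merges each purged range back into the free tree, and free_unmap_vmap_area() (1847-1854) shows the teardown order: flush the data cache for the range, unmap it, flush the TLB, then lazy-free. A small sketch of the bytes-to-pages accounting at lines 1754 and 1826, assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SHIFT	12	/* assumed 4 KiB pages for this sketch */

/* Pages covered by [va_start, va_end), as added to the lazy counter. */
static unsigned long lazy_pages(unsigned long va_start, unsigned long va_end)
{
	return (va_end - va_start) >> PAGE_SHIFT;
}

int main(void)
{
	/* a 64 KiB region contributes 16 pages to the lazy-free counter */
	printf("%lu\n", lazy_pages(0x100000, 0x110000));
	return 0;
}
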
1859 struct vmap_area *va;
1862 va = __find_vmap_area(addr, &vmap_area_root);
1865 return va;
1870 struct vmap_area *va;
1873 va = __find_vmap_area(addr, &vmap_area_root);
1874 if (va)
1875 unlink_va(va, &vmap_area_root);
1878 return va;
1935 struct vmap_area *va;
2035 struct vmap_area *va;
2048 va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
2052 if (IS_ERR(va)) {
2054 return ERR_CAST(va);
2057 vaddr = vmap_block_vaddr(va->va_start, 0);
2059 vb->va = va;
2071 xa = addr_to_vb_xa(va->va_start);
2072 vb_idx = addr_to_vb_idx(va->va_start);
2076 free_vmap_area(va);
2099 xa = addr_to_vb_xa(vb->va->va_start);
2100 tmp = xa_erase(xa, addr_to_vb_idx(vb->va->va_start));
2104 unlink_va(vb->va, &vmap_area_root);
2107 free_vmap_area_noflush(vb->va);
2212 vaddr = vmap_block_vaddr(vb->va->va_start, pages_off);
2302 unsigned long va_start = vb->va->va_start;
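
The vm_map_ram block allocator carves whole VMAP_BLOCK_SIZE areas (2048) and hands out page-sized pieces inside them: vmap_block_vaddr() (2057, 2212) turns a block's va_start plus a page offset into an address, and addr_to_vb_idx()/addr_to_vb_xa() (2071-2072) key the xarray that maps addresses back to their block. A hedged sketch of the offset arithmetic, assuming 4 KiB pages and 64 pages per block; the kernel's index calculation additionally rebases against the start of the vmalloc range:

#define PAGE_SHIFT	12			/* assumed 4 KiB pages */
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define BLOCK_PAGES	64			/* assumed pages per block */
#define BLOCK_SIZE	(BLOCK_PAGES * PAGE_SIZE)

/* Address of page 'pages_off' inside a block, as vmap_block_vaddr() does. */
void *block_vaddr(unsigned long va_start, unsigned long pages_off)
{
	return (void *)(va_start + (pages_off << PAGE_SHIFT));
}

/* Block index for an address: which BLOCK_SIZE slot it falls into. */
unsigned long block_idx(unsigned long addr)
{
	return addr / BLOCK_SIZE;
}
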
2359 struct vmap_area *va;
2375 va = find_unlink_vmap_area(addr);
2376 if (WARN_ON_ONCE(!va))
2379 debug_check_no_locks_freed((void *)va->va_start,
2380 (va->va_end - va->va_start));
2381 free_unmap_vmap_area(va);
2411 struct vmap_area *va;
2412 va = alloc_vmap_area(size, PAGE_SIZE,
2415 if (IS_ERR(va))
2418 addr = va->va_start;
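
vm_unmap_ram() (2359-2381) finds and unlinks the vmap_area behind an address and tears down its mapping, while the non-block path of vm_map_ram() (2411-2418) allocates a fresh vmap_area and uses its va_start as the returned address. A hedged, driver-style usage sketch of the exported pair; the page array is assumed to be allocated elsewhere and this is not code from mm/vmalloc.c:

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/numa.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

/*
 * Temporarily map 'nr' already-allocated pages contiguously in kernel
 * virtual space, use the mapping, then drop it again.
 */
static int touch_pages_mapped(struct page **pages, unsigned int nr)
{
	/* NUMA_NO_NODE: no node preference for the mapping's metadata */
	void *addr = vm_map_ram(pages, nr, NUMA_NO_NODE);

	if (!addr)
		return -ENOMEM;

	memset(addr, 0, nr * PAGE_SIZE);	/* use the mapping */

	/* count must match what was passed to vm_map_ram() */
	vm_unmap_ram(addr, nr);
	return 0;
}
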
2559 struct vmap_area *va, unsigned long flags, const void *caller)
2562 vm->addr = (void *)va->va_start;
2563 vm->size = va->va_end - va->va_start;
2565 va->vm = vm;
2568 static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
2572 setup_vmalloc_vm_locked(vm, va, flags, caller);
2592 struct vmap_area *va;
2612 va = alloc_vmap_area(size, align, start, end, node, gfp_mask, 0);
2613 if (IS_ERR(va)) {
2618 setup_vmalloc_vm(area, va, flags, caller);
2682 struct vmap_area *va;
2684 va = find_vmap_area((unsigned long)addr);
2685 if (!va)
2688 return va->vm;
2703 struct vmap_area *va;
2712 va = find_unlink_vmap_area((unsigned long)addr);
2713 if (!va || !va->vm)
2715 vm = va->vm;
2722 free_unmap_vmap_area(va);
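
setup_vmalloc_vm() (2559-2565) is the glue between the allocator's view and the caller's: the vm_struct gets its addr and size straight from the vmap_area, and the vmap_area points back through va->vm; find_vm_area() (2682-2688) and remove_vm_area() (2703-2722) then follow that link starting from an address. A minimal sketch of the pairing with simplified structures:

struct vm_struct_s {			/* simplified view handed to callers */
	void *addr;
	unsigned long size;
};

struct vmap_area_s {			/* simplified allocator-side record */
	unsigned long va_start, va_end;
	struct vm_struct_s *vm;		/* back-pointer set below */
};

/* Same field copying as setup_vmalloc_vm(): link vm <-> va. */
void setup_vm(struct vm_struct_s *vm, struct vmap_area_s *va)
{
	vm->addr = (void *)va->va_start;
	vm->size = va->va_end - va->va_start;
	va->vm = vm;
}
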
3690 start = vmap_block_vaddr(vb->va->va_start, rs);
3755 struct vmap_area *va;
3769 va = find_vmap_area_exceed_addr((unsigned long)addr);
3770 if (!va)
3774 if ((unsigned long)addr + remains <= va->va_start)
3777 list_for_each_entry_from(va, &vmap_area_list, list) {
3783 vm = va->vm;
3784 flags = va->flags & VMAP_FLAGS_MASK;
3800 vaddr = (char *) va->va_start;
3801 size = vm ? get_vm_area_size(vm) : va_size(va);
3953 * i.e. va->va_start < addr && va->va_end < addr or NULL
3959 struct vmap_area *va, *tmp;
3963 va = NULL;
3968 va = tmp;
3978 return va;
3984 * @va:
3992 pvm_determine_end_from_reverse(struct vmap_area **va, unsigned long align)
3997 if (likely(*va)) {
3998 list_for_each_entry_from_reverse((*va),
4000 addr = min((*va)->va_end & ~(align - 1), vmalloc_end);
4001 if ((*va)->va_start < addr)
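
pvm_find_va_enclose_addr() (3959-3978) walks the tree to the area covering an address, or the closest one below it, and pvm_determine_end_from_reverse() (3992-4001) scans the free list backwards for the highest usable end address, rounding each candidate's va_end down to the alignment with va_end & ~(align - 1). A small sketch of that round-down step, assuming a power-of-two align:

#include <stdio.h>

/* Round addr down to a power-of-two alignment, as va_end & ~(align - 1). */
static unsigned long align_down(unsigned long addr, unsigned long align)
{
	return addr & ~(align - 1);
}

int main(void)
{
	/* 0x12345 rounded down to a 0x1000 boundary is 0x12000 */
	printf("%#lx\n", align_down(0x12345UL, 0x1000UL));
	return 0;
}
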
4039 struct vmap_area **vas, *va;
4092 va = pvm_find_va_enclose_addr(vmalloc_end);
4093 base = pvm_determine_end_from_reverse(&va, align) - end;
4106 if (va == NULL)
4113 if (base + end > va->va_end) {
4114 base = pvm_determine_end_from_reverse(&va, align) - end;
4122 if (base + start < va->va_start) {
4123 va = node_to_va(rb_prev(&va->rb_node));
4124 base = pvm_determine_end_from_reverse(&va, align) - end;
4139 va = pvm_find_va_enclose_addr(base + end);
4142 /* we've found a fitting base, insert all va's */
4149 va = pvm_find_va_enclose_addr(start);
4150 if (WARN_ON_ONCE(va == NULL))
4156 va, start, size);
4162 va = vas[area];
4163 va->va_start = start;
4164 va->va_end = start + size;
4208 va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root,
4210 if (va)
4212 va->va_start, va->va_end);
4258 va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root,
4260 if (va)
4262 va->va_start, va->va_end);
4295 struct vmap_area *va;
4301 va = __find_vmap_area((unsigned long)objp, &vmap_area_root);
4302 if (!va) {
4307 vm = va->vm;
4372 struct vmap_area *va;
4375 list_for_each_entry(va, &purge_vmap_area_list, list) {
4377 (void *)va->va_start, (void *)va->va_end,
4378 va->va_end - va->va_start);
4385 struct vmap_area *va;
4388 va = list_entry(p, struct vmap_area, list);
4390 if (!va->vm) {
4391 if (va->flags & VMAP_RAM)
4393 (void *)va->va_start, (void *)va->va_end,
4394 va->va_end - va->va_start);
4399 v = va->vm;
4438 if (list_is_last(&va->list, &vmap_area_list))
4467 struct vmap_area *va;
4491 va = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
4492 if (WARN_ON_ONCE(!va))
4495 va->va_start = (unsigned long)tmp->addr;
4496 va->va_end = va->va_start + tmp->size;
4497 va->vm = tmp;
4498 insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
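
vmalloc_init() (4467-4498) imports the early, statically registered vm_structs into the vmap_area tree: each entry gets a zeroed vmap_area whose range is copied from the vm_struct (va_start = addr, va_end = addr + size) before insertion into the busy tree; an allocation failure is WARNed and skipped (4492). A hedged sketch of that per-entry conversion with simplified types:

#include <stdlib.h>

struct early_vm {			/* simplified early vm_struct entry */
	void *addr;
	unsigned long size;
	struct early_vm *next;
};

struct vmap_area_s {
	unsigned long va_start, va_end;
	struct early_vm *vm;
};

/* Convert one early registration into a vmap_area record, as 4491-4497 do. */
struct vmap_area_s *import_early(struct early_vm *tmp)
{
	struct vmap_area_s *va = calloc(1, sizeof(*va));

	if (!va)
		return NULL;		/* the kernel warns and skips here */
	va->va_start = (unsigned long)tmp->addr;
	va->va_end = va->va_start + tmp->size;
	va->vm = tmp;
	return va;
}
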