
Searched refs:kmap (Results 1 – 25 of 105) sorted by relevance


/openbmc/linux/Documentation/translations/zh_CN/mm/
highmem.rst
64 It is valid to take pagefaults in a local kmap region, unless the context acquiring the local mapping does not allow it for other reasons
76 While it is much faster than kmap(), in the highmem case it comes with restrictions on the validity of the pointer. Contrary to kmap() mappings,
81 kmap(), by mapping the page in the same thread that will use it, and prefer kmap_local_page().
98 * kmap(). This should be used for short-duration mappings of a single page, with no restrictions on preemption or migration. It comes with an overhead,
102 mapping changes must be broadcast to all CPUs (cores), and kmap() also needs, when the kmap pool wraps around (TLB entries are used up and it must restart from the
104 slot becomes available. Therefore, kmap() can only be called from preemptible context.
107 highmem mappings are short-lived and used in only one place. This means that in such cases the cost of kmap() is mostly
108 wasted. kmap() was not designed for long-term mappings, but it has evolved in that direction, and in newer
111 On 64-bit systems, calling kmap_local_page(), kmap_atomic() and kmap() has no real effect, because 64-bit
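
The zh_CN excerpt above (a translation of Documentation/mm/highmem.rst) recommends mapping the page in the thread that will use it and preferring kmap_local_page() over kmap(). A minimal sketch of that recommended pattern follows; the helper name and the zero-fill are illustrative, not taken from the documentation:

    #include <linux/highmem.h>
    #include <linux/string.h>

    /* Illustrative helper (not from the documentation): zero one possibly-highmem
     * page through a short-lived, thread-local mapping instead of kmap(). */
    static void example_zero_page(struct page *page)
    {
        void *vaddr = kmap_local_page(page);  /* cheap, CPU-local mapping */

        memset(vaddr, 0, PAGE_SIZE);          /* use it only in this thread */
        kunmap_local(vaddr);                  /* release before returning */
    }
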
/openbmc/linux/tools/perf/util/
map.c
226 map = calloc(1, sizeof(*map) + (dso->kernel ? sizeof(struct kmap) : 0)); in map__new2()
246 struct kmap *kmap = __map__kmap((struct map *)map); in __map__is_extra_kernel_map() local
248 return kmap && kmap->name[0]; in __map__is_extra_kernel_map()
421 size += sizeof(struct kmap); in map__clone()
531 struct kmap *kmap = __map__kmap(map); in map__rip_2objdump() local
539 if (kmap && is_entry_trampoline(kmap->name) && kmap->kmaps) { in map__rip_2objdump()
540 struct machine *machine = maps__machine(kmap->kmaps); in map__rip_2objdump()
605 struct kmap *__map__kmap(struct map *map) in __map__kmap()
611 return (struct kmap *)(&RC_CHK_ACCESS(map)[1]); in __map__kmap()
614 struct kmap *map__kmap(struct map *map) in map__kmap()
[all …]
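
The map.c hits above show how perf piggybacks a struct kmap onto kernel maps: map__new2() over-allocates the struct map by sizeof(struct kmap), and __map__kmap() recovers the extra area as the memory immediately following the map (&map[1], behind the RC_CHK_ACCESS wrapper). A standalone sketch of that trailing-allocation trick, with invented obj/extra types instead of perf's real ones:

    #include <stdlib.h>

    struct obj   { int kernel; };
    struct extra { char name[32]; };

    /* Kernel objects get room for a trailing struct extra in the same allocation. */
    static struct obj *obj_new(int kernel)
    {
        struct obj *o = calloc(1, sizeof(*o) + (kernel ? sizeof(struct extra) : 0));

        if (o)
            o->kernel = kernel;
        return o;
    }

    /* The trailing struct starts right after the obj itself. */
    static struct extra *obj_extra(struct obj *o)
    {
        return o->kernel ? (struct extra *)&o[1] : NULL;
    }
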
bpf_lock_contention.c
57 struct map *kmap; in lock_contention_prepare() local
63 &kmap); in lock_contention_prepare()
77 addrs[con->filters->nr_addrs++] = map__unmap_ip(kmap, sym->start); in lock_contention_prepare()
181 struct map *kmap; in lock_contention_get_name() local
212 sym = machine__find_kernel_symbol(machine, key->lock_addr, &kmap); in lock_contention_get_name()
231 sym = machine__find_kernel_symbol(machine, addr, &kmap); in lock_contention_get_name()
236 offset = map__map_ip(kmap, addr) - sym->start; in lock_contention_get_name()
symbol-elf.c
1340 static bool ref_reloc_sym_not_found(struct kmap *kmap) in ref_reloc_sym_not_found() argument
1342 return kmap && kmap->ref_reloc_sym && kmap->ref_reloc_sym->name && in ref_reloc_sym_not_found()
1343 !kmap->ref_reloc_sym->unrelocated_addr; in ref_reloc_sym_not_found()
1354 static u64 ref_reloc(struct kmap *kmap) in ref_reloc() argument
1356 if (kmap && kmap->ref_reloc_sym && in ref_reloc()
1357 kmap->ref_reloc_sym->unrelocated_addr) in ref_reloc()
1358 return kmap->ref_reloc_sym->addr - in ref_reloc()
1359 kmap->ref_reloc_sym->unrelocated_addr; in ref_reloc()
1368 struct maps *kmaps, struct kmap *kmap, in dso__process_kernel_symbol() argument
1392 map__set_start(map, shdr->sh_addr + ref_reloc(kmap)); in dso__process_kernel_symbol()
[all …]
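
The symbol-elf.c hits compute a relocation delta from a reference symbol: the runtime address minus the address recorded in the unrelocated image, which dso__process_kernel_symbol() then adds to the ELF section start. A hedged sketch of just that arithmetic, with simplified field names:

    #include <stdint.h>

    struct ref_sym {
        uint64_t addr;              /* address observed on the running system */
        uint64_t unrelocated_addr;  /* address recorded in the image/file     */
    };

    /* Offset to add to file addresses to obtain runtime addresses. */
    static uint64_t reloc_delta(const struct ref_sym *s)
    {
        return s->unrelocated_addr ? s->addr - s->unrelocated_addr : 0;
    }
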
symbol.c
1196 struct kmap *kmap = map__kmap(map); in validate_kcore_addresses() local
1198 if (!kmap) in validate_kcore_addresses()
1201 if (kmap->ref_reloc_sym && kmap->ref_reloc_sym->name) { in validate_kcore_addresses()
1205 kmap->ref_reloc_sym->name, &start)) in validate_kcore_addresses()
1207 if (start != kmap->ref_reloc_sym->addr) in validate_kcore_addresses()
1421 static int kallsyms__delta(struct kmap *kmap, const char *filename, u64 *delta) in kallsyms__delta() argument
1425 if (!kmap->ref_reloc_sym || !kmap->ref_reloc_sym->name) in kallsyms__delta()
1428 if (kallsyms__get_function_start(filename, kmap->ref_reloc_sym->name, &addr)) in kallsyms__delta()
1431 *delta = addr - kmap->ref_reloc_sym->addr; in kallsyms__delta()
1438 struct kmap *kmap = map__kmap(map); in __dso__load_kallsyms() local
[all …]
machine.c
1231 struct kmap *kmap; in machine__create_extra_kernel_map() local
1242 kmap = map__kmap(map); in machine__create_extra_kernel_map()
1244 strlcpy(kmap->name, xm->name, KMAP_NAME_LEN); in machine__create_extra_kernel_map()
1250 kmap->name, map__start(map), map__end(map)); in machine__create_extra_kernel_map()
1305 struct kmap *kmap = __map__kmap(map); in machine__map_x86_64_entry_trampolines() local
1307 if (!kmap || !is_entry_trampoline(kmap->name)) in machine__map_x86_64_entry_trampolines()
1370 struct kmap *kmap; in machine__destroy_kernel_maps() local
1376 kmap = map__kmap(map); in machine__destroy_kernel_maps()
1378 if (kmap && kmap->ref_reloc_sym) { in machine__destroy_kernel_maps()
1379 zfree((char **)&kmap->ref_reloc_sym->name); in machine__destroy_kernel_maps()
[all …]
map.h
38 struct kmap;
40 struct kmap *__map__kmap(struct map *map);
41 struct kmap *map__kmap(struct map *map);
maps.c
77 struct kmap *kmap = map__kmap(map); in maps__insert() local
79 if (kmap) in maps__insert()
80 kmap->kmaps = maps; in maps__insert()
/openbmc/linux/drivers/net/ethernet/chelsio/inline_crypto/chtls/
chtls_hw.c
166 cdev->kmap.size = num_key_ctx; in chtls_init_kmap()
167 cdev->kmap.available = bsize; in chtls_init_kmap()
168 ksize = sizeof(*cdev->kmap.addr) * bsize; in chtls_init_kmap()
169 cdev->kmap.addr = kvzalloc(ksize, GFP_KERNEL); in chtls_init_kmap()
170 if (!cdev->kmap.addr) in chtls_init_kmap()
173 cdev->kmap.start = lldi->vr->key.start; in chtls_init_kmap()
174 spin_lock_init(&cdev->kmap.lock); in chtls_init_kmap()
189 spin_lock_bh(&cdev->kmap.lock); in get_new_keyid()
190 keyid = find_first_zero_bit(cdev->kmap.addr, cdev->kmap.size); in get_new_keyid()
191 if (keyid < cdev->kmap.size) { in get_new_keyid()
[all …]
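
The chtls_hw.c hits sketch a small key-ID allocator: chtls_init_kmap() sizes a kvzalloc'd bitmap and spinlock, and get_new_keyid() picks the next free ID with find_first_zero_bit() under the lock. A rough standalone sketch of that allocation step, with illustrative names and no error handling beyond pool exhaustion:

    #include <linux/bitmap.h>
    #include <linux/spinlock.h>

    struct id_pool {
        unsigned long *bits;   /* one bit per allocatable id */
        unsigned int size;     /* number of ids in the pool  */
        spinlock_t lock;
    };

    /* Returns a free id and marks it used, or -1 if the pool is exhausted. */
    static int id_pool_get(struct id_pool *p)
    {
        int id;

        spin_lock_bh(&p->lock);
        id = find_first_zero_bit(p->bits, p->size);
        if (id < p->size)
            __set_bit(id, p->bits);
        else
            id = -1;
        spin_unlock_bh(&p->lock);
        return id;
    }
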
/openbmc/linux/tools/perf/arch/x86/util/
event.c
34 struct kmap *kmap; in perf_event__synthesize_extra_kmaps() local
41 kmap = map__kmap(map); in perf_event__synthesize_extra_kmaps()
44 PERF_ALIGN(strlen(kmap->name) + 1, sizeof(u64)) + in perf_event__synthesize_extra_kmaps()
67 strlcpy(event->mmap.filename, kmap->name, PATH_MAX); in perf_event__synthesize_extra_kmaps()
/openbmc/linux/arch/m68k/mm/
Makefile
9 obj-$(CONFIG_MMU_MOTOROLA) += kmap.o memory.o motorola.o hwtest.o
11 obj-$(CONFIG_MMU_COLDFIRE) += kmap.o memory.o mcfmmu.o
/openbmc/linux/Documentation/mm/
highmem.rst
61 These functions should always be used, whereas kmap_atomic() and kmap() have
70 It's valid to take pagefaults in a local kmap region, unless the context
87 While they are significantly faster than kmap(), for the highmem case they
88 come with restrictions about the pointers validity. Contrary to kmap()
95 therefore try to design their code to avoid the use of kmap() by mapping
131 * kmap(). This function has been deprecated; use kmap_local_page().
146 Mapping changes must be propagated across all the CPUs. kmap() also
147 requires global TLB invalidation when the kmap's pool wraps and it might
149 available. Therefore, kmap() is only callable from preemptible context.
154 kmap() is mostly wasted in such cases. kmap() was not intended for long
[all …]
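
The rules quoted above favour short-lived, thread-local mappings over kmap(). As a companion to the single-page sketch earlier, here is a hedged sketch of copying between two possibly-highmem pages with nested local mappings; the reverse-order unmapping reflects the stack-based implementation of local kmaps, and the helper name is made up:

    #include <linux/highmem.h>
    #include <linux/string.h>

    /* Illustrative only: copy one page to another via short local mappings. */
    static void example_copy_page(struct page *dst, struct page *src)
    {
        void *d = kmap_local_page(dst);
        void *s = kmap_local_page(src);

        memcpy(d, s, PAGE_SIZE);

        kunmap_local(s);   /* unmap in reverse (LIFO) order of mapping */
        kunmap_local(d);
    }
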
/openbmc/linux/drivers/gpu/drm/nouveau/include/nvkm/core/
memory.h
40 int (*kmap)(struct nvkm_memory *, struct nvkm_memory **); member
67 #define nvkm_memory_kmap(p,i) ((p)->func->kmap ? (p)->func->kmap((p), (i)) : -ENOSYS)
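
The memory.h hit shows an optional-operation pattern: kmap is a function pointer in the per-object func table, and the nvkm_memory_kmap() wrapper returns -ENOSYS when a backend leaves it unset. A small generic sketch of the same pattern, with invented type names:

    #include <errno.h>

    struct thing;

    struct thing_funcs {
        int (*op)(struct thing *t);   /* optional: backends may leave this NULL */
    };

    struct thing {
        const struct thing_funcs *func;
    };

    /* Dispatch to the backend op if present, otherwise report "not implemented". */
    static int thing_op(struct thing *t)
    {
        return t->func->op ? t->func->op(t) : -ENOSYS;
    }
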
/openbmc/linux/fs/nilfs2/
alloc.c
532 desc_kaddr = kmap(desc_bh->b_page); in nilfs_palloc_prepare_alloc_entry()
544 bitmap_kaddr = kmap(bitmap_bh->b_page); in nilfs_palloc_prepare_alloc_entry()
619 desc_kaddr = kmap(req->pr_desc_bh->b_page); in nilfs_palloc_commit_free_entry()
622 bitmap_kaddr = kmap(req->pr_bitmap_bh->b_page); in nilfs_palloc_commit_free_entry()
660 desc_kaddr = kmap(req->pr_desc_bh->b_page); in nilfs_palloc_abort_alloc_entry()
663 bitmap_kaddr = kmap(req->pr_bitmap_bh->b_page); in nilfs_palloc_abort_alloc_entry()
768 bitmap_kaddr = kmap(bitmap_bh->b_page); in nilfs_palloc_freev()
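
The nilfs2 hits map a buffer head's page with kmap() before working on descriptor and bitmap blocks; the matching kunmap() calls simply fall outside the matched lines. A hedged sketch of the surrounding pattern:

    #include <linux/buffer_head.h>
    #include <linux/highmem.h>

    /* Illustrative: access the data behind a buffer head through kmap()/kunmap(). */
    static void example_touch_bh(struct buffer_head *bh)
    {
        void *kaddr = kmap(bh->b_page);

        /* ... read or update the block contents at kaddr + bh_offset(bh) ... */

        kunmap(bh->b_page);
    }
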
ifile.h
24 void *kaddr = kmap(ibh->b_page); in nilfs_ifile_map_inode()
/openbmc/linux/fs/vboxsf/
file.c
238 buf = kmap(page); in vboxsf_read_folio()
290 buf = kmap(page); in vboxsf_writepage()
324 buf = kmap(page); in vboxsf_write_end()
/openbmc/linux/fs/erofs/
decompressor_lzma.c
167 kin = kmap(*rq->in); in z_erofs_lzma_decompress()
227 strm->buf.out = kmap(rq->out[no]) + pageofs; in z_erofs_lzma_decompress()
240 kin = kmap(rq->in[ni]); in z_erofs_lzma_decompress()
/openbmc/linux/net/ceph/
pagelist.c
70 pl->mapped_tail = kmap(page); in ceph_pagelist_addpage()
167 pl->mapped_tail = kmap(page); in ceph_pagelist_truncate()
/openbmc/linux/include/linux/
highmem-internal.h
40 static inline void *kmap(struct page *page) in kmap() function
167 static inline void *kmap(struct page *page) in kmap() function
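
highmem-internal.h defines kmap() twice: once for CONFIG_HIGHMEM and once for configurations without highmem, where every page already has a linear-map address. The latter case roughly reduces to the following sketch (paraphrased, not a verbatim copy of the header):

    /* Sketch of the !CONFIG_HIGHMEM variant: no temporary mapping is needed,
     * so kmap() just returns the page's permanent linear-map address. */
    static inline void *kmap(struct page *page)
    {
        might_sleep();               /* keep callers honest: kmap() may sleep */
        return page_address(page);
    }
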
/openbmc/linux/drivers/gpu/drm/i915/gt/
shmem_utils.c
110 vaddr = kmap(page); in __shmem_rw()
145 vaddr = kmap(page); in shmem_read_to_iosys_map()
/openbmc/linux/fs/ntfs/
aops.h
78 kmap(page); in ntfs_map_page()
/openbmc/linux/mm/
highmem.c
566 void *kmap; in __kmap_local_page_prot() local
577 kmap = arch_kmap_local_high_get(page); in __kmap_local_page_prot()
578 if (kmap) in __kmap_local_page_prot()
579 return kmap; in __kmap_local_page_prot()
/openbmc/linux/drivers/gpu/drm/nouveau/
nouveau_bo.h
20 struct ttm_bo_kmap_obj kmap; member
107 &nvbo->kmap, &is_iomem); in nvbo_kmap_obj_iovirtual()
/openbmc/linux/tools/perf/
builtin-lock.c
524 struct map *kmap; in match_callstack_filter() local
553 sym = machine__find_kernel_symbol(machine, ip, &kmap); in match_callstack_filter()
1068 struct map *kmap; in report_lock_contention_begin_event() local
1086 &kmap); in report_lock_contention_begin_event()
1100 addrs[filters.nr_addrs++] = map__unmap_ip(kmap, sym->start); in report_lock_contention_begin_event()
1112 sym = machine__find_kernel_symbol(machine, key, &kmap); in report_lock_contention_begin_event()
1730 struct map *kmap; in print_lock_stat_stdio() local
1740 sym = machine__find_kernel_symbol(con->machine, ip, &kmap); in print_lock_stat_stdio()
1741 get_symbol_name_offset(kmap, sym, ip, buf, sizeof(buf)); in print_lock_stat_stdio()
1780 struct map *kmap; in print_lock_stat_csv() local
[all …]
/openbmc/linux/fs/freevxfs/
vxfs_subr.c
51 kmap(pp); in vxfs_get_page()
