
Search results for refs:mapping (results 101 – 125 of 1578), sorted by relevance


/openbmc/linux/drivers/gpu/drm/tegra/
submit.c
148 struct tegra_drm_mapping *mapping; in tegra_drm_mapping_get() local
152 mapping = xa_load(&context->mappings, id); in tegra_drm_mapping_get()
153 if (mapping) in tegra_drm_mapping_get()
154 kref_get(&mapping->ref); in tegra_drm_mapping_get()
158 return mapping; in tegra_drm_mapping_get()
282 struct tegra_drm_mapping *mapping; in submit_process_bufs() local
290 mapping = tegra_drm_mapping_get(context, buf->mapping); in submit_process_bufs()
291 if (!mapping) { in submit_process_bufs()
299 tegra_drm_mapping_put(mapping); in submit_process_bufs()
303 mappings[i].mapping = mapping; in submit_process_bufs()
[all …]
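
The submit.c hits above follow a common kernel pattern: look an object up in an XArray by id and take a kref before returning it. A minimal sketch of that pattern follows; the struct and function names are illustrative stand-ins, not taken from the Tegra driver.

#include <linux/xarray.h>
#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/types.h>

/* Illustrative stand-ins, not the real tegra_drm types. */
struct demo_mapping {
	struct kref ref;
	/* resource described by this mapping */
};

struct demo_context {
	struct xarray mappings;	/* id -> struct demo_mapping */
	struct mutex lock;
};

/*
 * Look up a mapping by id and take a reference before returning it,
 * mirroring the xa_load() + kref_get() shape seen in
 * tegra_drm_mapping_get() above.
 */
static struct demo_mapping *demo_mapping_get(struct demo_context *ctx, u32 id)
{
	struct demo_mapping *mapping;

	mutex_lock(&ctx->lock);
	mapping = xa_load(&ctx->mappings, id);
	if (mapping)
		kref_get(&mapping->ref);
	mutex_unlock(&ctx->lock);

	return mapping;
}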
/openbmc/linux/drivers/net/xen-netback/
hash.c
329 memset(vif->hash.mapping[vif->hash.mapping_sel], 0, in xenvif_set_hash_mapping_size()
338 u32 *mapping = vif->hash.mapping[!vif->hash.mapping_sel]; in xenvif_set_hash_mapping() local
344 .len = len * sizeof(*mapping), in xenvif_set_hash_mapping()
349 len > XEN_PAGE_SIZE / sizeof(*mapping)) in xenvif_set_hash_mapping()
352 copy_op[0].dest.u.gmfn = virt_to_gfn(mapping + off); in xenvif_set_hash_mapping()
353 copy_op[0].dest.offset = xen_offset_in_page(mapping + off); in xenvif_set_hash_mapping()
364 memcpy(mapping, vif->hash.mapping[vif->hash.mapping_sel], in xenvif_set_hash_mapping()
365 vif->hash.size * sizeof(*mapping)); in xenvif_set_hash_mapping()
376 if (mapping[off++] >= vif->num_queues) in xenvif_set_hash_mapping()
432 const u32 *mapping = vif->hash.mapping[vif->hash.mapping_sel]; in xenvif_dump_hash_info() local
[all …]
/openbmc/linux/mm/
workingset.c
603 struct address_space *mapping; in workingset_update_node() local
693 struct address_space *mapping; in shadow_lru_isolate() local
711 if (!xa_trylock(&mapping->i_pages)) { in shadow_lru_isolate()
718 if (mapping->host != NULL) { in shadow_lru_isolate()
719 if (!spin_trylock(&mapping->host->i_lock)) { in shadow_lru_isolate()
720 xa_unlock(&mapping->i_pages); in shadow_lru_isolate()
745 xa_unlock_irq(&mapping->i_pages); in shadow_lru_isolate()
746 if (mapping->host != NULL) { in shadow_lru_isolate()
747 if (mapping_shrinkable(mapping)) in shadow_lru_isolate()
748 inode_add_lru(mapping->host); in shadow_lru_isolate()
[all …]
mapping_dirty_helpers.c
263 unsigned long wp_shared_mapping_range(struct address_space *mapping, in wp_shared_mapping_range() argument
268 i_mmap_lock_read(mapping); in wp_shared_mapping_range()
269 WARN_ON(walk_page_mapping(mapping, first_index, nr, &wp_walk_ops, in wp_shared_mapping_range()
271 i_mmap_unlock_read(mapping); in wp_shared_mapping_range()
313 unsigned long clean_record_shared_mapping_range(struct address_space *mapping, in clean_record_shared_mapping_range() argument
329 i_mmap_lock_read(mapping); in clean_record_shared_mapping_range()
330 WARN_ON(walk_page_mapping(mapping, first_index, nr, &clean_walk_ops, in clean_record_shared_mapping_range()
332 i_mmap_unlock_read(mapping); in clean_record_shared_mapping_range()
/openbmc/linux/drivers/net/wwan/iosm/
iosm_ipc_pcie.c
454 size_t size, dma_addr_t *mapping, int direction) in ipc_pcie_addr_map() argument
457 *mapping = dma_map_single(&ipc_pcie->pci->dev, data, size, in ipc_pcie_addr_map()
459 if (dma_mapping_error(&ipc_pcie->pci->dev, *mapping)) { in ipc_pcie_addr_map()
468 dma_addr_t mapping, int direction) in ipc_pcie_addr_unmap() argument
470 if (!mapping) in ipc_pcie_addr_unmap()
473 dma_unmap_single(&ipc_pcie->pci->dev, mapping, size, direction); in ipc_pcie_addr_unmap()
491 IPC_CB(skb)->mapping = 0; in ipc_pcie_alloc_local_skb()
497 gfp_t flags, dma_addr_t *mapping, in ipc_pcie_alloc_skb() argument
516 IPC_CB(skb)->mapping = *mapping; in ipc_pcie_alloc_skb()
528 ipc_pcie_addr_unmap(ipc_pcie, IPC_CB(skb)->len, IPC_CB(skb)->mapping, in ipc_pcie_kfree_skb()
[all …]
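
The ipc_pcie_addr_map()/ipc_pcie_addr_unmap() hits above are thin wrappers around the streaming DMA API. As a hedged sketch of the underlying pattern (the device, buffer, and function names here are invented for illustration, not part of the iosm driver):

#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/errno.h>

/*
 * Map a driver buffer for device access, check the mapping for errors,
 * and unmap it once the hardware is done.  This is the
 * dma_map_single()/dma_mapping_error()/dma_unmap_single() shape that
 * the wrappers above are built on.
 */
static int demo_dma_xfer(struct device *dev, void *buf, size_t len)
{
	dma_addr_t mapping;

	mapping = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, mapping))
		return -ENOMEM;

	/* ... hand "mapping" to the hardware and wait for completion ... */

	dma_unmap_single(dev, mapping, len, DMA_TO_DEVICE);
	return 0;
}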
iosm_ipc_pcie.h
89 dma_addr_t mapping; member
122 size_t size, dma_addr_t *mapping, int direction);
132 dma_addr_t mapping, int direction);
146 gfp_t flags, dma_addr_t *mapping,
/openbmc/linux/fs/netfs/
buffered_read.c
25 XA_STATE(xas, &rreq->mapping->i_pages, start_page); in netfs_rreq_unlock_folios()
168 struct netfs_inode *ctx = netfs_inode(ractl->mapping->host); in netfs_readahead()
176 rreq = netfs_alloc_request(ractl->mapping, ractl->file, in netfs_readahead()
226 struct address_space *mapping = folio_file_mapping(folio); in netfs_read_folio() local
228 struct netfs_inode *ctx = netfs_inode(mapping->host); in netfs_read_folio()
233 rreq = netfs_alloc_request(mapping, file, in netfs_read_folio()
342 struct file *file, struct address_space *mapping, in netfs_write_begin() argument
351 DEFINE_READAHEAD(ractl, file, NULL, mapping, index); in netfs_write_begin()
354 folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN, in netfs_write_begin()
355 mapping_gfp_mask(mapping)); in netfs_write_begin()
[all …]
/openbmc/linux/drivers/gpu/drm/panfrost/
panfrost_perfcnt.c
30 struct panfrost_gem_mapping *mapping; member
54 gpuva = pfdev->perfcnt->mapping->mmnode.start << PAGE_SHIFT; in panfrost_perfcnt_dump_locked()
102 perfcnt->mapping = panfrost_gem_mapping_get(to_panfrost_bo(&bo->base), in panfrost_perfcnt_enable_locked()
104 if (!perfcnt->mapping) { in panfrost_perfcnt_enable_locked()
133 as = panfrost_mmu_as_get(pfdev, perfcnt->mapping->mmu); in panfrost_perfcnt_enable_locked()
170 panfrost_gem_mapping_put(perfcnt->mapping); in panfrost_perfcnt_enable_locked()
198 drm_gem_vunmap_unlocked(&perfcnt->mapping->obj->base.base, &map); in panfrost_perfcnt_disable_locked()
200 panfrost_gem_close(&perfcnt->mapping->obj->base.base, file_priv); in panfrost_perfcnt_disable_locked()
201 panfrost_mmu_as_put(pfdev, perfcnt->mapping->mmu); in panfrost_perfcnt_disable_locked()
202 panfrost_gem_mapping_put(perfcnt->mapping); in panfrost_perfcnt_disable_locked()
[all …]
/openbmc/linux/tools/testing/selftests/arm64/mte/
mte_common_util.c
123 static void *__mte_allocate_memory_range(size_t size, int mem_type, int mapping, in __mte_allocate_memory_range() argument
146 map_flag = mapping; in __mte_allocate_memory_range()
149 if (!(mapping & MAP_SHARED)) in __mte_allocate_memory_range()
168 void *mte_allocate_memory_tag_range(size_t size, int mem_type, int mapping, in mte_allocate_memory_tag_range() argument
171 return __mte_allocate_memory_range(size, mem_type, mapping, range_before, in mte_allocate_memory_tag_range()
175 void *mte_allocate_memory(size_t size, int mem_type, int mapping, bool tags) in mte_allocate_memory() argument
177 return __mte_allocate_memory_range(size, mem_type, mapping, 0, 0, tags, -1); in mte_allocate_memory()
180 void *mte_allocate_file_memory(size_t size, int mem_type, int mapping, bool tags, int fd) in mte_allocate_file_memory() argument
202 return __mte_allocate_memory_range(size, mem_type, mapping, 0, 0, tags, fd); in mte_allocate_file_memory()
205 void *mte_allocate_file_memory_tag_range(size_t size, int mem_type, int mapping, in mte_allocate_file_memory_tag_range() argument
[all …]
/openbmc/linux/fs/bfs/
file.c
163 static void bfs_write_failed(struct address_space *mapping, loff_t to) in bfs_write_failed() argument
165 struct inode *inode = mapping->host; in bfs_write_failed()
171 static int bfs_write_begin(struct file *file, struct address_space *mapping, in bfs_write_begin() argument
177 ret = block_write_begin(mapping, pos, len, pagep, bfs_get_block); in bfs_write_begin()
179 bfs_write_failed(mapping, pos + len); in bfs_write_begin()
184 static sector_t bfs_bmap(struct address_space *mapping, sector_t block) in bfs_bmap() argument
186 return generic_block_bmap(mapping, block, bfs_get_block); in bfs_bmap()
/openbmc/linux/drivers/gpu/drm/etnaviv/
etnaviv_cmdbuf.c
65 struct etnaviv_vram_mapping *mapping, in etnaviv_cmdbuf_suballoc_map() argument
68 return etnaviv_iommu_get_suballoc_va(context, mapping, memory_base, in etnaviv_cmdbuf_suballoc_map()
73 struct etnaviv_vram_mapping *mapping) in etnaviv_cmdbuf_suballoc_unmap() argument
75 etnaviv_iommu_put_suballoc_va(context, mapping); in etnaviv_cmdbuf_suballoc_unmap()
137 struct etnaviv_vram_mapping *mapping) in etnaviv_cmdbuf_get_va() argument
139 return mapping->iova + buf->suballoc_offset; in etnaviv_cmdbuf_get_va()
etnaviv_mmu.h
91 struct etnaviv_vram_mapping *mapping, u64 va);
93 struct etnaviv_vram_mapping *mapping);
94 void etnaviv_iommu_reap_mapping(struct etnaviv_vram_mapping *mapping);
97 struct etnaviv_vram_mapping *mapping,
101 struct etnaviv_vram_mapping *mapping);
/openbmc/linux/arch/csky/abiv1/
cacheflush.c
20 struct address_space *mapping; in flush_dcache_folio() local
25 mapping = folio_flush_mapping(folio); in flush_dcache_folio()
27 if (mapping && !folio_mapped(folio)) in flush_dcache_folio()
31 if (mapping) in flush_dcache_folio()
/openbmc/linux/Documentation/riscv/
boot.rst
14 mapping is set up.
40 PMPs, in the direct mapping, so the firmware must correctly mark those regions
117 Virtual mapping installation
124 at this point. When establishing this mapping, no allocation can be done
128 2. ``setup_vm_final()`` creates the final kernel mapping in ``swapper_pg_dir``
130 mapping. When establishing this mapping, the kernel can allocate memory but
139 those macros before the final virtual mapping is installed must be carefully
142 Devicetree mapping via fixmap
146 by ``setup_vm()``, and used with the mapping established by
154 A few pieces of code need to run before even the first virtual mapping is
[all …]
/openbmc/linux/drivers/nvdimm/
region_devs.c
714 nd_mapping = &nd_region->mapping[n]; in mappingN()
728 static DEVICE_ATTR_RO(mapping##idx)
1010 struct nd_mapping_desc *mapping = &ndr_desc->mapping[i]; in nd_region_create() local
1011 struct nvdimm *nvdimm = mapping->nvdimm; in nd_region_create()
1013 if ((mapping->start | mapping->size) % PAGE_SIZE) { in nd_region_create()
1053 struct nd_mapping_desc *mapping = &ndr_desc->mapping[i]; in nd_region_create() local
1054 struct nvdimm *nvdimm = mapping->nvdimm; in nd_region_create()
1056 nd_region->mapping[i].nvdimm = nvdimm; in nd_region_create()
1057 nd_region->mapping[i].start = mapping->start; in nd_region_create()
1058 nd_region->mapping[i].size = mapping->size; in nd_region_create()
[all …]
/openbmc/linux/arch/x86/kernel/
jailhouse.c
202 void *mapping; in jailhouse_init_platform() local
221 mapping = early_memremap(pa_data, sizeof(header)); in jailhouse_init_platform()
222 memcpy(&header, mapping, sizeof(header)); in jailhouse_init_platform()
223 early_memunmap(mapping, sizeof(header)); in jailhouse_init_platform()
241 mapping = early_memremap(pa_data, setup_data_len); in jailhouse_init_platform()
242 memcpy(&setup_data, mapping, setup_data_len); in jailhouse_init_platform()
243 early_memunmap(mapping, setup_data_len); in jailhouse_init_platform()
/openbmc/linux/fs/ceph/
cache.h
52 static inline int ceph_fscache_dirty_folio(struct address_space *mapping, in ceph_fscache_dirty_folio() argument
55 struct ceph_inode_info *ci = ceph_inode(mapping->host); in ceph_fscache_dirty_folio()
57 return fscache_dirty_folio(mapping, folio, ceph_fscache_cookie(ci)); in ceph_fscache_dirty_folio()
127 static inline int ceph_fscache_dirty_folio(struct address_space *mapping, in ceph_fscache_dirty_folio() argument
130 return filemap_dirty_folio(mapping, folio); in ceph_fscache_dirty_folio()
/openbmc/linux/arch/csky/abiv1/inc/abi/
cacheflush.h
19 #define flush_dcache_mmap_lock(mapping) xa_lock_irq(&mapping->i_pages) argument
20 #define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&mapping->i_pages) argument
/openbmc/linux/fs/freevxfs/
vxfs_subr.c
44 vxfs_get_page(struct address_space *mapping, u_long n) in vxfs_get_page() argument
48 pp = read_mapping_page(mapping, n, NULL); in vxfs_get_page()
149 vxfs_bmap(struct address_space *mapping, sector_t block) in vxfs_bmap() argument
151 return generic_block_bmap(mapping, block, vxfs_getblk); in vxfs_bmap()
/openbmc/linux/Documentation/devicetree/bindings/sound/
st,sta32x.txt
34 - st,ch1-output-mapping: Channel 1 output mapping
35 - st,ch2-output-mapping: Channel 2 output mapping
36 - st,ch3-output-mapping: Channel 3 output mapping
93 st,ch1-output-mapping = /bits/ 8 <0>; // set channel 1 output ch 1
94 st,ch2-output-mapping = /bits/ 8 <0>; // set channel 2 output ch 1
95 st,ch3-output-mapping = /bits/ 8 <0>; // set channel 3 output ch 1
/openbmc/linux/Documentation/arch/ia64/
aliasing.rst
30 page with both a cacheable mapping and an uncacheable mapping[1].
76 can referenced safely by an identity mapping.
102 by a kernel identity mapping, the user mapping must use the same
103 attribute as the kernel mapping.
109 machines, this should use an uncacheable mapping as a fallback.
134 the WC mapping is allowed.
136 Otherwise, the user mapping must use the same attribute as the
137 kernel mapping.
143 identity mapping. This is obviously safe for things in
149 mapping. This would not cause attribute aliasing. But
[all …]
/openbmc/linux/Documentation/core-api/
dma-attributes.rst
6 defined in linux/dma-mapping.h.
11 DMA_ATTR_WEAK_ORDERING specifies that reads and writes to the mapping
21 DMA_ATTR_WRITE_COMBINE specifies that writes to the mapping may be
32 virtual mapping for the allocated buffer. On some architectures creating
33 such mapping is non-trivial task and consumes very limited resources
52 having a mapping created separately for each device and is usually
67 device domain after releasing a mapping for it. Use this attribute with
73 By default DMA-mapping subsystem is allowed to assemble the buffer
96 - You know that the DMA mapping is fairly transitory. If you expect
97 the mapping to have a short lifetime then it may be worth it to
[all …]
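
The dma-attributes.rst excerpt above describes the DMA_ATTR_* flags declared in linux/dma-mapping.h. A hedged sketch of passing one of them (DMA_ATTR_NO_KERNEL_MAPPING) to a coherent allocation follows; the helper names are placeholders, not kernel APIs:

#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/gfp.h>

/*
 * Allocate a DMA buffer while asking the core not to create a kernel
 * virtual mapping for it (DMA_ATTR_NO_KERNEL_MAPPING).  The returned
 * cookie is only valid for dma_free_attrs()/dma_mmap_attrs(); the
 * device itself uses the dma_addr_t handle.
 */
static void *demo_alloc_no_kmap(struct device *dev, size_t size,
				dma_addr_t *handle)
{
	return dma_alloc_attrs(dev, size, handle, GFP_KERNEL,
			       DMA_ATTR_NO_KERNEL_MAPPING);
}

/* The matching free must pass the same attributes. */
static void demo_free_no_kmap(struct device *dev, size_t size,
			      void *cpu_addr, dma_addr_t handle)
{
	dma_free_attrs(dev, size, cpu_addr, handle,
		       DMA_ATTR_NO_KERNEL_MAPPING);
}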
/openbmc/linux/fs/hugetlbfs/
inode.c
414 struct address_space *mapping, in hugetlbfs_write_begin() argument
495 struct address_space *mapping, in hugetlb_unmap_file_folio() argument
509 i_mmap_lock_write(mapping); in hugetlb_unmap_file_folio()
537 i_mmap_unlock_write(mapping); in hugetlb_unmap_file_folio()
546 i_mmap_lock_write(mapping); in hugetlb_unmap_file_folio()
614 struct address_space *mapping, in remove_inode_single_folio() argument
739 i_mmap_lock_write(mapping); in hugetlb_vmtruncate()
743 i_mmap_unlock_write(mapping); in hugetlb_vmtruncate()
748 struct address_space *mapping, in hugetlbfs_zero_partial_page() argument
792 i_mmap_lock_write(mapping); in hugetlbfs_punch_hole()
[all …]
/openbmc/linux/Documentation/arch/xtensa/
booting.rst
14 the address must be a valid address in the current mapping. The kernel will
15 not change the mapping on its own.
17 default virtual mapping (0xd0000000..0xffffffff).
20 virtual mapping. It is considered physical if it is within the range of
21 physical addresses covered by the default KSEG mapping (XCHAL_KSEG_PADDR..
/openbmc/qemu/tests/qemu-iotests/tests/
backup-discard-source
98 mapping = qemu_img_map(temp_img)
99 self.assertEqual(len(mapping), 1)
100 self.assertEqual(mapping[0]['start'], 0)
101 self.assertEqual(mapping[0]['length'], 1024 * 1024)
102 self.assertEqual(mapping[0]['data'], False)
