Searched full:pages (Results 1 – 25 of 3498) sorted by relevance

/openbmc/qemu/hw/i2c/
pmbus_device.c
232 pmdev->pages = g_new0(PMBusPage, pmdev->num_pages); in pmbus_pages_alloc()
238 if ((pmdev->pages[i].operation & PB_OP_ON) == 0) { in pmbus_check_limits()
242 if (pmdev->pages[i].read_vout > pmdev->pages[i].vout_ov_fault_limit) { in pmbus_check_limits()
243 pmdev->pages[i].status_word |= PB_STATUS_VOUT; in pmbus_check_limits()
244 pmdev->pages[i].status_vout |= PB_STATUS_VOUT_OV_FAULT; in pmbus_check_limits()
247 if (pmdev->pages[i].read_vout > pmdev->pages[i].vout_ov_warn_limit) { in pmbus_check_limits()
248 pmdev->pages[i].status_word |= PB_STATUS_VOUT; in pmbus_check_limits()
249 pmdev->pages[i].status_vout |= PB_STATUS_VOUT_OV_WARN; in pmbus_check_limits()
252 if (pmdev->pages[i].read_vout < pmdev->pages[i].vout_uv_warn_limit) { in pmbus_check_limits()
253 pmdev->pages[i].status_word |= PB_STATUS_VOUT; in pmbus_check_limits()
[all …]
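
The excerpt above shows the recurring PMBus limit-check idiom: each page's READ_VOUT is compared against its over-voltage fault and warn limits, and the matching bits are ORed into STATUS_WORD and STATUS_VOUT. A minimal standalone sketch of the same pattern; struct page_regs and the bit positions here are simplified stand-ins, not QEMU's PMBusPage definitions:

    #include <stdint.h>

    #define PB_STATUS_VOUT          (1 << 15)  /* STATUS_WORD: VOUT fault/warn */
    #define PB_STATUS_VOUT_OV_FAULT (1 << 7)   /* STATUS_VOUT bits */
    #define PB_STATUS_VOUT_OV_WARN  (1 << 6)

    struct page_regs {                 /* illustrative stand-in for PMBusPage */
        uint16_t read_vout;
        uint16_t vout_ov_fault_limit;
        uint16_t vout_ov_warn_limit;
        uint16_t status_word;
        uint8_t  status_vout;
    };

    static void check_vout_limits(struct page_regs *p)
    {
        if (p->read_vout > p->vout_ov_fault_limit) {
            p->status_word |= PB_STATUS_VOUT;
            p->status_vout |= PB_STATUS_VOUT_OV_FAULT;
        }
        if (p->read_vout > p->vout_ov_warn_limit) {
            p->status_word |= PB_STATUS_VOUT;
            p->status_vout |= PB_STATUS_VOUT_OV_WARN;
        }
    }
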
/openbmc/linux/drivers/iommu/iommufd/
pages.c
69 * allocation can hold about 26M of 4k pages and 13G of 2M pages in an
163 static void iopt_pages_add_npinned(struct iopt_pages *pages, size_t npages) in iopt_pages_add_npinned() argument
167 rc = check_add_overflow(pages->npinned, npages, &pages->npinned); in iopt_pages_add_npinned()
169 WARN_ON(rc || pages->npinned > pages->npages); in iopt_pages_add_npinned()
172 static void iopt_pages_sub_npinned(struct iopt_pages *pages, size_t npages) in iopt_pages_sub_npinned() argument
176 rc = check_sub_overflow(pages->npinned, npages, &pages->npinned); in iopt_pages_sub_npinned()
178 WARN_ON(rc || pages->npinned > pages->npages); in iopt_pages_sub_npinned()
181 static void iopt_pages_err_unpin(struct iopt_pages *pages, in iopt_pages_err_unpin() argument
189 iopt_pages_sub_npinned(pages, npages); in iopt_pages_err_unpin()
195 * covers a portion of the first and last pages in the range.
[all …]
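
iopt_pages_add_npinned()/iopt_pages_sub_npinned() above keep the pinned-page counter honest with overflow-checked arithmetic. The kernel's check_add_overflow()/check_sub_overflow() wrap the GCC/Clang builtins used below; a userspace sketch with an illustrative stand-in struct:

    #include <assert.h>
    #include <stddef.h>

    struct pages_acct {          /* illustrative stand-in for struct iopt_pages */
        size_t npinned;
        size_t npages;
    };

    static void acct_add_npinned(struct pages_acct *p, size_t npages)
    {
        /* the kernel's check_add_overflow() wraps this builtin */
        int rc = __builtin_add_overflow(p->npinned, npages, &p->npinned);
        assert(!rc && p->npinned <= p->npages);   /* kernel uses WARN_ON() */
    }

    static void acct_sub_npinned(struct pages_acct *p, size_t npages)
    {
        int rc = __builtin_sub_overflow(p->npinned, npages, &p->npinned);
        assert(!rc && p->npinned <= p->npages);
    }
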
io_pagetable.c
23 struct iopt_pages *pages; member
42 if (!iter->area->pages) { in iopt_area_contig_init()
65 !iter->area->pages) { in iopt_area_contig_next()
195 * The area takes a slice of the pages from start_byte to start_byte + length
198 struct iopt_pages *pages, unsigned long iova, in iopt_insert_area() argument
204 if ((iommu_prot & IOMMU_WRITE) && !pages->writable) in iopt_insert_area()
220 if (WARN_ON(area->pages_node.last >= pages->npages)) in iopt_insert_area()
224 * The area is inserted with a NULL pages indicating it is not fully in iopt_insert_area()
271 (uintptr_t)elm->pages->uptr + elm->start_byte, length); in iopt_alloc_area_pages()
286 * Areas are created with a NULL pages so that the IOVA space is in iopt_alloc_area_pages()
[all …]
/openbmc/linux/Documentation/admin-guide/mm/
hugetlbpage.rst
2 HugeTLB Pages
28 persistent hugetlb pages in the kernel's huge page pool. It also displays
30 and surplus huge pages in the pool of huge pages of default size.
46 is the size of the pool of huge pages.
48 is the number of huge pages in the pool that are not yet
51 is short for "reserved," and is the number of huge pages for
53 but no allocation has yet been made. Reserved huge pages
55 huge page from the pool of huge pages at fault time.
57 is short for "surplus," and is the number of huge pages in
59 maximum number of surplus huge pages is controlled by
[all …]
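
Those pool counters surface in /proc/meminfo as HugePages_Total, HugePages_Free, HugePages_Rsvd and HugePages_Surp. A small reader that dumps them, assuming nothing beyond the standard meminfo key names:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        char line[128];
        FILE *f = fopen("/proc/meminfo", "r");

        if (!f)
            return 1;
        while (fgets(line, sizeof(line), f)) {
            /* HugePages_Total, HugePages_Free, HugePages_Rsvd, HugePages_Surp */
            if (strncmp(line, "HugePages_", 10) == 0)
                fputs(line, stdout);
        }
        fclose(f);
        return 0;
    }
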
zswap.rst
8 Zswap is a lightweight compressed cache for swap pages. It takes pages that are
26 Zswap evicts pages from compressed cache on an LRU basis to the backing swap
40 When zswap is disabled at runtime it will stop storing pages that are
42 back into memory all of the pages stored in the compressed pool. The
43 pages stored in zswap will remain in the compressed pool until they are
45 pages out of the compressed pool, a swapoff on the swap device(s) will
46 fault back into memory all swapped out pages, including those in the
52 Zswap receives pages for compression from the swap subsystem and is able to
53 evict pages from its own compressed pool on an LRU basis and write them back to
60 pages are freed. The pool is not preallocated. By default, a zpool
[all …]
concepts.rst
41 The physical system memory is divided into page frames, or pages. The
48 pages. These mappings are described by page tables that allow
53 addresses of actual pages used by the software. The tables at higher
54 levels contain physical addresses of the pages belonging to the lower
64 Huge Pages
75 Many modern CPU architectures allow mapping of the memory pages
77 it is possible to map 2M and even 1G pages using entries in the second
78 and the third level page tables. In Linux such pages are called
79 `huge`. Usage of huge pages significantly reduces pressure on TLB,
83 memory with the huge pages. The first one is `HugeTLB filesystem`, or
[all …]
idle_page_tracking.rst
8 The idle page tracking feature allows tracking which memory pages are being
37 Only accesses to user memory pages are tracked. These are pages mapped to a
38 process address space, page cache and buffer pages, swap cache pages. For other
39 page types (e.g. SLAB pages) an attempt to mark a page idle is silently ignored,
40 and hence such pages are never reported idle.
42 For huge pages the idle flag is set only on the head page, so one has to read
43 ``/proc/kpageflags`` in order to correctly count idle huge pages.
50 That said, in order to estimate the number of pages that are not used by a
53 1. Mark all the workload's pages as idle by setting corresponding bits in
54 ``/sys/kernel/mm/page_idle/bitmap``. The pages can be found by reading
[all …]
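
Step 1 of that workflow comes down to plain pwrite() calls on the bitmap file. A hedged sketch; the PFN range is caller-supplied and illustrative, and opening the file requires root:

    #include <fcntl.h>
    #include <stdint.h>
    #include <unistd.h>

    /* Mark pages [first_pfn, first_pfn + npages) idle. Each 64-bit word
     * in the bitmap covers 64 page frames; accesses must be 8-byte
     * aligned multiples of 8, so we round out to whole words. */
    static int mark_idle(uint64_t first_pfn, uint64_t npages)
    {
        int fd = open("/sys/kernel/mm/page_idle/bitmap", O_WRONLY);
        uint64_t word = ~0ULL;                    /* set bits mark pages idle */
        off_t off = (off_t)(first_pfn / 64) * 8;  /* byte offset of first word */
        uint64_t nwords = (first_pfn % 64 + npages + 63) / 64;

        if (fd < 0)
            return -1;
        for (uint64_t i = 0; i < nwords; i++) {
            if (pwrite(fd, &word, 8, off + 8 * (off_t)i) != 8)
                break;
        }
        close(fd);
        return 0;
    }
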
ksm.rst
18 which have been registered with it, looking for pages of identical
21 content). The amount of pages that KSM daemon scans in a single pass
25 KSM only merges anonymous (private) pages, never pagecache (file) pages.
26 KSM's merged pages were originally locked into kernel memory, but can now
27 be swapped out just like other user pages (but sharing is broken when they
45 to cancel that advice and restore unshared pages: whereupon KSM
55 cannot contain any pages which KSM could actually merge; even if
80 how many pages to scan before ksmd goes to sleep
92 specifies if pages from different NUMA nodes can be merged.
93 When set to 0, ksm merges only pages which physically reside
[all …]
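
Registration happens through madvise(). A minimal sketch of opting an anonymous mapping in and back out; the mapping size is illustrative, and a CONFIG_KSM kernel with ksmd enabled is assumed:

    #include <stddef.h>
    #include <sys/mman.h>

    int main(void)
    {
        size_t len = 64 << 20;   /* 64 MiB of anonymous memory, size illustrative */
        void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (p == MAP_FAILED)
            return 1;
        if (madvise(p, len, MADV_MERGEABLE))     /* register range with ksmd */
            return 1;
        /* ... run the workload; identical pages may now be merged ... */
        madvise(p, len, MADV_UNMERGEABLE);       /* cancel, restoring unshared pages */
        munmap(p, len);
        return 0;
    }
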
/openbmc/linux/mm/
percpu-vm.c
23 * pcpu_get_pages - get temp pages array
30 * Pointer to temp pages array on success.
34 static struct page **pages; in pcpu_get_pages() local
35 size_t pages_size = pcpu_nr_units * pcpu_unit_pages * sizeof(pages[0]); in pcpu_get_pages()
39 if (!pages) in pcpu_get_pages()
40 pages = pcpu_mem_zalloc(pages_size, GFP_KERNEL); in pcpu_get_pages()
41 return pages; in pcpu_get_pages()
45 * pcpu_free_pages - free pages which were allocated for @chunk
46 * @chunk: chunk pages were allocated for
47 * @pages: array of pages to be freed, indexed by pcpu_page_idx()
[all …]
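
pcpu_get_pages() illustrates a lazily-allocated static scratch array: allocated once on first use, then handed back to every caller. A userspace sketch of the same shape, with NR_UNITS and UNIT_PAGES as illustrative stand-ins for pcpu_nr_units and pcpu_unit_pages:

    #include <stdlib.h>

    enum { NR_UNITS = 4, UNIT_PAGES = 8 };   /* illustrative sizes */

    /* Allocated on first call and reused afterwards; like the original,
     * callers are assumed to serialize access themselves. */
    static void **get_scratch_pages(void)
    {
        static void **pages;                 /* persists across calls */

        if (!pages)
            pages = calloc(NR_UNITS * UNIT_PAGES, sizeof(pages[0]));
        return pages;                        /* NULL if the one-time alloc failed */
    }
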
balloon_compaction.c
5 * Common interface for making balloon pages movable by compaction.
30 * balloon_page_list_enqueue() - inserts a list of pages into the balloon page
33 * @pages: pages to enqueue - allocated using balloon_page_alloc.
35 * Driver must call this function to properly enqueue balloon pages before
38 * Return: number of pages that were enqueued.
41 struct list_head *pages) in balloon_page_list_enqueue() argument
48 list_for_each_entry_safe(page, tmp, pages, lru) { in balloon_page_list_enqueue()
59 * balloon_page_list_dequeue() - removes pages from balloon's page list and
60 * returns a list of the pages.
62 * @pages: pointer to the list of pages that would be returned to the caller.
[all …]
gup.c
33 static inline void sanity_check_pinned_pages(struct page **pages, in sanity_check_pinned_pages() argument
40 * We only pin anonymous pages if they are exclusive. Once pinned, we in sanity_check_pinned_pages()
44 * We'd like to verify that our pinned anonymous pages are still mapped in sanity_check_pinned_pages()
51 for (; npages; npages--, pages++) { in sanity_check_pinned_pages()
52 struct page *page = *pages; in sanity_check_pinned_pages()
179 * Pages that were pinned via pin_user_pages*() must be released via either
181 * that such pages can be separately tracked and uniquely handled. In
249 * unpin_user_pages_dirty_lock() - release and optionally dirty gup-pinned pages
250 * @pages: array of pages to be maybe marked dirty, and definitely released.
251 * @npages: number of pages in the @pages array.
[all …]
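
The rule the gup.c comments state is that pages pinned via pin_user_pages*() must be released via unpin_user_pages*(). An illustrative kernel-side fragment of that pairing (not taken from gup.c; signatures match recent mainline, so treat it as a sketch):

    #include <linux/mm.h>

    static int pin_use_release(unsigned long uaddr, int nr,
                               struct page **pages, bool dirtied)
    {
        int pinned = pin_user_pages_fast(uaddr, nr, FOLL_WRITE, pages);

        if (pinned < 0)
            return pinned;

        /* ... hand the pinned pages to DMA or access them directly ... */

        /* release; also marks the pages dirty (via set_page_dirty_lock())
         * when @dirtied is true */
        unpin_user_pages_dirty_lock(pages, pinned, dirtied);
        return pinned == nr ? 0 : -EFAULT;
    }
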
gup_test.c
10 static void put_back_pages(unsigned int cmd, struct page **pages, in put_back_pages() argument
19 put_page(pages[i]); in put_back_pages()
25 unpin_user_pages(pages, nr_pages); in put_back_pages()
29 unpin_user_pages(pages, nr_pages); in put_back_pages()
32 put_page(pages[i]); in put_back_pages()
39 static void verify_dma_pinned(unsigned int cmd, struct page **pages, in verify_dma_pinned() argument
50 folio = page_folio(pages[i]); in verify_dma_pinned()
53 "pages[%lu] is NOT dma-pinned\n", i)) { in verify_dma_pinned()
59 "pages[%lu] is NOT pinnable but pinned\n", in verify_dma_pinned()
69 static void dump_pages_test(struct gup_test *gup, struct page **pages, in dump_pages_test() argument
[all …]
/openbmc/qemu/hw/sensor/
isl_pmbus_vr.c
73 pmdev->pages[i].operation = ISL_OPERATION_DEFAULT; in isl_pmbus_vr_exit_reset()
74 pmdev->pages[i].on_off_config = ISL_ON_OFF_CONFIG_DEFAULT; in isl_pmbus_vr_exit_reset()
75 pmdev->pages[i].vout_mode = ISL_VOUT_MODE_DEFAULT; in isl_pmbus_vr_exit_reset()
76 pmdev->pages[i].vout_command = ISL_VOUT_COMMAND_DEFAULT; in isl_pmbus_vr_exit_reset()
77 pmdev->pages[i].vout_max = ISL_VOUT_MAX_DEFAULT; in isl_pmbus_vr_exit_reset()
78 pmdev->pages[i].vout_margin_high = ISL_VOUT_MARGIN_HIGH_DEFAULT; in isl_pmbus_vr_exit_reset()
79 pmdev->pages[i].vout_margin_low = ISL_VOUT_MARGIN_LOW_DEFAULT; in isl_pmbus_vr_exit_reset()
80 pmdev->pages[i].vout_transition_rate = ISL_VOUT_TRANSITION_RATE_DEFAULT; in isl_pmbus_vr_exit_reset()
81 pmdev->pages[i].vout_ov_fault_limit = ISL_VOUT_OV_FAULT_LIMIT_DEFAULT; in isl_pmbus_vr_exit_reset()
82 pmdev->pages[i].ot_fault_limit = ISL_OT_FAULT_LIMIT_DEFAULT; in isl_pmbus_vr_exit_reset()
[all …]
adm1272.c
118 if (pmdev->pages[0].read_vout > s->peak_vout) { in adm1272_check_limits()
119 s->peak_vout = pmdev->pages[0].read_vout; in adm1272_check_limits()
122 if (pmdev->pages[0].read_vin > s->peak_vin) { in adm1272_check_limits()
123 s->peak_vin = pmdev->pages[0].read_vin; in adm1272_check_limits()
126 if (pmdev->pages[0].read_iout > s->peak_iout) { in adm1272_check_limits()
127 s->peak_iout = pmdev->pages[0].read_iout; in adm1272_check_limits()
130 if (pmdev->pages[0].read_temperature_1 > s->peak_temperature) { in adm1272_check_limits()
131 s->peak_temperature = pmdev->pages[0].read_temperature_1; in adm1272_check_limits()
134 if (pmdev->pages[0].read_pin > s->peak_pin) { in adm1272_check_limits()
135 s->peak_pin = pmdev->pages[0].read_pin; in adm1272_check_limits()
[all …]
/openbmc/qemu/migration/
multifd-nocomp.c
50 MultiFDPages_t *pages = &p->data->u.ram; in multifd_set_file_bitmap() local
52 assert(pages->block); in multifd_set_file_bitmap()
54 for (int i = 0; i < pages->normal_num; i++) { in multifd_set_file_bitmap()
55 ramblock_set_file_bmap_atomic(pages->block, pages->offset[i], true); in multifd_set_file_bitmap()
58 for (int i = pages->normal_num; i < pages->num; i++) { in multifd_set_file_bitmap()
59 ramblock_set_file_bmap_atomic(pages->block, pages->offset[i], false); in multifd_set_file_bitmap()
90 MultiFDPages_t *pages = &p->data->u.ram; in multifd_send_prepare_iovs() local
93 for (int i = 0; i < pages->normal_num; i++) { in multifd_send_prepare_iovs()
94 p->iov[p->iovs_num].iov_base = pages->block->host + pages->offset[i]; in multifd_send_prepare_iovs()
99 p->next_packet_size = pages->normal_num * page_size; in multifd_send_prepare_iovs()
[all …]
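
multifd_send_prepare_iovs() builds one iovec per normal page, pointing straight into guest RAM rather than copying. A userspace sketch of that loop with illustrative parameter names:

    #include <stdint.h>
    #include <sys/uio.h>

    /* Returns the payload size (QEMU stores it as next_packet_size). */
    static size_t prepare_iovs(struct iovec *iov, uint8_t *host_base,
                               const uint64_t *offset, uint32_t normal_num,
                               size_t page_size)
    {
        for (uint32_t i = 0; i < normal_num; i++) {
            iov[i].iov_base = host_base + offset[i];  /* no copy, point into RAM */
            iov[i].iov_len = page_size;
        }
        return normal_num * page_size;
    }
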
multifd-zero-page.c
41 * multifd_send_zero_page_detect: Perform zero page detection on all pages.
43 * Sorts normal pages before zero pages in p->pages->offset and updates
44 * p->pages->normal_num.
50 MultiFDPages_t *pages = &p->data->u.ram; in multifd_send_zero_page_detect() local
51 RAMBlock *rb = pages->block; in multifd_send_zero_page_detect()
53 int j = pages->num - 1; in multifd_send_zero_page_detect()
56 pages->normal_num = pages->num; in multifd_send_zero_page_detect()
61 * Sort the page offset array by moving all normal pages to in multifd_send_zero_page_detect()
62 * the left and all zero pages to the right of the array. in multifd_send_zero_page_detect()
65 uint64_t offset = pages->offset[i]; in multifd_send_zero_page_detect()
[all …]
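
The detection pass is an in-place partition: normal pages accumulate on the left of the offset array, zero pages on the right, and normal_num records the split point. A standalone sketch of that reordering, using memcmp() as a trivial stand-in for QEMU's buffer_is_zero():

    #include <stdbool.h>
    #include <stdint.h>
    #include <string.h>

    #define PAGE_SZ 4096

    static bool page_is_zero(const uint8_t *host, uint64_t off)
    {
        static const uint8_t zero[PAGE_SZ];   /* stand-in for buffer_is_zero() */
        return memcmp(host + off, zero, PAGE_SZ) == 0;
    }

    /* Reorder offset[] so non-zero pages come first; returns the new
     * normal_num. */
    static uint32_t partition_pages(const uint8_t *host, uint64_t *offset,
                                    uint32_t num)
    {
        uint32_t i = 0, j = num;

        while (i < j) {
            if (!page_is_zero(host, offset[i])) {
                i++;                          /* normal page, keep on the left */
            } else {
                uint64_t tmp = offset[i];     /* zero page, swap to the right */
                offset[i] = offset[--j];
                offset[j] = tmp;
            }
        }
        return i;
    }
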
/openbmc/linux/net/ceph/
pagevec.c
13 void ceph_put_page_vector(struct page **pages, int num_pages, bool dirty) in ceph_put_page_vector() argument
19 set_page_dirty_lock(pages[i]); in ceph_put_page_vector()
20 put_page(pages[i]); in ceph_put_page_vector()
22 kvfree(pages); in ceph_put_page_vector()
26 void ceph_release_page_vector(struct page **pages, int num_pages) in ceph_release_page_vector() argument
31 __free_pages(pages[i], 0); in ceph_release_page_vector()
32 kfree(pages); in ceph_release_page_vector()
37 * allocate a vector of new pages
41 struct page **pages; in ceph_alloc_page_vector() local
44 pages = kmalloc_array(num_pages, sizeof(*pages), flags); in ceph_alloc_page_vector()
[all …]
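
ceph_alloc_page_vector() follows the classic allocate-or-roll-back shape: allocate the pointer array, then each page, and free everything already obtained on failure. A userspace sketch of the same pattern:

    #include <stdlib.h>

    void **alloc_page_vector(int num_pages, size_t page_size)
    {
        void **pages = calloc(num_pages, sizeof(*pages));

        if (!pages)
            return NULL;
        for (int i = 0; i < num_pages; i++) {
            pages[i] = malloc(page_size);
            if (!pages[i]) {
                while (i--)               /* roll back the partial allocation */
                    free(pages[i]);
                free(pages);
                return NULL;
            }
        }
        return pages;
    }
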
/openbmc/linux/drivers/gpu/drm/i915/gem/selftests/
huge_gem_object.c
12 struct sg_table *pages) in huge_free_pages() argument
18 for_each_sgt_page(page, sgt_iter, pages) { in huge_free_pages()
24 sg_free_table(pages); in huge_free_pages()
25 kfree(pages); in huge_free_pages()
34 struct sg_table *pages; in huge_get_pages() local
41 pages = kmalloc(sizeof(*pages), GFP); in huge_get_pages()
42 if (!pages) in huge_get_pages()
45 if (sg_alloc_table(pages, npages, GFP)) { in huge_get_pages()
46 kfree(pages); in huge_get_pages()
50 sg = pages->sgl; in huge_get_pages()
[all …]
/openbmc/linux/Documentation/mm/
H A Dunevictable-lru.rst34 main memory will have over 32 million 4k pages in a single node. When a large
35 fraction of these pages are not evictable for any reason [see below], vmscan
37 of pages that are evictable. This can result in a situation where all CPUs are
41 The unevictable list addresses the following classes of unevictable pages:
51 The infrastructure may also be able to handle other conditions that make pages
83 lists (or "Movable" pages: outside of consideration here). If we were to
104 lru_list enum element). The memory controller tracks the movement of pages to
108 not attempt to reclaim pages on the unevictable list. This has a couple of
111 (1) Because the pages are "hidden" from reclaim on the unevictable list, the
112 reclaim process can be more efficient, dealing only with pages that have a
[all …]
page_migration.rst
5 Page migration allows moving the physical location of pages between
8 system rearranges the physical location of those pages.
10 Also see Documentation/mm/hmm.rst for migrating pages to or from device
14 by moving pages near to the processor where the process accessing that memory
18 pages are located through the MF_MOVE and MF_MOVE_ALL options while setting
19 a new memory policy via mbind(). The pages of a process can also be relocated
21 migrate_pages() function call takes two sets of nodes and moves pages of a
28 pages of a process are located. See also the numa_maps documentation in the
33 administrator may detect the situation and move the pages of the process
36 through user space processes that move pages. A special function call
[all …]
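
The migrate_pages() call mentioned above takes a source and a destination node set. A hedged sketch invoking the raw syscall to move all of a process's pages from node 0 to node 1; the node numbers are illustrative, and CAP_SYS_NICE is needed to act on another process:

    #define _GNU_SOURCE
    #include <sys/syscall.h>
    #include <unistd.h>

    static long move_to_node1(pid_t pid)
    {
        unsigned long old_nodes = 1UL << 0;            /* from node 0 */
        unsigned long new_nodes = 1UL << 1;            /* to node 1 */
        unsigned long maxnode = 8 * sizeof(unsigned long);  /* bits in the masks */

        return syscall(SYS_migrate_pages, pid, maxnode,
                       &old_nodes, &new_nodes);
    }
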
/openbmc/linux/fs/isofs/
compress.c
37 * to one zisofs block. Store the data in the @pages array with @pcount
42 struct page **pages, unsigned poffset, in zisofs_uncompress_block() argument
68 if (!pages[i]) in zisofs_uncompress_block()
70 memzero_page(pages[i], 0, PAGE_SIZE); in zisofs_uncompress_block()
71 SetPageUptodate(pages[i]); in zisofs_uncompress_block()
121 if (pages[curpage]) { in zisofs_uncompress_block()
122 stream.next_out = kmap_local_page(pages[curpage]) in zisofs_uncompress_block()
174 if (pages[curpage]) { in zisofs_uncompress_block()
175 flush_dcache_page(pages[curpage]); in zisofs_uncompress_block()
176 SetPageUptodate(pages[curpage]); in zisofs_uncompress_block()
[all …]
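
zisofs_uncompress_block() drives zlib inflate across an array of page-sized outputs, advancing to the next page as each one fills. A userspace sketch of that loop shape using the ordinary zlib API (the kernel kmaps each page where this code indexes pages[]):

    #include <string.h>
    #include <zlib.h>

    /* Inflate in_len compressed bytes into pcount buffers of psize bytes. */
    static int inflate_to_pages(unsigned char *in, size_t in_len,
                                unsigned char **pages, unsigned pcount,
                                size_t psize)
    {
        z_stream zs;
        int rc = Z_OK;

        memset(&zs, 0, sizeof(zs));
        if (inflateInit(&zs) != Z_OK)
            return -1;
        zs.next_in = in;
        zs.avail_in = in_len;
        for (unsigned i = 0; i < pcount && rc == Z_OK; i++) {
            zs.next_out = pages[i];          /* next page-sized output window */
            zs.avail_out = psize;
            while (zs.avail_out && rc == Z_OK)
                rc = inflate(&zs, Z_NO_FLUSH);
        }
        inflateEnd(&zs);
        return (rc == Z_STREAM_END || rc == Z_OK) ? 0 : -1;
    }
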
/openbmc/linux/drivers/gpu/drm/xen/
xen_drm_front_gem.c
29 struct page **pages; member
48 xen_obj->pages = kvmalloc_array(xen_obj->num_pages, in gem_alloc_pages_array()
50 return !xen_obj->pages ? -ENOMEM : 0; in gem_alloc_pages_array()
55 kvfree(xen_obj->pages); in gem_free_pages_array()
56 xen_obj->pages = NULL; in gem_free_pages_array()
87 * touch the memory. Insert pages now, so both CPU and GPU are happy. in xen_drm_front_gem_object_mmap()
89 * FIXME: as we insert all the pages now then no .fault handler must in xen_drm_front_gem_object_mmap()
92 ret = vm_map_pages(vma, xen_obj->pages, xen_obj->num_pages); in xen_drm_front_gem_object_mmap()
94 DRM_ERROR("Failed to map pages into vma: %d\n", ret); in xen_drm_front_gem_object_mmap()
148 * only allocate array of pointers to pages in gem_create()
[all …]
/openbmc/linux/fs/erofs/
pcpubuf.c
6 * per-CPU virtual memory (in pages) in advance to store such inplace I/O
15 struct page **pages; member
64 struct page **pages, **oldpages; in erofs_pcpubuf_growsize() local
67 pages = kmalloc_array(nrpages, sizeof(*pages), GFP_KERNEL); in erofs_pcpubuf_growsize()
68 if (!pages) { in erofs_pcpubuf_growsize()
74 pages[i] = erofs_allocpage(&pagepool, GFP_KERNEL); in erofs_pcpubuf_growsize()
75 if (!pages[i]) { in erofs_pcpubuf_growsize()
77 oldpages = pages; in erofs_pcpubuf_growsize()
81 ptr = vmap(pages, nrpages, VM_MAP, PAGE_KERNEL); in erofs_pcpubuf_growsize()
84 oldpages = pages; in erofs_pcpubuf_growsize()
[all …]
/openbmc/linux/include/drm/ttm/
ttm_tt.h
42 * struct ttm_tt - This is a structure holding the pages, caching- and aperture
47 /** @pages: Array of pages backing the data. */
48 struct page **pages; member
54 * TTM_TT_FLAG_SWAPPED: Set by TTM when the pages have been unpopulated
56 * pages back in, and unset the flag. Drivers should in general never
59 * TTM_TT_FLAG_ZERO_ALLOC: Set if the pages will be zeroed on
62 * TTM_TT_FLAG_EXTERNAL: Set if the underlying pages were allocated
64 * TTM swapping out such pages. Also important is to prevent TTM from
65 * ever directly mapping these pages.
72 * still valid to use TTM to map the pages directly. This is useful when
[all …]
/openbmc/linux/drivers/xen/
xlate_mmu.c
47 /* Break down the pages in 4KB chunks and call fn for each gfn */
48 static void xen_for_each_gfn(struct page **pages, unsigned nr_gfn, in xen_for_each_gfn() argument
57 page = pages[i / XEN_PFN_PER_PAGE]; in xen_for_each_gfn()
71 struct page **pages; member
99 struct page *page = info->pages[info->index++]; in remap_pte_fn()
148 struct page **pages) in xen_xlate_remap_gfn_array() argument
163 data.pages = pages; in xen_xlate_remap_gfn_array()
184 int nr, struct page **pages) in xen_xlate_unmap_gfn_range() argument
186 xen_for_each_gfn(pages, nr, unmap_gfn, NULL); in xen_xlate_unmap_gfn_range()
205 * xen_xlate_map_ballooned_pages - map a new set of ballooned pages
[all …]
