Searched full:pages (Results 1 – 25 of 682) sorted by relevance

/openbmc/qemu/hw/i2c/
pmbus_device.c
232 pmdev->pages = g_new0(PMBusPage, pmdev->num_pages); in pmbus_pages_alloc()
238 if ((pmdev->pages[i].operation & PB_OP_ON) == 0) { in pmbus_check_limits()
242 if (pmdev->pages[i].read_vout > pmdev->pages[i].vout_ov_fault_limit) { in pmbus_check_limits()
243 pmdev->pages[i].status_word |= PB_STATUS_VOUT; in pmbus_check_limits()
244 pmdev->pages[i].status_vout |= PB_STATUS_VOUT_OV_FAULT; in pmbus_check_limits()
247 if (pmdev->pages[i].read_vout > pmdev->pages[i].vout_ov_warn_limit) { in pmbus_check_limits()
248 pmdev->pages[i].status_word |= PB_STATUS_VOUT; in pmbus_check_limits()
249 pmdev->pages[i].status_vout |= PB_STATUS_VOUT_OV_WARN; in pmbus_check_limits()
252 if (pmdev->pages[i].read_vout < pmdev->pages[i].vout_uv_warn_limit) { in pmbus_check_limits()
253 pmdev->pages[i].status_word |= PB_STATUS_VOUT; in pmbus_check_limits()
[all …]
/openbmc/qemu/hw/sensor/
isl_pmbus_vr.c
73 pmdev->pages[i].operation = ISL_OPERATION_DEFAULT; in isl_pmbus_vr_exit_reset()
74 pmdev->pages[i].on_off_config = ISL_ON_OFF_CONFIG_DEFAULT; in isl_pmbus_vr_exit_reset()
75 pmdev->pages[i].vout_mode = ISL_VOUT_MODE_DEFAULT; in isl_pmbus_vr_exit_reset()
76 pmdev->pages[i].vout_command = ISL_VOUT_COMMAND_DEFAULT; in isl_pmbus_vr_exit_reset()
77 pmdev->pages[i].vout_max = ISL_VOUT_MAX_DEFAULT; in isl_pmbus_vr_exit_reset()
78 pmdev->pages[i].vout_margin_high = ISL_VOUT_MARGIN_HIGH_DEFAULT; in isl_pmbus_vr_exit_reset()
79 pmdev->pages[i].vout_margin_low = ISL_VOUT_MARGIN_LOW_DEFAULT; in isl_pmbus_vr_exit_reset()
80 pmdev->pages[i].vout_transition_rate = ISL_VOUT_TRANSITION_RATE_DEFAULT; in isl_pmbus_vr_exit_reset()
81 pmdev->pages[i].vout_ov_fault_limit = ISL_VOUT_OV_FAULT_LIMIT_DEFAULT; in isl_pmbus_vr_exit_reset()
82 pmdev->pages[i].ot_fault_limit = ISL_OT_FAULT_LIMIT_DEFAULT; in isl_pmbus_vr_exit_reset()
[all …]
adm1272.c
118 if (pmdev->pages[0].read_vout > s->peak_vout) { in adm1272_check_limits()
119 s->peak_vout = pmdev->pages[0].read_vout; in adm1272_check_limits()
122 if (pmdev->pages[0].read_vin > s->peak_vin) { in adm1272_check_limits()
123 s->peak_vin = pmdev->pages[0].read_vin; in adm1272_check_limits()
126 if (pmdev->pages[0].read_iout > s->peak_iout) { in adm1272_check_limits()
127 s->peak_iout = pmdev->pages[0].read_iout; in adm1272_check_limits()
130 if (pmdev->pages[0].read_temperature_1 > s->peak_temperature) { in adm1272_check_limits()
131 s->peak_temperature = pmdev->pages[0].read_temperature_1; in adm1272_check_limits()
134 if (pmdev->pages[0].read_pin > s->peak_pin) { in adm1272_check_limits()
135 s->peak_pin = pmdev->pages[0].read_pin; in adm1272_check_limits()
[all …]
max31785.c
61 /* MAX31785 pages */
142 * | 255 | Applies to all pages |
176 pmbus_send8(pmdev, pmdev->pages[pmdev->page].fan_config_1_2); in max31785_read_byte()
182 pmbus_send16(pmdev, pmdev->pages[pmdev->page].fan_command_1); in max31785_read_byte()
188 pmbus_send16(pmdev, pmdev->pages[pmdev->page].read_fan_speed_1); in max31785_read_byte()
194 pmbus_send16(pmdev, pmdev->pages[pmdev->page].status_fans_1_2); in max31785_read_byte()
337 pmdev->pages[pmdev->page].fan_config_1_2 = pmbus_receive8(pmdev); in max31785_write_data()
343 pmdev->pages[pmdev->page].fan_command_1 = pmbus_receive16(pmdev); in max31785_write_data()
344 pmdev->pages[pmdev->page].read_fan_speed_1 = in max31785_write_data()
346 pmdev->pages[pmdev->page].fan_command_1); in max31785_write_data()
[all …]
adm1266.c
88 pmdev->pages[i].operation = ADM1266_OPERATION_DEFAULT; in adm1266_exit_reset()
89 pmdev->pages[i].revision = ADM1266_PMBUS_REVISION_DEFAULT; in adm1266_exit_reset()
90 pmdev->pages[i].vout_mode = 0; in adm1266_exit_reset()
91 pmdev->pages[i].read_vout = pmbus_data2linear_mode(12, 0); in adm1266_exit_reset()
92 pmdev->pages[i].vout_margin_high = pmbus_data2linear_mode(15, 0); in adm1266_exit_reset()
93 pmdev->pages[i].vout_margin_low = pmbus_data2linear_mode(3, 0); in adm1266_exit_reset()
94 pmdev->pages[i].vout_ov_fault_limit = pmbus_data2linear_mode(16, 0); in adm1266_exit_reset()
95 pmdev->pages[i].revision = ADM1266_PMBUS_REVISION_DEFAULT; in adm1266_exit_reset()
174 PMBusVoutMode *mode = (PMBusVoutMode *)&pmdev->pages[0].vout_mode; in adm1266_get()
191 PMBusVoutMode *mode = (PMBusVoutMode *)&pmdev->pages[0].vout_mode; in adm1266_set()
[all …]
max34451.c
126 | 255 | Applies to all pages. |
181 if (pmdev->pages[i].read_vout == 0) { /* PSU disabled */ in max34451_check_limits()
185 if (pmdev->pages[i].read_vout > s->vout_peak[i]) { in max34451_check_limits()
186 s->vout_peak[i] = pmdev->pages[i].read_vout; in max34451_check_limits()
189 if (pmdev->pages[i].read_vout < s->vout_min[i]) { in max34451_check_limits()
190 s->vout_min[i] = pmdev->pages[i].read_vout; in max34451_check_limits()
193 if (pmdev->pages[i].read_iout > s->iout_peak[i]) { in max34451_check_limits()
194 s->iout_peak[i] = pmdev->pages[i].read_iout; in max34451_check_limits()
199 if (pmdev->pages[i + 16].read_temperature_1 > s->temperature_peak[i]) { in max34451_check_limits()
200 s->temperature_peak[i] = pmdev->pages[i + 16].read_temperature_1; in max34451_check_limits()
[all …]
/openbmc/qemu/migration/
multifd-nocomp.c
29 void multifd_ram_payload_alloc(MultiFDPages_t *pages) in multifd_ram_payload_alloc() argument
31 pages->offset = g_new0(ram_addr_t, multifd_ram_page_count()); in multifd_ram_payload_alloc()
34 void multifd_ram_payload_free(MultiFDPages_t *pages) in multifd_ram_payload_free() argument
36 g_clear_pointer(&pages->offset, g_free); in multifd_ram_payload_free()
51 MultiFDPages_t *pages = &p->data->u.ram; in multifd_set_file_bitmap() local
53 assert(pages->block); in multifd_set_file_bitmap()
55 for (int i = 0; i < pages->normal_num; i++) { in multifd_set_file_bitmap()
56 ramblock_set_file_bmap_atomic(pages->block, pages->offset[i], true); in multifd_set_file_bitmap()
59 for (int i = pages->normal_num; i < pages->num; i++) { in multifd_set_file_bitmap()
60 ramblock_set_file_bmap_atomic(pages->block, pages->offset[i], false); in multifd_set_file_bitmap()
[all …]
multifd-zero-page.c
41 * multifd_send_zero_page_detect: Perform zero page detection on all pages.
43 * Sorts normal pages before zero pages in p->pages->offset and updates
44 * p->pages->normal_num.
50 MultiFDPages_t *pages = &p->data->u.ram; in multifd_send_zero_page_detect() local
51 RAMBlock *rb = pages->block; in multifd_send_zero_page_detect()
53 int j = pages->num - 1; in multifd_send_zero_page_detect()
56 pages->normal_num = pages->num; in multifd_send_zero_page_detect()
61 * Sort the page offset array by moving all normal pages to in multifd_send_zero_page_detect()
62 * the left and all zero pages to the right of the array. in multifd_send_zero_page_detect()
65 uint64_t offset = pages->offset[i]; in multifd_send_zero_page_detect()
[all …]
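Note: the multifd-zero-page.c hits above describe partitioning the offset array so that normal pages come first, zero pages last, with normal_num as the split point. Below is a minimal standalone sketch of that idea; it is not the QEMU implementation, and is_zero_page() is a hypothetical stand-in for the real zero-page test.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Hypothetical zero-page test: true if every byte of the page is zero. */
    static bool is_zero_page(const uint8_t *host, uint64_t offset, size_t page_size)
    {
        const uint8_t *p = host + offset;
        for (size_t k = 0; k < page_size; k++) {
            if (p[k]) {
                return false;
            }
        }
        return true;
    }

    /*
     * Two-index partition: a zero page found on the left is swapped with the
     * rightmost unclassified entry. On return, entries [0, result) are normal
     * pages and [result, num) are zero pages; result plays the role of normal_num.
     */
    static uint32_t partition_zero_pages(uint64_t *offset, uint32_t num,
                                         const uint8_t *host, size_t page_size)
    {
        uint32_t i = 0, j = num;

        while (i < j) {
            if (is_zero_page(host, offset[i], page_size)) {
                uint64_t tmp = offset[i];
                offset[i] = offset[--j];
                offset[j] = tmp;
            } else {
                i++;
            }
        }
        return i;
    }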
dirtyrate.h
19 * Sample 512 pages per GB as default.
42 * Take 1/16 pages in 1G as the maximum sample page count
48 uint64_t sample_pages_per_gigabytes; /* sample pages per GB */
61 uint64_t sample_pages_count; /* count of sampled pages */
62 uint64_t sample_dirty_count; /* count of dirty pages we measure */
63 uint32_t *hash_result; /* array of hash result for sampled pages */
68 uint64_t total_sample_count; /* total sampled pages */
69 uint64_t total_block_mem_MB; /* size of total sampled pages in MB */
79 uint64_t sample_pages; /* sample pages per GB */
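Note: assuming 4 KiB target pages, the sampling figures in the dirtyrate.h comments above work out as in this illustrative calculation (constants taken from the comments, not from the header itself):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        const uint64_t page_size = 4096;
        const uint64_t pages_per_gb = (1ULL << 30) / page_size; /* 262144 pages in 1 GiB */
        const uint64_t default_sample = 512;                    /* ~0.2% of the pages in a GiB */
        const uint64_t max_sample = pages_per_gb / 16;          /* 1/16 of 1 GiB -> 16384 pages */

        printf("pages/GiB=%" PRIu64 " default=%" PRIu64 " max=%" PRIu64 "\n",
               pages_per_gb, default_sample, max_sample);
        return 0;
    }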
multifd.h
88 /* maximum number of allocated pages */
90 /* non zero pages */
92 /* size of the next packet that contains pages */
95 /* zero pages */
102 * - normal pages (initial normal_pages entries)
103 * - zero pages (following zero_pages entries)
119 /* number of used pages */
121 /* number of normal pages */
227 /* size of the next packet that contains pages */
276 /* size of the next packet that contains pages */
[all …]
multifd-qpl.c
36 /* array of hardware jobs; the number of jobs equals the number of pages */
40 /* the number of pages that the QPL needs to process at one time */
181 * @num: the number of pages
381 * multifd_qpl_compress_pages_slow_path: compress pages using slow path
383 * Compress the pages using software. If compression fails, the uncompressed
391 MultiFDPages_t *pages = &p->data->u.ram; in multifd_qpl_compress_pages_slow_path() local
397 for (int i = 0; i < pages->normal_num; i++) { in multifd_qpl_compress_pages_slow_path()
398 buf = pages->block->host + pages->offset[i]; in multifd_qpl_compress_pages_slow_path()
411 * multifd_qpl_compress_pages: compress pages
413 * Submit the pages to the IAA hardware for compression. If hardware
[all …]
ram.c
88 * the pages region in the migration file at a time.
112 * right after the requested urgent pages.
124 /* used by the search for pages to send */
368 * PageSearchStatus structures for the channels when sending pages.
376 /* Last block that we have visited searching for dirty pages */
382 /* How many times we have dirty too many pages */
389 /* number of dirty pages since start_time */
393 /* Amount of xbzrle pages since the beginning of the period */
402 /* total handled target pages at the beginning of period */
404 /* total handled target pages since start */
[all …]
migration.h
51 * 1<<6=64 pages -> 256K chunk when page size is 4K. This gives us
52 * the benefit that all the chunks are 64 pages aligned then the
57 * 1<<18=256K pages -> 1G chunk when page size is 4K. This is the
62 * 1<<31=2G pages -> 8T chunk when page size is 4K. This should be
79 /* Number of small pages copied (in size of TARGET_PAGE_SIZE) */
148 /* Postcopy priority thread is used to receive postcopy requested pages */
169 * An array of temp host huge pages to be used, one for each postcopy
215 /* A tree of pages that we requested to the source VM */
223 * The mutex helps to maintain the requested pages that we sent to the
237 * finished loading the urgent pages. If that happens, the two threads
[all …]
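Note: the chunk-size comments in the migration.h hits above are easy to verify: a chunk of (1 << order) pages at 4 KiB per page gives 64 pages -> 256 KiB, 256 Ki pages -> 1 GiB, and 2 Gi pages -> 8 TiB. A small check, assuming 4 KiB pages:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        const uint64_t page_size = 4096;
        const unsigned orders[] = { 6, 18, 31 };

        for (unsigned i = 0; i < 3; i++) {
            /* chunk size in KiB = ((1 << order) pages * 4096 bytes) / 1024 */
            uint64_t chunk_kib = ((1ULL << orders[i]) * page_size) >> 10;
            printf("1<<%u pages -> %" PRIu64 " KiB\n", orders[i], chunk_kib);
        }
        return 0;
    }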
/openbmc/qemu/docs/devel/migration/
mapped-ram.rst
9 The core of the feature is to ensure that RAM pages are mapped
12 guest is constantly dirtying pages (i.e. live migration). Another
14 pages which are dirtied multiple times will always go to a fixed
16 sequential stream. Having the pages at fixed offsets also allows the
18 pages are ensured to be written respecting O_DIRECT alignment
55 track dirty pages, the migration will write the RAM pages to the disk
81 Instead of having a sequential stream of pages that follow the
82 RAMBlock headers, the dirty pages for a RAMBlock follow its header
86 A bitmap is introduced to track which pages have been written in the
87 migration file. Pages are written at a fixed location for every
[all …]
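Note: the "fixed offsets" idea in the mapped-ram.rst hits above amounts to deriving a page's position in the migration file from its position inside its RAMBlock rather than from stream order, so rewrites of a repeatedly dirtied page land on the same spot. A hedged sketch of that calculation (names invented for illustration, not the QEMU code):

    #include <stdint.h>

    /* Every page owns one fixed slot in the file region reserved for its RAMBlock. */
    static uint64_t page_file_offset(uint64_t block_pages_area_start,
                                     uint64_t page_offset_in_block)
    {
        return block_pages_area_start + page_offset_in_block;
    }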
postcopy.rst
13 transferred, and accesses to pages that are yet to be transferred cause
59 (using madvise) to ensure that no new huge pages are created
60 during the postcopy phase, and to cause any huge pages that
68 pages off the migration stream, while the main thread carries
71 any access to missing pages (on Linux using the 'userfault'
117 During postcopy the source scans the list of dirty pages and sends them
120 scanning restarts from the requested location. This causes requested pages
121 to be sent quickly, and also causes pages directly after the requested page
122 to be sent quickly in the hope that those pages are likely to be used
175 pages, however during postcopy the CPUs are stopped and nothing should
[all …]
vfio.rst
112 System memory dirty pages tracking
118 dirty tracking module and marks system memory pages which were DMA-ed by the
134 IOMMU support for dirty page tracking. For this reason, all pages are
135 perpetually marked dirty, unless the device driver pins pages through external
136 APIs in which case only those pinned pages are perpetually marked dirty.
138 If the above two methods are not supported, all pages are perpetually marked
141 By default, dirty pages are tracked during pre-copy as well as stop-and-copy
143 phases. Copying dirty pages in pre-copy phase helps QEMU to predict if it can
145 dirty pages continuously, then it understands that even in stop-and-copy phase,
146 it is likely to find dirty pages and can predict the downtime accordingly.
[all …]
/openbmc/u-boot/lib/efi_loader/
efi_memory.c
38 * (multiple) page allocation. We have to track the number of pages
89 uint64_t pages; in efi_mem_sort() local
103 pages = cur->num_pages; in efi_mem_sort()
104 prev->num_pages += pages; in efi_mem_sort()
105 prev->physical_start -= pages << EFI_PAGE_SHIFT; in efi_mem_sort()
106 prev->virtual_start -= pages << EFI_PAGE_SHIFT; in efi_mem_sort()
124 * Return Value: the number of overlapping pages which have been
137 * to re-add the already carved out pages to the mapping.
199 uint64_t efi_add_memory_map(uint64_t start, uint64_t pages, int memory_type, in efi_add_memory_map() argument
208 start, pages, memory_type, overlap_only_ram ? "yes" : "no"); in efi_add_memory_map()
[all …]
/openbmc/qemu/linux-headers/asm-mips/
mman.h
46 #define MAP_LOCKED 0x8000 /* pages are locked */
65 #define MCL_ONFAULT 4 /* lock all pages that are faulted in */
70 #define MLOCK_ONFAULT 0x01 /* Lock pages in range after they are faulted in, do not prefault */
75 #define MADV_WILLNEED 3 /* will need these pages */
76 #define MADV_DONTNEED 4 /* don't need these pages */
79 #define MADV_FREE 8 /* free pages only if memory pressure */
80 #define MADV_REMOVE 9 /* remove these pages & resources */
84 #define MADV_MERGEABLE 12 /* KSM may merge identical pages */
85 #define MADV_UNMERGEABLE 13 /* KSM may not merge identical pages */
98 #define MADV_COLD 20 /* deactivate these pages */
[all …]
/openbmc/openbmc/poky/meta/recipes-extended/man-pages/
man-pages_6.14.bb
1 SUMMARY = "Linux man-pages"
2 DESCRIPTION = "The Linux man-pages project documents the Linux kernel and C library interfaces that…
4 HOMEPAGE = "http://www.kernel.org/pub/linux/docs/man-pages"
14 … file://LICENSES/Linux-man-pages-1-para.txt;md5=97ab07585ce6700273bc66461bf46bf2 \
15 … file://LICENSES/Linux-man-pages-copyleft-2-para.txt;md5=1cafc230857da5e43f3d509c425d3c64 \
16 … file://LICENSES/Linux-man-pages-copyleft.txt;md5=173b960c686ff2d26f043ddaeb63f6ce \
17 … file://LICENSES/Linux-man-pages-copyleft-var.txt;md5=d33708712c5918521f47f23b0c4e0d20 \
48 # Only delivers man-pages so FILES:${PN} gets everything
/openbmc/qemu/linux-headers/asm-generic/
mman-common.h
39 #define MLOCK_ONFAULT 0x01 /* Lock pages in range after they are faulted in, do not prefault */
48 #define MADV_WILLNEED 3 /* will need these pages */
49 #define MADV_DONTNEED 4 /* don't need these pages */
52 #define MADV_FREE 8 /* free pages only if memory pressure */
53 #define MADV_REMOVE 9 /* remove these pages & resources */
59 #define MADV_MERGEABLE 12 /* KSM may merge identical pages */
60 #define MADV_UNMERGEABLE 13 /* KSM may not merge identical pages */
72 #define MADV_COLD 20 /* deactivate these pages */
73 #define MADV_PAGEOUT 21 /* reclaim these pages */
78 #define MADV_DONTNEED_LOCKED 24 /* like DONTNEED, but drop locked pages too */
/openbmc/qemu/docs/
xbzrle.txt
14 In order to be able to calculate the update, the previous memory pages need to
15 be stored on the source. Those pages are stored in a dedicated cache
76 Keeping the hot pages in the cache is effective for decreasing cache
79 detected, XBZRLE will only evict pages in the cache that are older than
104 duplicate: E pages
105 normal: F pages
109 xbzrle pages: J pages
110 xbzrle cache miss: K pages
118 could not be compressed. This can happen if the changes in the pages are too
/openbmc/openbmc-test-automation/tools/
github_issues_to_csv
79 pages = {
85 while "last" in pages and "next" in pages:
86 pages = {
93 response = requests.get(pages["next"], auth=auth)
95 if pages["next"] == pages["last"]:
/openbmc/qemu/
page-target.c
12 /* Convert target pages to MiB (2**20). */
13 size_t qemu_target_pages_to_MiB(size_t pages) in qemu_target_pages_to_MiB() argument
20 return pages >> (20 - page_bits); in qemu_target_pages_to_MiB()
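Note: the shift in the page-target.c hit above works because one MiB holds 2^(20 - page_bits) pages. A standalone version with page_bits fixed at 12 (i.e. assuming 4 KiB target pages, whereas QEMU derives it from the target) would be:

    #include <stddef.h>

    /* With 4 KiB pages, one MiB is 2^(20-12) = 256 pages, so the division is
     * a right shift by 8; e.g. 262144 pages -> 1024 MiB. */
    static size_t pages_to_mib(size_t pages)
    {
        const unsigned page_bits = 12; /* assumption: 4 KiB target pages */
        return pages >> (20 - page_bits);
    }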
/openbmc/u-boot/arch/arm/mach-at91/arm926ejs/
eflash.c
11 * AT91SAM9XE128: 1 plane of 8 regions of 32 pages (total 256 pages)
12 * AT91SAM9XE256: 1 plane of 16 regions of 32 pages (total 512 pages)
13 * AT91SAM9XE512: 1 plane of 32 regions of 32 pages (total 1024 pages)
18 * Whole (!) pages can be individually written with erase on the fly.
19 * Writing partial pages will corrupt the rest of the page.
25 * Since pages are erased when written, the "erase" does nothing.
31 * whole pages!
43 * do a read-modify-write for partially programmed pages
/openbmc/webui-vue/.github/workflows/
deploy-docs-to-gh-pages.yml
1 name: Deploy documentation to GitHub pages
28 - name: Force push to gh-pages branch
32 branch: gh-pages
