/openbmc/linux/include/xen/
  page.h
    9: #define XEN_PAGE_SIZE (_AC(1, UL) << XEN_PAGE_SHIFT)
    10: #define XEN_PAGE_MASK (~(XEN_PAGE_SIZE-1))
    23: #define XEN_PFN_PER_PAGE (PAGE_SIZE / XEN_PAGE_SIZE)
    26: #define XEN_PFN_UP(x) (((x) + XEN_PAGE_SIZE-1) >> XEN_PAGE_SHIFT)
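For reference, a minimal standalone sketch of the arithmetic these page.h macros express, assuming the usual 4 KiB Xen page (XEN_PAGE_SHIFT == 12); the DEMO_* names are illustrative stand-ins, not part of the header:

    #include <stdio.h>

    /* Sketch only: assumes XEN_PAGE_SHIFT == 12, i.e. 4 KiB Xen pages. */
    #define DEMO_XEN_PAGE_SHIFT 12
    #define DEMO_XEN_PAGE_SIZE  (1UL << DEMO_XEN_PAGE_SHIFT)
    #define DEMO_XEN_PAGE_MASK  (~(DEMO_XEN_PAGE_SIZE - 1))
    /* Round a byte count up to whole Xen page frames, like XEN_PFN_UP(). */
    #define DEMO_XEN_PFN_UP(x)  (((x) + DEMO_XEN_PAGE_SIZE - 1) >> DEMO_XEN_PAGE_SHIFT)

    int main(void)
    {
        unsigned long len  = 10000;    /* arbitrary byte length */
        unsigned long addr = 0x12345;  /* arbitrary address */

        printf("pages for %lu bytes: %lu\n", len, DEMO_XEN_PFN_UP(len));          /* 3 */
        printf("page base of 0x%lx: 0x%lx\n", addr, addr & DEMO_XEN_PAGE_MASK);   /* 0x12000 */
        return 0;
    }

The same shift/mask pattern is what the callers below rely on whenever they round, align, or split buffers by XEN_PAGE_SIZE.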
/openbmc/qemu/hw/i386/kvm/
  xen_gnttab.c
    39: #define ENTRIES_PER_FRAME_V1 (XEN_PAGE_SIZE / sizeof(grant_entry_v1_t))
    78: XEN_PAGE_SIZE * s->max_frames, &error_abort);    [in xen_gnttab_realize()]
    88: i * XEN_PAGE_SIZE, XEN_PAGE_SIZE);    [in xen_gnttab_realize()]
    93: memset(s->entries.v1, 0, XEN_PAGE_SIZE * s->max_frames);    [in xen_gnttab_realize()]
    258: XEN_PAGE_SIZE);    [in gnt_unref()]
    382: act->mrs = memory_region_find(get_system_memory(), gpa, XEN_PAGE_SIZE);    [in xen_be_gnttab_map_refs()]
    385: !int128_lt(act->mrs.size, int128_make64(XEN_PAGE_SIZE)) &&    [in xen_be_gnttab_map_refs()]
    540: memset(s->entries.v1, 0, XEN_PAGE_SIZE * s->max_frames);    [in xen_gnttab_reset()]
  xen_overlay.c
    37: #define XEN_PAGE_SIZE (1ULL << XEN_PAGE_SHIFT)
    95: XEN_PAGE_SIZE, &error_abort);    [in xen_overlay_realize()]
    101: memset(s->shinfo_ptr, 0, XEN_PAGE_SIZE);    [in xen_overlay_realize()]
  xen_primary_console.c
    61: XEN_PAGE_SIZE, &error_abort);    [in xen_primary_console_realize()]
    64: memset(s->cp, 0, XEN_PAGE_SIZE);    [in xen_primary_console_realize()]
/openbmc/linux/drivers/xen/
  mem-reservation.c
    46: BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);    [in __xenmem_reservation_va_mapping_update()]
    72: BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);    [in __xenmem_reservation_va_mapping_reset()]
  arm-device.c
    24: nr = DIV_ROUND_UP(resource_size(r), XEN_PAGE_SIZE);    [in xen_unmap_device_mmio()]
    58: nr = DIV_ROUND_UP(resource_size(r), XEN_PAGE_SIZE);    [in xen_map_device_mmio()]
  biomerge.c
    11: #if XEN_PAGE_SIZE == PAGE_SIZE    [in xen_biovec_phys_mergeable()]
  grant-dma-ops.c
    95: ret = alloc_pages_exact(n_pages * XEN_PAGE_SIZE, gfp);    [in xen_grant_dma_alloc()]
    102: free_pages_exact(ret, n_pages * XEN_PAGE_SIZE);    [in xen_grant_dma_alloc()]
    142: free_pages_exact(vaddr, n_pages * XEN_PAGE_SIZE);    [in xen_grant_dma_free()]
  grant-table.c
    830: vaddr = memremap(addr, XEN_PAGE_SIZE * max_nr_gframes, MEMREMAP_WB);    [in gnttab_setup_auto_xlat_frames()]
    1215: glen = min_t(unsigned int, XEN_PAGE_SIZE - goffset, len);    [in gnttab_foreach_grant_in_range()]
    1240: fn(pfn_to_gfn(xen_pfn), goffset, XEN_PAGE_SIZE, data);    [in gnttab_foreach_grant()]
    1242: goffset += XEN_PAGE_SIZE;    [in gnttab_foreach_grant()]
    1507: .grefs_per_grant_frame = XEN_PAGE_SIZE /
    1518: .grefs_per_grant_frame = XEN_PAGE_SIZE /
  swiotlb-xen.c
    151: size = ALIGN(size, XEN_PAGE_SIZE);    [in xen_swiotlb_alloc_coherent()]
    184: size = ALIGN(size, XEN_PAGE_SIZE);    [in xen_swiotlb_free_coherent()]
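The goffset/glen arithmetic shown for grant-table.c (lines 1215 and 1240–1242) walks a byte range one Xen page at a time: the first chunk is capped at the distance to the next page boundary, later chunks are whole pages, and the tail is whatever remains. A rough standalone sketch of that pattern, assuming 4 KiB Xen pages (the demo_* names are made up for illustration, not kernel code):

    #include <stddef.h>
    #include <stdio.h>

    #define DEMO_XEN_PAGE_SIZE 4096UL   /* assumes 4 KiB Xen pages */

    /* Walk [offset, offset + len) in Xen-page-sized chunks. */
    static void demo_foreach_chunk(size_t offset, size_t len)
    {
        size_t goffset = offset % DEMO_XEN_PAGE_SIZE;

        while (len) {
            size_t glen = DEMO_XEN_PAGE_SIZE - goffset;  /* room left in this page */
            if (glen > len)
                glen = len;                              /* tail chunk */
            printf("chunk: in-page offset %zu, length %zu\n", goffset, glen);
            len -= glen;
            goffset = 0;     /* every chunk after the first starts page-aligned */
        }
    }

    int main(void)
    {
        demo_foreach_chunk(3000, 9000);   /* spans three Xen pages: 1096 + 4096 + 3808 */
        return 0;
    }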
/openbmc/linux/arch/arm/xen/
  mm.c
    57: if (size + cflush.offset > XEN_PAGE_SIZE)    [in dma_cache_maint()]
    58: cflush.length = XEN_PAGE_SIZE - cflush.offset;    [in dma_cache_maint()]
/openbmc/qemu/hw/net/
  xen_nic.c
    175: if ((txreq.offset + txreq.size) > XEN_PAGE_SIZE) {    [in net_tx_packets()]
    201: tmpbuf = g_malloc(XEN_PAGE_SIZE);    [in net_tx_packets()]
    276: if (size > XEN_PAGE_SIZE - NET_IP_ALIGN) {    [in net_rx_packet()]
    279: XEN_PAGE_SIZE - NET_IP_ALIGN);    [in net_rx_packet()]
    405: BACK_RING_INIT(&netdev->tx_ring, netdev->txs, XEN_PAGE_SIZE);    [in xen_netdev_connect()]
    406: BACK_RING_INIT(&netdev->rx_ring, netdev->rxs, XEN_PAGE_SIZE);    [in xen_netdev_connect()]
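The checks in xen_nic.c at lines 175 and 276 are validation of guest-supplied values: a request's offset plus length must stay inside the single Xen page backing the buffer. A hedged standalone sketch of that bound, with illustrative names and an assumed 4 KiB page:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define DEMO_XEN_PAGE_SIZE 4096u    /* assumes 4 KiB Xen pages */

    /* A tx request's payload lives inside one granted page, so offset + size
     * must not run past the end of that page (cf. the check at line 175). */
    static bool demo_tx_request_fits(uint16_t offset, uint16_t size)
    {
        return (uint32_t)offset + size <= DEMO_XEN_PAGE_SIZE;
    }

    int main(void)
    {
        printf("%d\n", demo_tx_request_fits(100, 1500));   /* 1: fits */
        printf("%d\n", demo_tx_request_fits(4000, 200));   /* 0: overruns the page */
        return 0;
    }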
/openbmc/linux/drivers/xen/xenbus/
  xenbus_client.c
    382: unsigned long ring_size = nr_pages * XEN_PAGE_SIZE;    [in xenbus_setup_ring()]
    413: addr += XEN_PAGE_SIZE;    [in xenbus_setup_ring()]
    451: free_pages_exact(*vaddr, nr_pages * XEN_PAGE_SIZE);    [in xenbus_teardown_ring()]
    752: area = get_vm_area(XEN_PAGE_SIZE * nr_grefs, VM_IOREMAP);    [in xenbus_map_ring_pv()]
    756: XEN_PAGE_SIZE * nr_grefs, map_ring_apply, info))    [in xenbus_map_ring_pv()]
    815: addr = (unsigned long)vaddr + (XEN_PAGE_SIZE * i);    [in xenbus_unmap_ring_pv()]
/openbmc/linux/drivers/net/xen-netback/
  common.h
    68: #define XEN_NETIF_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, XEN_PAGE_SIZE)
    69: #define XEN_NETIF_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, XEN_PAGE_SIZE)
    89: #define MAX_XEN_SKB_FRAGS (65536 / XEN_PAGE_SIZE + 1)
  rx.c
    48: needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE);    [in xenvif_update_needed_slots()]
    379: chunk_len = min_t(size_t, frag_len, XEN_PAGE_SIZE - offset);    [in xenvif_rx_next_chunk()]
    380: chunk_len = min_t(size_t, chunk_len, XEN_PAGE_SIZE -    [in xenvif_rx_next_chunk()]
    411: } while (offset < XEN_PAGE_SIZE && pkt->remaining_len > 0);    [in xenvif_rx_data_slot()]
  hash.c
    348: len > XEN_PAGE_SIZE / sizeof(*mapping))    [in xenvif_set_hash_mapping()]
    353: if (copy_op[0].dest.offset + copy_op[0].len > XEN_PAGE_SIZE) {    [in xenvif_set_hash_mapping()]
    355: copy_op[1].source.offset = XEN_PAGE_SIZE - copy_op[0].dest.offset;    [in xenvif_set_hash_mapping()]
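For the xen-netback common.h macros: __CONST_RING_SIZE() appears to round the number of ring entries that fit in the shared page (after the ring header) down to a power of two, and MAX_XEN_SKB_FRAGS is plain division plus one extra slot for a payload that does not start on a page boundary. A rough sketch of both calculations, using placeholder header and entry sizes rather than the real ring layouts:

    #include <stddef.h>
    #include <stdio.h>

    #define DEMO_XEN_PAGE_SIZE 4096u    /* assumes 4 KiB Xen pages */

    /* Largest power-of-two number of ring entries that fits in the shared
     * page after the ring header -- the general shape of the calculation. */
    static unsigned int demo_ring_entries(size_t page_size, size_t hdr_size,
                                          size_t entry_size)
    {
        unsigned int n = (page_size - hdr_size) / entry_size;
        unsigned int pow2 = 1;

        while (pow2 * 2 <= n)
            pow2 *= 2;
        return pow2;
    }

    int main(void)
    {
        /* Placeholder sizes, purely for illustration. */
        printf("entries: %u\n", demo_ring_entries(DEMO_XEN_PAGE_SIZE, 64, 12));
        /* A 64 KiB payload can touch 65536/4096 + 1 = 17 pages of grant space
         * when it does not start page-aligned. */
        printf("max frags: %u\n", 65536u / DEMO_XEN_PAGE_SIZE + 1);
        return 0;
    }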
/openbmc/qemu/hw/display/
  xenfb.c
    493: munmap(xenfb->pixels, xenfb->fbpages * XEN_PAGE_SIZE);    [in xenfb_map_fb()]
    497: xenfb->fbpages = DIV_ROUND_UP(xenfb->fb_len, XEN_PAGE_SIZE);    [in xenfb_map_fb()]
    499: n_fbdirs = DIV_ROUND_UP(n_fbdirs, XEN_PAGE_SIZE);    [in xenfb_map_fb()]
    532: size_t fb_pages = pd_len * XEN_PAGE_SIZE / mfn_sz;    [in xenfb_configure_fb()]
    533: size_t fb_len_max = fb_pages * XEN_PAGE_SIZE;    [in xenfb_configure_fb()]
    935: fb->pixels = mmap(fb->pixels, fb->fbpages * XEN_PAGE_SIZE,    [in fb_disconnect()]
/openbmc/linux/drivers/block/xen-blkback/
  common.h
    59: #define XEN_PAGES_PER_SEGMENT (PAGE_SIZE / XEN_PAGE_SIZE)
    62: (XEN_PAGE_SIZE/sizeof(struct blkif_request_segment))
  xenbus.c
    221: rsp_prod, XEN_PAGE_SIZE * nr_grefs);    [in xen_blkif_map()]
    222: size = __RING_SIZE(sring_native, XEN_PAGE_SIZE * nr_grefs);    [in xen_blkif_map()]
    231: rsp_prod, XEN_PAGE_SIZE * nr_grefs);    [in xen_blkif_map()]
    232: size = __RING_SIZE(sring_x86_32, XEN_PAGE_SIZE * nr_grefs);    [in xen_blkif_map()]
    241: rsp_prod, XEN_PAGE_SIZE * nr_grefs);    [in xen_blkif_map()]
    242: size = __RING_SIZE(sring_x86_64, XEN_PAGE_SIZE * nr_grefs);    [in xen_blkif_map()]
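Line 62 of the xen-blkback common.h divides the Xen page size by sizeof(struct blkif_request_segment) to get how many segment descriptors one granted frame holds. A small sketch of that division, using an assumed 8-byte descriptor as a stand-in for the real structure from the Xen block interface headers:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    #define DEMO_XEN_PAGE_SIZE 4096u    /* assumes 4 KiB Xen pages */

    /* Stand-in for struct blkif_request_segment; the 8-byte layout here is an
     * assumption for illustration, not the authoritative definition. */
    struct demo_blkif_request_segment {
        uint32_t gref;
        uint8_t  first_sect;
        uint8_t  last_sect;
        uint16_t pad;
    };

    int main(void)
    {
        printf("segments per Xen page: %zu\n",
               (size_t)(DEMO_XEN_PAGE_SIZE /
                        sizeof(struct demo_blkif_request_segment)));   /* 512 */
        return 0;
    }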
/openbmc/qemu/hw/block/dataplane/
  xen-block.c
    106: request->buf = qemu_memalign(XEN_PAGE_SIZE,    [in xen_block_start_request()]
    108: XEN_PAGE_SIZE);    [in xen_block_start_request()]
    190: XEN_PAGE_SIZE) {    [in xen_block_parse_request()]
    758: ring_size = XEN_PAGE_SIZE * dataplane->nr_ring_ref;    [in xen_block_dataplane_start()]
/openbmc/linux/sound/xen/
  xen_snd_front.c
    372: if (XEN_PAGE_SIZE != PAGE_SIZE) {    [in xen_drv_init()]
    374: XEN_PAGE_SIZE, PAGE_SIZE);    [in xen_drv_init()]
/openbmc/qemu/include/hw/xen/
  xen_backend_ops.h
    40: #define XEN_PAGE_SIZE (1UL << XEN_PAGE_SHIFT)
    41: #define XEN_PAGE_MASK (~(XEN_PAGE_SIZE - 1))
/openbmc/linux/drivers/net/
  xen-netfront.c
    85: #define NET_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, XEN_PAGE_SIZE)
    86: #define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, XEN_PAGE_SIZE)
    682: XEN_PAGE_SIZE);    [in bounce_skb()]
    688: if (!IS_ALIGNED((uintptr_t)n->head, XEN_PAGE_SIZE)) {    [in bounce_skb()]
    705: #define MAX_XEN_SKB_FRAGS (65536 / XEN_PAGE_SIZE + 1)
    980: xdp_init_buff(xdp, XEN_PAGE_SIZE - XDP_PACKET_HEADROOM,    [in xennet_run_xdp()]
    1064: rx->offset + rx->status > XEN_PAGE_SIZE)) {    [in xennet_get_responses()]
    1624: unsigned long max_mtu = XEN_PAGE_SIZE - XDP_PACKET_HEADROOM;    [in xennet_xdp_set()]
    1989: XEN_FRONT_RING_INIT(&queue->tx, txs, XEN_PAGE_SIZE);    [in setup_netfront()]
    1996: XEN_FRONT_RING_INIT(&queue->rx, rxs, XEN_PAGE_SIZE);    [in setup_netfront()]
    [all …]
/openbmc/linux/include/xen/interface/io/
  displif.h
    921: #define XENDISPL_EVENT_PAGE_SIZE XEN_PAGE_SIZE
  sndif.h
    1055: #define XENSND_EVENT_PAGE_SIZE XEN_PAGE_SIZE