Lines matching full:pages (identifier search over the Xen DRM frontend GEM code, drivers/gpu/drm/xen/xen_drm_front_gem.c)

29 	struct page **pages;  member
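The matched member at line 29 is the page array that backs every GEM object in this driver. A minimal sketch of the containing object, with every field other than pages inferred from how the later matches use them (num_pages, be_alloc and sgt_imported are assumptions, not confirmed by this listing):

    #include <drm/drm_gem.h>        /* struct drm_gem_object */
    #include <linux/scatterlist.h>  /* struct sg_table */

    /* Sketch: the GEM object owning the matched pages array. */
    struct xen_gem_object {
            struct drm_gem_object base;
            size_t num_pages;              /* entries in pages[] */
            struct page **pages;           /* line 29: the matched member */
            bool be_alloc;                 /* buffer allocated by the backend? */
            struct sg_table *sgt_imported; /* kept for PRIME-imported buffers */
    };

    #define to_xen_gem_obj(obj) \
            container_of(obj, struct xen_gem_object, base)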
48 xen_obj->pages = kvmalloc_array(xen_obj->num_pages, in gem_alloc_pages_array()
50 return !xen_obj->pages ? -ENOMEM : 0; in gem_alloc_pages_array()
55 kvfree(xen_obj->pages); in gem_free_pages_array()
56 xen_obj->pages = NULL; in gem_free_pages_array()
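Lines 48-56 are the helpers that create and destroy that array. A hedged reconstruction around the matched lines; the DIV_ROUND_UP() sizing and the GFP_KERNEL flag are assumptions filled in around what the listing shows:

    static int gem_alloc_pages_array(struct xen_gem_object *xen_obj,
                                     size_t buf_size)
    {
            xen_obj->num_pages = DIV_ROUND_UP(buf_size, PAGE_SIZE);
            /* kvmalloc_array() falls back to vmalloc() for large arrays */
            xen_obj->pages = kvmalloc_array(xen_obj->num_pages,
                                            sizeof(struct page *),
                                            GFP_KERNEL);
            return !xen_obj->pages ? -ENOMEM : 0;
    }

    static void gem_free_pages_array(struct xen_gem_object *xen_obj)
    {
            kvfree(xen_obj->pages); /* pairs with kvmalloc_array() above */
            xen_obj->pages = NULL;
    }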
87 * touch the memory. Insert pages now, so both CPU and GPU are happy. in xen_drm_front_gem_object_mmap()
89 * FIXME: as we insert all the pages now then no .fault handler must in xen_drm_front_gem_object_mmap()
92 ret = vm_map_pages(vma, xen_obj->pages, xen_obj->num_pages); in xen_drm_front_gem_object_mmap()
94 DRM_ERROR("Failed to map pages into vma: %d\n", ret); in xen_drm_front_gem_object_mmap()
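Lines 87-94 sit in the mmap path: every page is inserted into the VMA eagerly with vm_map_pages(), which is why the comment insists that no .fault handler may be installed. A sketch of that call site, assuming the usual GEM mmap signature and the to_xen_gem_obj() helper sketched above:

    static int xen_drm_front_gem_object_mmap(struct drm_gem_object *gem_obj,
                                             struct vm_area_struct *vma)
    {
            struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);
            int ret;

            /*
             * Insert all pages up front so both CPU and GPU see a fully
             * populated mapping; because everything is mapped here, no
             * .fault handler must be provided for this VMA.
             */
            ret = vm_map_pages(vma, xen_obj->pages, xen_obj->num_pages);
            if (ret < 0)
                    DRM_ERROR("Failed to map pages into vma: %d\n", ret);

            return ret;
    }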
148 * only allocate array of pointers to pages in gem_create()
155 * allocate ballooned pages which will be used to map in gem_create()
159 xen_obj->pages); in gem_create()
161 DRM_ERROR("Cannot allocate %zu ballooned pages: %d\n", in gem_create()
171 * need to allocate backing pages now, so we can share those in gem_create()
175 xen_obj->pages = drm_gem_get_pages(&xen_obj->base); in gem_create()
176 if (IS_ERR(xen_obj->pages)) { in gem_create()
177 ret = PTR_ERR(xen_obj->pages); in gem_create()
178 xen_obj->pages = NULL; in gem_create()
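Lines 148-178 are the two allocation strategies in gem_create(). When the backend allocates the buffer, the frontend only needs the pointer array plus ballooned pages onto which the backend's grant references will be mapped; otherwise it allocates real backing pages immediately so they can be shared with the backend. A hedged reconstruction; gem_create_obj(), the cfg.be_alloc flag and xen_alloc_unpopulated_pages() (spelled alloc_xenballooned_pages() on older kernels) are assumptions taken from the upstream driver, not from this listing:

    static struct xen_gem_object *gem_create(struct drm_device *dev, size_t size)
    {
            struct xen_drm_front_drm_info *drm_info = dev->dev_private;
            struct xen_gem_object *xen_obj;
            int ret;

            xen_obj = gem_create_obj(dev, size); /* assumed helper */
            if (IS_ERR(xen_obj))
                    return xen_obj;

            if (drm_info->front_info->cfg.be_alloc) {
                    /* backend will allocate the buffer itself, so
                     * only allocate array of pointers to pages */
                    ret = gem_alloc_pages_array(xen_obj, size);
                    if (ret < 0)
                            goto fail;

                    /* allocate ballooned pages which will be used to map
                     * grant references provided by the backend */
                    ret = xen_alloc_unpopulated_pages(xen_obj->num_pages,
                                                      xen_obj->pages);
                    if (ret < 0) {
                            DRM_ERROR("Cannot allocate %zu ballooned pages: %d\n",
                                      xen_obj->num_pages, ret);
                            gem_free_pages_array(xen_obj);
                            goto fail;
                    }

                    xen_obj->be_alloc = true;
                    return xen_obj;
            }

            /* need to allocate backing pages now, so we can share those
             * with the backend */
            xen_obj->num_pages = DIV_ROUND_UP(size, PAGE_SIZE);
            xen_obj->pages = drm_gem_get_pages(&xen_obj->base);
            if (IS_ERR(xen_obj->pages)) {
                    ret = PTR_ERR(xen_obj->pages);
                    xen_obj->pages = NULL;
                    goto fail;
            }

            return xen_obj;

    fail:
            DRM_ERROR("Failed to allocate buffer with size %zu\n", size);
            return ERR_PTR(ret);
    }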
209 if (xen_obj->pages) { in xen_drm_front_gem_free_object_unlocked()
212 xen_obj->pages); in xen_drm_front_gem_free_object_unlocked()
216 xen_obj->pages, true, false); in xen_drm_front_gem_free_object_unlocked()
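Lines 209-216 are the matching teardown. Imported buffers only drop the sg_table and the pointer array; backend-allocated buffers return their ballooned pages to Xen; self-allocated buffers are released through drm_gem_put_pages(), marked dirty but not accessed (the true/false pair on line 216). A sketch, with xen_free_unpopulated_pages() assumed as the counterpart of the allocation above:

    static void
    xen_drm_front_gem_free_object_unlocked(struct drm_gem_object *gem_obj)
    {
            struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);

            if (xen_obj->base.import_attach) {
                    /* PRIME import: free our bookkeeping, not the pages */
                    drm_prime_gem_destroy(&xen_obj->base, xen_obj->sgt_imported);
                    gem_free_pages_array(xen_obj);
            } else if (xen_obj->pages) {
                    if (xen_obj->be_alloc) {
                            /* give the ballooned pages back to Xen */
                            xen_free_unpopulated_pages(xen_obj->num_pages,
                                                       xen_obj->pages);
                            gem_free_pages_array(xen_obj);
                    } else {
                            /* line 216: dirty = true, accessed = false */
                            drm_gem_put_pages(&xen_obj->base,
                                              xen_obj->pages, true, false);
                    }
            }
            drm_gem_object_release(gem_obj);
            kfree(xen_obj);
    }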
228 return xen_obj->pages; in xen_drm_front_gem_get_pages()
235 if (!xen_obj->pages) in xen_drm_front_gem_get_sg_table()
239 xen_obj->pages, xen_obj->num_pages); in xen_drm_front_gem_get_sg_table()
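Lines 228-239 are the export side: xen_drm_front_gem_get_pages() hands out the raw array, and xen_drm_front_gem_get_sg_table() wraps it in a scatter-gather table. A sketch, assuming the newer drm_prime_pages_to_sg() signature that takes the drm_device first:

    struct page **xen_drm_front_gem_get_pages(struct drm_gem_object *gem_obj)
    {
            struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);

            return xen_obj->pages; /* line 228 */
    }

    struct sg_table *xen_drm_front_gem_get_sg_table(struct drm_gem_object *gem_obj)
    {
            struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);

            if (!xen_obj->pages)
                    return ERR_PTR(-ENOMEM);

            /* build an sg_table describing the (possibly scattered) pages */
            return drm_prime_pages_to_sg(gem_obj->dev,
                                         xen_obj->pages, xen_obj->num_pages);
    }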
263 ret = drm_prime_sg_to_page_array(sgt, xen_obj->pages, in xen_drm_front_gem_import_sg_table()
271 xen_obj->pages); in xen_drm_front_gem_import_sg_table()
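Lines 263-271 are the PRIME import path: the pointer array is filled from the imported sg_table via drm_prime_sg_to_page_array(), and the pages are then announced to the backend. A sketch under stated assumptions; gem_create_obj() and the shape of the xen_drm_front_dbuf_create() call are taken from the upstream driver, not from this listing:

    struct drm_gem_object *
    xen_drm_front_gem_import_sg_table(struct drm_device *dev,
                                      struct dma_buf_attachment *attach,
                                      struct sg_table *sgt)
    {
            struct xen_drm_front_drm_info *drm_info = dev->dev_private;
            struct xen_gem_object *xen_obj;
            size_t size = attach->dmabuf->size;
            int ret;

            xen_obj = gem_create_obj(dev, size); /* assumed helper */
            if (IS_ERR(xen_obj))
                    return ERR_CAST(xen_obj);

            ret = gem_alloc_pages_array(xen_obj, size);
            if (ret < 0)
                    return ERR_PTR(ret);

            xen_obj->sgt_imported = sgt;

            /* line 263: translate the sg_table into the pages array */
            ret = drm_prime_sg_to_page_array(sgt, xen_obj->pages,
                                             xen_obj->num_pages);
            if (ret < 0)
                    return ERR_PTR(ret);

            /* line 271: hand the page array to the backend (assumed call) */
            ret = xen_drm_front_dbuf_create(drm_info->front_info,
                                            xen_drm_front_dbuf_to_cookie(&xen_obj->base),
                                            0, 0, 0, size, sgt->sgl->offset,
                                            xen_obj->pages);
            if (ret < 0)
                    return ERR_PTR(ret);

            return &xen_obj->base;
    }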
287 if (!xen_obj->pages) in xen_drm_front_gem_prime_vmap()
291 vaddr = vmap(xen_obj->pages, xen_obj->num_pages, in xen_drm_front_gem_prime_vmap()
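Lines 287-291 are the kernel-side mapping: vmap() stitches the scattered pages into one contiguous kernel virtual range. A sketch, assuming the iosys_map-based vmap callback used by newer kernels (older ones return the address directly):

    static int xen_drm_front_gem_prime_vmap(struct drm_gem_object *gem_obj,
                                            struct iosys_map *map)
    {
            struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);
            void *vaddr;

            if (!xen_obj->pages)
                    return -ENOMEM;

            /* one contiguous kernel mapping over the scattered pages */
            vaddr = vmap(xen_obj->pages, xen_obj->num_pages,
                         VM_MAP, PAGE_KERNEL);
            if (!vaddr)
                    return -ENOMEM;

            iosys_map_set_vaddr(map, vaddr);
            return 0;
    }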