/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include <linux/pagevec.h>
#include <linux/swap.h>

#include "i915_drv.h"
#include "i915_gem_object.h"

/*
 * Move pages to appropriate lru and release the pagevec, decrementing the
 * ref count of those pages.
 */
static void check_release_pagevec(struct pagevec *pvec)
{
	check_move_unevictable_pages(pvec);
	__pagevec_release(pvec);
	cond_resched();
}

static int shmem_get_pages(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	const unsigned long page_count = obj->base.size / PAGE_SIZE;
	unsigned long i;
	struct address_space *mapping;
	struct sg_table *st;
	struct scatterlist *sg;
	struct sgt_iter sgt_iter;
	struct page *page;
	unsigned long last_pfn = 0;	/* suppress gcc warning */
	unsigned int max_segment = i915_sg_segment_size();
	unsigned int sg_page_sizes;
	struct pagevec pvec;
	gfp_t noreclaim;
	int ret;

	/*
	 * Assert that the object is not currently in any GPU domain. As it
	 * wasn't in the GTT, there shouldn't be any way it could have been in
	 * a GPU cache.
	 */
	GEM_BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
	GEM_BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);

	/*
	 * If there's no chance of allocating enough pages for the whole
	 * object, bail early.
	 */
	if (page_count > totalram_pages())
		return -ENOMEM;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		return -ENOMEM;

rebuild_st:
	if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
		kfree(st);
		return -ENOMEM;
	}
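	/*
	 * The allocation below escalates in three steps if a page cannot be
	 * obtained: a cheap no-reclaim attempt first, then reclaiming our
	 * own buffers via i915_gem_shrink(), and finally the full mapping
	 * gfp mask (with __GFP_RETRY_MAYFAIL) before reporting the failure
	 * back to userspace.
	 */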
	/*
	 * Get the list of pages out of our struct file. They'll be pinned
	 * at this point until we release them.
	 *
	 * Fail silently without starting the shrinker
	 */
	mapping = obj->base.filp->f_mapping;
	mapping_set_unevictable(mapping);
	noreclaim = mapping_gfp_constraint(mapping, ~__GFP_RECLAIM);
	noreclaim |= __GFP_NORETRY | __GFP_NOWARN;

	sg = st->sgl;
	st->nents = 0;
	sg_page_sizes = 0;
	for (i = 0; i < page_count; i++) {
		const unsigned int shrink[] = {
			(I915_SHRINK_BOUND |
			 I915_SHRINK_UNBOUND |
			 I915_SHRINK_PURGEABLE),
			0,
		}, *s = shrink;
		gfp_t gfp = noreclaim;

		do {
			cond_resched();
			page = shmem_read_mapping_page_gfp(mapping, i, gfp);
			if (!IS_ERR(page))
				break;

			if (!*s) {
				ret = PTR_ERR(page);
				goto err_sg;
			}

			i915_gem_shrink(i915, 2 * page_count, NULL, *s++);

			/*
			 * We've tried hard to allocate the memory by reaping
			 * our own buffer, now let the real VM do its job and
			 * go down in flames if truly OOM.
			 *
			 * However, since graphics tend to be disposable,
			 * defer the oom here by reporting the ENOMEM back
			 * to userspace.
			 */
			if (!*s) {
				/* reclaim and warn, but no oom */
				gfp = mapping_gfp_mask(mapping);

				/*
				 * Our bo are always dirty and so we require
				 * kswapd to reclaim our pages (direct reclaim
				 * does not effectively begin pageout of our
				 * buffers on its own). However, direct reclaim
				 * only waits for kswapd when under allocation
				 * congestion. So as a result __GFP_RECLAIM is
				 * unreliable and fails to actually reclaim our
				 * dirty pages -- unless you try over and over
				 * again with !__GFP_NORETRY. However, we still
				 * want to fail this allocation rather than
				 * trigger the out-of-memory killer and for
				 * this we want __GFP_RETRY_MAYFAIL.
				 */
				gfp |= __GFP_RETRY_MAYFAIL;
			}
		} while (1);
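		/*
		 * Coalesce physically contiguous pages into a single sg
		 * entry, up to max_segment, so the DMA mapping layer sees
		 * as few segments as possible.
		 */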
		if (!i ||
		    sg->length >= max_segment ||
		    page_to_pfn(page) != last_pfn + 1) {
			if (i) {
				sg_page_sizes |= sg->length;
				sg = sg_next(sg);
			}
			st->nents++;
			sg_set_page(sg, page, PAGE_SIZE, 0);
		} else {
			sg->length += PAGE_SIZE;
		}
		last_pfn = page_to_pfn(page);

		/* Check that the i965g/gm workaround works. */
		WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
	}
	if (sg) { /* loop terminated early; short sg table */
		sg_page_sizes |= sg->length;
		sg_mark_end(sg);
	}

	/* Trim unused sg entries to avoid wasting memory. */
	i915_sg_trim(st);

	ret = i915_gem_gtt_prepare_pages(obj, st);
	if (ret) {
		/*
		 * DMA remapping failed? One possible cause is that
		 * it could not reserve enough large entries; asking
		 * for PAGE_SIZE chunks instead may be helpful.
		 */
		if (max_segment > PAGE_SIZE) {
			for_each_sgt_page(page, sgt_iter, st)
				put_page(page);
			sg_free_table(st);

			max_segment = PAGE_SIZE;
			goto rebuild_st;
		} else {
			dev_warn(&i915->drm.pdev->dev,
				 "Failed to DMA remap %lu pages\n",
				 page_count);
			goto err_pages;
		}
	}

	if (i915_gem_object_needs_bit17_swizzle(obj))
		i915_gem_object_do_bit_17_swizzle(obj, st);

	__i915_gem_object_set_pages(obj, st, sg_page_sizes);

	return 0;

err_sg:
	sg_mark_end(sg);
err_pages:
	mapping_clear_unevictable(mapping);
	pagevec_init(&pvec);
	for_each_sgt_page(page, sgt_iter, st) {
		if (!pagevec_add(&pvec, page))
			check_release_pagevec(&pvec);
	}
	if (pagevec_count(&pvec))
		check_release_pagevec(&pvec);
	sg_free_table(st);
	kfree(st);

	/*
	 * shmemfs first checks if there is enough memory to allocate the page
	 * and reports ENOSPC should there be insufficient, along with the
	 * usual ENOMEM for a genuine allocation failure.
	 *
	 * We use ENOSPC in our driver to mean that we have run out of aperture
	 * space and so want to translate the error from shmemfs back to our
	 * usual understanding of ENOMEM.
	 */
	if (ret == -ENOSPC)
		ret = -ENOMEM;

	return ret;
}
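/*
 * Shared teardown before an object's pages are released: drop dirty
 * tracking for DONTNEED objects, clflush the pages if they may still be
 * stale in the CPU cache, and return the object to the CPU write domain.
 */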
void
__i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
				struct sg_table *pages,
				bool needs_clflush)
{
	GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);

	if (obj->mm.madv == I915_MADV_DONTNEED)
		obj->mm.dirty = false;

	if (needs_clflush &&
	    (obj->read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
	    !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
		drm_clflush_sg(pages);

	__start_cpu_write(obj);
}

static void
shmem_put_pages(struct drm_i915_gem_object *obj, struct sg_table *pages)
{
	struct sgt_iter sgt_iter;
	struct pagevec pvec;
	struct page *page;

	__i915_gem_object_release_shmem(obj, pages, true);

	i915_gem_gtt_finish_pages(obj, pages);

	if (i915_gem_object_needs_bit17_swizzle(obj))
		i915_gem_object_save_bit_17_swizzle(obj, pages);

	mapping_clear_unevictable(file_inode(obj->base.filp)->i_mapping);

	pagevec_init(&pvec);
	for_each_sgt_page(page, sgt_iter, pages) {
		if (obj->mm.dirty)
			set_page_dirty(page);

		if (obj->mm.madv == I915_MADV_WILLNEED)
			mark_page_accessed(page);

		if (!pagevec_add(&pvec, page))
			check_release_pagevec(&pvec);
	}
	if (pagevec_count(&pvec))
		check_release_pagevec(&pvec);
	obj->mm.dirty = false;

	sg_free_table(pages);
	kfree(pages);
}
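/*
 * Fast path for pwrite before the backing store has been instantiated:
 * write straight into the shmemfs pagecache instead of pinning (and
 * possibly swapping in or clearing) every page of the object.
 */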
static int
shmem_pwrite(struct drm_i915_gem_object *obj,
	     const struct drm_i915_gem_pwrite *arg)
{
	struct address_space *mapping = obj->base.filp->f_mapping;
	char __user *user_data = u64_to_user_ptr(arg->data_ptr);
	u64 remain, offset;
	unsigned int pg;

	/* Caller already validated user args */
	GEM_BUG_ON(!access_ok(user_data, arg->size));

	/*
	 * Before we instantiate/pin the backing store for our use, we
	 * can prepopulate the shmemfs filp efficiently using a write into
	 * the pagecache. We avoid the penalty of instantiating all the
	 * pages, important if the user is just writing to a few and never
	 * uses the object on the GPU, and using a direct write into shmemfs
	 * allows it to avoid the cost of retrieving a page (either swapin
	 * or clearing-before-use) before it is overwritten.
	 */
	if (i915_gem_object_has_pages(obj))
		return -ENODEV;

	if (obj->mm.madv != I915_MADV_WILLNEED)
		return -EFAULT;

	/*
	 * Before the pages are instantiated the object is treated as being
	 * in the CPU domain. The pages will be clflushed as required before
	 * use, and we can freely write into the pages directly. If userspace
	 * races pwrite with any other operation, corruption will ensue -
	 * that is userspace's prerogative!
	 */

	remain = arg->size;
	offset = arg->offset;
	pg = offset_in_page(offset);

	do {
		unsigned int len, unwritten;
		struct page *page;
		void *data, *vaddr;
		int err;
		char c;

		len = PAGE_SIZE - pg;
		if (len > remain)
			len = remain;

		/* Prefault the user page to reduce potential recursion */
		err = __get_user(c, user_data);
		if (err)
			return err;

		err = __get_user(c, user_data + len - 1);
		if (err)
			return err;

		err = pagecache_write_begin(obj->base.filp, mapping,
					    offset, len, 0,
					    &page, &data);
		if (err < 0)
			return err;

		vaddr = kmap_atomic(page);
		unwritten = __copy_from_user_inatomic(vaddr + pg,
						      user_data,
						      len);
		kunmap_atomic(vaddr);

		err = pagecache_write_end(obj->base.filp, mapping,
					  offset, len, len - unwritten,
					  page, data);
		if (err < 0)
			return err;

		/* We don't handle -EFAULT, leave it to the caller to check */
		if (unwritten)
			return -ENODEV;

		remain -= len;
		user_data += len;
		offset += len;
		pg = 0;
	} while (remain);

	return 0;
}
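/*
 * Object operations for shmemfs-backed objects: such objects are backed by
 * struct pages from the kernel's pagecache and are visible to the shrinker.
 */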
const struct drm_i915_gem_object_ops i915_gem_shmem_ops = {
	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
		 I915_GEM_OBJECT_IS_SHRINKABLE,

	.get_pages = shmem_get_pages,
	.put_pages = shmem_put_pages,

	.pwrite = shmem_pwrite,
};

static int create_shmem(struct drm_i915_private *i915,
			struct drm_gem_object *obj,
			size_t size)
{
	unsigned long flags = VM_NORESERVE;
	struct file *filp;

	drm_gem_private_object_init(&i915->drm, obj, size);

	if (i915->mm.gemfs)
		filp = shmem_file_setup_with_mnt(i915->mm.gemfs, "i915", size,
						 flags);
	else
		filp = shmem_file_setup("i915", size, flags);
	if (IS_ERR(filp))
		return PTR_ERR(filp);

	obj->filp = filp;
	return 0;
}
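/*
 * Create a shmemfs-backed GEM object. No backing pages are allocated here;
 * shmem_get_pages() populates the object when the pages are first pinned.
 */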
struct drm_i915_gem_object *
i915_gem_object_create_shmem(struct drm_i915_private *i915, u64 size)
{
	struct drm_i915_gem_object *obj;
	struct address_space *mapping;
	unsigned int cache_level;
	gfp_t mask;
	int ret;

	/* There is a prevalence of the assumption that we fit the object's
	 * page count inside a 32bit _signed_ variable. Let's document this and
	 * catch if we ever need to fix it. In the meantime, if you do spot
	 * such a local variable, please consider fixing!
	 */
	if (size >> PAGE_SHIFT > INT_MAX)
		return ERR_PTR(-E2BIG);

	if (overflows_type(size, obj->base.size))
		return ERR_PTR(-E2BIG);

	obj = i915_gem_object_alloc();
	if (!obj)
		return ERR_PTR(-ENOMEM);

	ret = create_shmem(i915, &obj->base, size);
	if (ret)
		goto fail;

	mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
	if (IS_I965GM(i915) || IS_I965G(i915)) {
		/* 965gm cannot relocate objects above 4GiB. */
		mask &= ~__GFP_HIGHMEM;
		mask |= __GFP_DMA32;
	}

	mapping = obj->base.filp->f_mapping;
	mapping_set_gfp_mask(mapping, mask);
	GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM));

	i915_gem_object_init(obj, &i915_gem_shmem_ops);

	obj->write_domain = I915_GEM_DOMAIN_CPU;
	obj->read_domains = I915_GEM_DOMAIN_CPU;

	if (HAS_LLC(i915))
		/* On some devices, we can have the GPU use the LLC (the CPU
		 * cache) for about a 10% performance improvement
		 * compared to uncached. Graphics requests other than
		 * display scanout are coherent with the CPU in
		 * accessing this cache. This means in this mode we
		 * don't need to clflush on the CPU side, and on the
		 * GPU side we only need to flush internal caches to
		 * get data visible to the CPU.
		 *
		 * However, we maintain the display planes as UC, and so
		 * need to rebind when first used as such.
		 */
		cache_level = I915_CACHE_LLC;
	else
		cache_level = I915_CACHE_NONE;

	i915_gem_object_set_cache_coherency(obj, cache_level);

	trace_i915_gem_object_create(obj);

	return obj;

fail:
	i915_gem_object_free(obj);
	return ERR_PTR(ret);
}
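/*
 * Usage sketch for the helper below (hypothetical caller), e.g. wrapping a
 * firmware blob in a GEM object:
 *
 *	obj = i915_gem_object_create_shmem_from_data(i915, fw->data, fw->size);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 */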
/* Allocate a new GEM object and fill it with the supplied data */
struct drm_i915_gem_object *
i915_gem_object_create_shmem_from_data(struct drm_i915_private *dev_priv,
				       const void *data, size_t size)
{
	struct drm_i915_gem_object *obj;
	struct file *file;
	size_t offset;
	int err;

	obj = i915_gem_object_create_shmem(dev_priv, round_up(size, PAGE_SIZE));
	if (IS_ERR(obj))
		return obj;

	GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU);

	file = obj->base.filp;
	offset = 0;
	do {
		unsigned int len = min_t(typeof(size), size, PAGE_SIZE);
		struct page *page;
		void *pgdata, *vaddr;

		err = pagecache_write_begin(file, file->f_mapping,
					    offset, len, 0,
					    &page, &pgdata);
		if (err < 0)
			goto fail;

		vaddr = kmap(page);
		memcpy(vaddr, data, len);
		kunmap(page);

		err = pagecache_write_end(file, file->f_mapping,
					  offset, len, len,
					  page, pgdata);
		if (err < 0)
			goto fail;

		size -= len;
		data += len;
		offset += len;
	} while (size);

	return obj;

fail:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}