// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>

#include "i915_drv.h"
#include "intel_memory_region.h"
#include "intel_region_ttm.h"

#include "gem/i915_gem_mman.h"
#include "gem/i915_gem_object.h"
#include "gem/i915_gem_region.h"
#include "gem/i915_gem_ttm.h"
#include "gem/i915_gem_ttm_pm.h"

#include "gt/intel_engine_pm.h"
#include "gt/intel_gt.h"
#include "gt/intel_migrate.h"

#define I915_TTM_PRIO_PURGE     0
#define I915_TTM_PRIO_NO_PAGES  1
#define I915_TTM_PRIO_HAS_PAGES 2

/*
 * Size of struct ttm_place vector in on-stack struct ttm_placement allocs
 */
#define I915_TTM_MAX_PLACEMENTS INTEL_REGION_UNKNOWN

/**
 * struct i915_ttm_tt - TTM page vector with additional private information
 * @ttm: The base TTM page vector.
 * @dev: The struct device used for dma mapping and unmapping.
 * @cached_rsgt: The cached scatter-gather table.
 * @is_shmem: Set if using shmem.
 * @filp: The shmem file, if using shmem backend.
 *
 * Note that DMA may be going on right up to the point where the page-
 * vector is unpopulated in delayed destroy. Hence keep the
 * scatter-gather table mapped and cached up to that point. This is
 * different from the cached gem object io scatter-gather table which
 * doesn't have an associated dma mapping.
 */
struct i915_ttm_tt {
	struct ttm_tt ttm;
	struct device *dev;
	struct i915_refct_sgt cached_rsgt;

	bool is_shmem;
	struct file *filp;
};

static const struct ttm_place sys_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.mem_type = I915_PL_SYSTEM,
	.flags = 0,
};

static struct ttm_placement i915_sys_placement = {
	.num_placement = 1,
	.placement = &sys_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags,
};

/**
 * i915_ttm_sys_placement - Return the struct ttm_placement to be
 * used for an object in system memory.
 *
 * Rather than making the struct extern, use this
 * function.
 *
 * Return: A pointer to a static variable for sys placement.
 */
struct ttm_placement *i915_ttm_sys_placement(void)
{
	return &i915_sys_placement;
}
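/*
 * Illustrative sketch only, not part of this file: the typical consumer of
 * the helper above validates a buffer object into system memory without
 * needing the static placement exported, e.g.:
 *
 *	struct ttm_operation_ctx ctx = { .interruptible = true };
 *
 *	ret = ttm_bo_validate(bo, i915_ttm_sys_placement(), &ctx);
 */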
static int i915_ttm_err_to_gem(int err)
{
	/* Fastpath */
	if (likely(!err))
		return 0;

	switch (err) {
	case -EBUSY:
		/*
		 * TTM likes to convert -EDEADLK to -EBUSY, and wants us to
		 * restart the operation, since we don't record the contending
		 * lock. We use -EAGAIN to restart.
		 */
		return -EAGAIN;
	case -ENOSPC:
		/*
		 * Memory type / region is full, and we can't evict.
		 * Except possibly system, which returns -ENOMEM.
		 */
		return -ENXIO;
	default:
		break;
	}

	return err;
}

static bool gpu_binds_iomem(struct ttm_resource *mem)
{
	return mem->mem_type != TTM_PL_SYSTEM;
}

static bool cpu_maps_iomem(struct ttm_resource *mem)
{
	/* Once / if we support GGTT, this is also false for cached ttm_tts */
	return mem->mem_type != TTM_PL_SYSTEM;
}

static enum i915_cache_level
i915_ttm_cache_level(struct drm_i915_private *i915, struct ttm_resource *res,
		     struct ttm_tt *ttm)
{
	return ((HAS_LLC(i915) || HAS_SNOOP(i915)) && !gpu_binds_iomem(res) &&
		ttm->caching == ttm_cached) ? I915_CACHE_LLC :
		I915_CACHE_NONE;
}

static void i915_ttm_adjust_lru(struct drm_i915_gem_object *obj);
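/*
 * Sketch, for illustration only: how a GEM-level caller is expected to
 * react to the translated errors above. -EAGAIN means "back off and restart
 * the whole operation"; the loop shape here is an assumption, not code from
 * this driver (the real restart goes through the ww locking machinery):
 *
 *	do {
 *		ret = i915_ttm_err_to_gem(ttm_bo_validate(bo, placement, &ctx));
 *	} while (ret == -EAGAIN);
 */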
static enum ttm_caching
i915_ttm_select_tt_caching(const struct drm_i915_gem_object *obj)
{
	/*
	 * Objects that are only allowed in system memory get cached
	 * cpu-mappings, as do lmem-only buffers evicted to system for
	 * swapping. Other objects get WC mappings for now, even if placed
	 * in system.
	 */
	if (obj->mm.n_placements <= 1)
		return ttm_cached;

	return ttm_write_combined;
}

static void
i915_ttm_place_from_region(const struct intel_memory_region *mr,
			   struct ttm_place *place,
			   unsigned int flags)
{
	memset(place, 0, sizeof(*place));
	place->mem_type = intel_region_to_ttm_type(mr);

	if (flags & I915_BO_ALLOC_CONTIGUOUS)
		place->flags = TTM_PL_FLAG_CONTIGUOUS;
}

static void
i915_ttm_placement_from_obj(const struct drm_i915_gem_object *obj,
			    struct ttm_place *requested,
			    struct ttm_place *busy,
			    struct ttm_placement *placement)
{
	unsigned int num_allowed = obj->mm.n_placements;
	unsigned int flags = obj->flags;
	unsigned int i;

	placement->num_placement = 1;
	i915_ttm_place_from_region(num_allowed ? obj->mm.placements[0] :
				   obj->mm.region, requested, flags);

	/* Cache this on object? */
	placement->num_busy_placement = num_allowed;
	for (i = 0; i < placement->num_busy_placement; ++i)
		i915_ttm_place_from_region(obj->mm.placements[i], busy + i, flags);

	if (num_allowed == 0) {
		*busy = *requested;
		placement->num_busy_placement = 1;
	}

	placement->placement = requested;
	placement->busy_placement = busy;
}
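/*
 * Worked example (illustrative assumption, not driver code): for an object
 * created with placements {lmem0, smem}, the function above yields a single
 * requested place (lmem0) and two busy places (lmem0, smem), so a first
 * validation attempt can target only lmem0 while the eviction-allowed retry
 * may fall back to system:
 *
 *	struct ttm_place requested, busy[I915_TTM_MAX_PLACEMENTS];
 *	struct ttm_placement placement;
 *
 *	i915_ttm_placement_from_obj(obj, &requested, busy, &placement);
 *	// placement.num_placement == 1, placement.num_busy_placement == 2
 */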
static int i915_ttm_tt_shmem_populate(struct ttm_device *bdev,
				      struct ttm_tt *ttm,
				      struct ttm_operation_ctx *ctx)
{
	struct drm_i915_private *i915 = container_of(bdev, typeof(*i915), bdev);
	struct intel_memory_region *mr = i915->mm.regions[INTEL_MEMORY_SYSTEM];
	struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);
	const unsigned int max_segment = i915_sg_segment_size();
	const size_t size = ttm->num_pages << PAGE_SHIFT;
	struct file *filp = i915_tt->filp;
	struct sgt_iter sgt_iter;
	struct sg_table *st;
	struct page *page;
	unsigned long i;
	int err;

	if (!filp) {
		struct address_space *mapping;
		gfp_t mask;

		filp = shmem_file_setup("i915-shmem-tt", size, VM_NORESERVE);
		if (IS_ERR(filp))
			return PTR_ERR(filp);

		mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;

		mapping = filp->f_mapping;
		mapping_set_gfp_mask(mapping, mask);
		GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM));

		i915_tt->filp = filp;
	}

	st = &i915_tt->cached_rsgt.table;
	err = shmem_sg_alloc_table(i915, st, size, mr, filp->f_mapping,
				   max_segment);
	if (err)
		return err;

	err = dma_map_sgtable(i915_tt->dev, st, DMA_BIDIRECTIONAL,
			      DMA_ATTR_SKIP_CPU_SYNC);
	if (err)
		goto err_free_st;

	i = 0;
	for_each_sgt_page(page, sgt_iter, st)
		ttm->pages[i++] = page;

	if (ttm->page_flags & TTM_TT_FLAG_SWAPPED)
		ttm->page_flags &= ~TTM_TT_FLAG_SWAPPED;

	return 0;

err_free_st:
	shmem_sg_free_table(st, filp->f_mapping, false, false);

	return err;
}

static void i915_ttm_tt_shmem_unpopulate(struct ttm_tt *ttm)
{
	struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);
	bool backup = ttm->page_flags & TTM_TT_FLAG_SWAPPED;
	struct sg_table *st = &i915_tt->cached_rsgt.table;

	shmem_sg_free_table(st, file_inode(i915_tt->filp)->i_mapping,
			    backup, backup);
}

static void i915_ttm_tt_release(struct kref *ref)
{
	struct i915_ttm_tt *i915_tt =
		container_of(ref, typeof(*i915_tt), cached_rsgt.kref);
	struct sg_table *st = &i915_tt->cached_rsgt.table;

	GEM_WARN_ON(st->sgl);

	kfree(i915_tt);
}

static const struct i915_refct_sgt_ops tt_rsgt_ops = {
	.release = i915_ttm_tt_release
};
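/*
 * Note on lifetimes, with a hedged sketch: the page vector is freed via the
 * kref embedded in cached_rsgt rather than directly, so a consumer that
 * grabbed the table keeps the whole i915_ttm_tt alive until it drops its
 * reference. Roughly (illustrative, not driver code):
 *
 *	struct i915_refct_sgt *rsgt = i915_ttm_tt_get_st(ttm);
 *
 *	... use rsgt->table ...
 *	i915_refct_sgt_put(rsgt);	// may end up in i915_ttm_tt_release()
 */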
static struct ttm_tt *i915_ttm_tt_create(struct ttm_buffer_object *bo,
					 uint32_t page_flags)
{
	struct ttm_resource_manager *man =
		ttm_manager_type(bo->bdev, bo->resource->mem_type);
	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
	enum ttm_caching caching = i915_ttm_select_tt_caching(obj);
	struct i915_ttm_tt *i915_tt;
	int ret;

	i915_tt = kzalloc(sizeof(*i915_tt), GFP_KERNEL);
	if (!i915_tt)
		return NULL;

	if (obj->flags & I915_BO_ALLOC_CPU_CLEAR &&
	    man->use_tt)
		page_flags |= TTM_TT_FLAG_ZERO_ALLOC;

	if (i915_gem_object_is_shrinkable(obj) && caching == ttm_cached) {
		page_flags |= TTM_TT_FLAG_EXTERNAL |
			      TTM_TT_FLAG_EXTERNAL_MAPPABLE;
		i915_tt->is_shmem = true;
	}

	ret = ttm_tt_init(&i915_tt->ttm, bo, page_flags, caching);
	if (ret)
		goto err_free;

	__i915_refct_sgt_init(&i915_tt->cached_rsgt, bo->base.size,
			      &tt_rsgt_ops);

	i915_tt->dev = obj->base.dev->dev;

	return &i915_tt->ttm;

err_free:
	kfree(i915_tt);
	return NULL;
}

static int i915_ttm_tt_populate(struct ttm_device *bdev,
				struct ttm_tt *ttm,
				struct ttm_operation_ctx *ctx)
{
	struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);

	if (i915_tt->is_shmem)
		return i915_ttm_tt_shmem_populate(bdev, ttm, ctx);

	return ttm_pool_alloc(&bdev->pool, ttm, ctx);
}

static void i915_ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm)
{
	struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);
	struct sg_table *st = &i915_tt->cached_rsgt.table;

	if (st->sgl)
		dma_unmap_sgtable(i915_tt->dev, st, DMA_BIDIRECTIONAL, 0);

	if (i915_tt->is_shmem) {
		i915_ttm_tt_shmem_unpopulate(ttm);
	} else {
		sg_free_table(st);
		ttm_pool_free(&bdev->pool, ttm);
	}
}

static void i915_ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{
	struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);

	if (i915_tt->filp)
		fput(i915_tt->filp);

	ttm_tt_fini(ttm);
	i915_refct_sgt_put(&i915_tt->cached_rsgt);
}
static bool i915_ttm_eviction_valuable(struct ttm_buffer_object *bo,
				       const struct ttm_place *place)
{
	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);

	/*
	 * EXTERNAL objects should never be swapped out by TTM, instead we need
	 * to handle that ourselves. TTM will already skip such objects for us,
	 * but we would like to avoid grabbing locks for no good reason.
	 */
	if (bo->ttm && bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL)
		return -EBUSY;

	/* Will do for now. Our pinned objects are still on TTM's LRU lists */
	return i915_gem_object_evictable(obj);
}

static void i915_ttm_evict_flags(struct ttm_buffer_object *bo,
				 struct ttm_placement *placement)
{
	*placement = i915_sys_placement;
}

static int i915_ttm_move_notify(struct ttm_buffer_object *bo)
{
	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
	int ret;

	ret = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
	if (ret)
		return ret;

	ret = __i915_gem_object_put_pages(obj);
	if (ret)
		return ret;

	return 0;
}

static void i915_ttm_free_cached_io_rsgt(struct drm_i915_gem_object *obj)
{
	struct radix_tree_iter iter;
	void __rcu **slot;

	if (!obj->ttm.cached_io_rsgt)
		return;

	rcu_read_lock();
	radix_tree_for_each_slot(slot, &obj->ttm.get_io_page.radix, &iter, 0)
		radix_tree_delete(&obj->ttm.get_io_page.radix, iter.index);
	rcu_read_unlock();

	i915_refct_sgt_put(obj->ttm.cached_io_rsgt);
	obj->ttm.cached_io_rsgt = NULL;
}
static void
i915_ttm_adjust_domains_after_move(struct drm_i915_gem_object *obj)
{
	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);

	if (cpu_maps_iomem(bo->resource) || bo->ttm->caching != ttm_cached) {
		obj->write_domain = I915_GEM_DOMAIN_WC;
		obj->read_domains = I915_GEM_DOMAIN_WC;
	} else {
		obj->write_domain = I915_GEM_DOMAIN_CPU;
		obj->read_domains = I915_GEM_DOMAIN_CPU;
	}
}

static void i915_ttm_adjust_gem_after_move(struct drm_i915_gem_object *obj)
{
	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
	unsigned int cache_level;
	unsigned int i;

	/*
	 * If object was moved to an allowable region, update the object
	 * region to consider it migrated. Note that if it's currently not
	 * in an allowable region, it's evicted and we don't update the
	 * object region.
	 */
	if (intel_region_to_ttm_type(obj->mm.region) != bo->resource->mem_type) {
		for (i = 0; i < obj->mm.n_placements; ++i) {
			struct intel_memory_region *mr = obj->mm.placements[i];

			if (intel_region_to_ttm_type(mr) == bo->resource->mem_type &&
			    mr != obj->mm.region) {
				i915_gem_object_release_memory_region(obj);
				i915_gem_object_init_memory_region(obj, mr);
				break;
			}
		}
	}

	obj->mem_flags &= ~(I915_BO_FLAG_STRUCT_PAGE | I915_BO_FLAG_IOMEM);
	obj->mem_flags |= cpu_maps_iomem(bo->resource) ? I915_BO_FLAG_IOMEM :
		I915_BO_FLAG_STRUCT_PAGE;

	cache_level = i915_ttm_cache_level(to_i915(bo->base.dev), bo->resource,
					   bo->ttm);
	i915_gem_object_set_cache_coherency(obj, cache_level);
}

static int i915_ttm_purge(struct drm_i915_gem_object *obj)
{
	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
	struct i915_ttm_tt *i915_tt =
		container_of(bo->ttm, typeof(*i915_tt), ttm);
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false,
	};
	struct ttm_placement place = {};
	int ret;

	if (obj->mm.madv == __I915_MADV_PURGED)
		return 0;

	ret = ttm_bo_validate(bo, &place, &ctx);
	if (ret)
		return ret;

	if (bo->ttm && i915_tt->filp) {
		/*
		 * The below fput (which eventually calls shmem_truncate) might
		 * be delayed by a worker, so when directly called to purge the
		 * pages (like by the shrinker) we should try to be more
		 * aggressive and release the pages immediately.
		 */
		shmem_truncate_range(file_inode(i915_tt->filp),
				     0, (loff_t)-1);
		fput(fetch_and_zero(&i915_tt->filp));
	}

	obj->write_domain = 0;
	obj->read_domains = 0;
	i915_ttm_adjust_gem_after_move(obj);
	i915_ttm_free_cached_io_rsgt(obj);
	obj->mm.madv = __I915_MADV_PURGED;
	return 0;
}
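/*
 * For orientation (sketch under assumptions, not driver code): validating
 * against an empty struct ttm_placement, as done above, is the TTM idiom
 * for "drop the backing store entirely". A purge triggered from madvise
 * would look roughly like:
 *
 *	if (obj->mm.madv == I915_MADV_DONTNEED)
 *		i915_ttm_purge(obj);	// pages gone, marked __I915_MADV_PURGED
 */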
static int i915_ttm_shrinker_release_pages(struct drm_i915_gem_object *obj,
					   bool should_writeback)
{
	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
	struct i915_ttm_tt *i915_tt =
		container_of(bo->ttm, typeof(*i915_tt), ttm);
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false,
	};
	struct ttm_placement place = {};
	int ret;

	if (!bo->ttm || bo->resource->mem_type != TTM_PL_SYSTEM)
		return 0;

	GEM_BUG_ON(!i915_tt->is_shmem);

	if (!i915_tt->filp)
		return 0;

	switch (obj->mm.madv) {
	case I915_MADV_DONTNEED:
		return i915_ttm_purge(obj);
	case __I915_MADV_PURGED:
		return 0;
	}

	if (bo->ttm->page_flags & TTM_TT_FLAG_SWAPPED)
		return 0;

	bo->ttm->page_flags |= TTM_TT_FLAG_SWAPPED;
	ret = ttm_bo_validate(bo, &place, &ctx);
	if (ret) {
		bo->ttm->page_flags &= ~TTM_TT_FLAG_SWAPPED;
		return ret;
	}

	if (should_writeback)
		__shmem_writeback(obj->base.size, i915_tt->filp->f_mapping);

	return 0;
}

static void i915_ttm_swap_notify(struct ttm_buffer_object *bo)
{
	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
	int ret = i915_ttm_move_notify(bo);

	GEM_WARN_ON(ret);
	GEM_WARN_ON(obj->ttm.cached_io_rsgt);
	if (!ret && obj->mm.madv != I915_MADV_WILLNEED)
		i915_ttm_purge(obj);
}

static void i915_ttm_delete_mem_notify(struct ttm_buffer_object *bo)
{
	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);

	if (likely(obj)) {
		__i915_gem_object_pages_fini(obj);
		i915_ttm_free_cached_io_rsgt(obj);
	}
}
static struct intel_memory_region *
i915_ttm_region(struct ttm_device *bdev, int ttm_mem_type)
{
	struct drm_i915_private *i915 = container_of(bdev, typeof(*i915), bdev);

	/* There's some room for optimization here... */
	GEM_BUG_ON(ttm_mem_type != I915_PL_SYSTEM &&
		   ttm_mem_type < I915_PL_LMEM0);
	if (ttm_mem_type == I915_PL_SYSTEM)
		return intel_memory_region_lookup(i915, INTEL_MEMORY_SYSTEM,
						  0);

	return intel_memory_region_lookup(i915, INTEL_MEMORY_LOCAL,
					  ttm_mem_type - I915_PL_LMEM0);
}

static struct i915_refct_sgt *i915_ttm_tt_get_st(struct ttm_tt *ttm)
{
	struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);
	struct sg_table *st;
	int ret;

	if (i915_tt->cached_rsgt.table.sgl)
		return i915_refct_sgt_get(&i915_tt->cached_rsgt);

	st = &i915_tt->cached_rsgt.table;
	ret = sg_alloc_table_from_pages_segment(st,
			ttm->pages, ttm->num_pages,
			0, (unsigned long)ttm->num_pages << PAGE_SHIFT,
			i915_sg_segment_size(), GFP_KERNEL);
	if (ret) {
		st->sgl = NULL;
		return ERR_PTR(ret);
	}

	ret = dma_map_sgtable(i915_tt->dev, st, DMA_BIDIRECTIONAL, 0);
	if (ret) {
		sg_free_table(st);
		return ERR_PTR(ret);
	}

	return i915_refct_sgt_get(&i915_tt->cached_rsgt);
}
static struct i915_refct_sgt *
i915_ttm_resource_get_st(struct drm_i915_gem_object *obj,
			 struct ttm_resource *res)
{
	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);

	if (!gpu_binds_iomem(res))
		return i915_ttm_tt_get_st(bo->ttm);

	/*
	 * If CPU mapping differs, we need to add the ttm_tt pages to
	 * the resulting st. Might make sense for GGTT.
	 */
	GEM_WARN_ON(!cpu_maps_iomem(res));
	if (bo->resource == res) {
		if (!obj->ttm.cached_io_rsgt) {
			struct i915_refct_sgt *rsgt;

			rsgt = intel_region_ttm_resource_to_rsgt(obj->mm.region,
								 res);
			if (IS_ERR(rsgt))
				return rsgt;

			obj->ttm.cached_io_rsgt = rsgt;
		}
		return i915_refct_sgt_get(obj->ttm.cached_io_rsgt);
	}

	return intel_region_ttm_resource_to_rsgt(obj->mm.region, res);
}
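/*
 * Caching behaviour of the helper above, summarized with a hedged example:
 * for the bo's *current* iomem resource the refcounted sgt is built once
 * and cached on the object, so repeated calls are cheap; for any other
 * resource (e.g. a move destination) a fresh table is built each time.
 *
 *	rsgt1 = i915_ttm_resource_get_st(obj, bo->resource);
 *	rsgt2 = i915_ttm_resource_get_st(obj, bo->resource);
 *	// rsgt1 == rsgt2, with two references held on the cached table
 */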
static int i915_ttm_accel_move(struct ttm_buffer_object *bo,
			       bool clear,
			       struct ttm_resource *dst_mem,
			       struct ttm_tt *dst_ttm,
			       struct sg_table *dst_st)
{
	struct drm_i915_private *i915 = container_of(bo->bdev, typeof(*i915),
						     bdev);
	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
	struct i915_request *rq;
	struct ttm_tt *src_ttm = bo->ttm;
	enum i915_cache_level src_level, dst_level;
	int ret;

	if (!i915->gt.migrate.context || intel_gt_is_wedged(&i915->gt))
		return -EINVAL;

	dst_level = i915_ttm_cache_level(i915, dst_mem, dst_ttm);
	if (clear) {
		if (bo->type == ttm_bo_type_kernel)
			return -EINVAL;

		intel_engine_pm_get(i915->gt.migrate.context->engine);
		ret = intel_context_migrate_clear(i915->gt.migrate.context, NULL,
						  dst_st->sgl, dst_level,
						  gpu_binds_iomem(dst_mem),
						  0, &rq);

		if (!ret && rq) {
			i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
			i915_request_put(rq);
		}
		intel_engine_pm_put(i915->gt.migrate.context->engine);
	} else {
		struct i915_refct_sgt *src_rsgt =
			i915_ttm_resource_get_st(obj, bo->resource);

		if (IS_ERR(src_rsgt))
			return PTR_ERR(src_rsgt);

		src_level = i915_ttm_cache_level(i915, bo->resource, src_ttm);
		intel_engine_pm_get(i915->gt.migrate.context->engine);
		ret = intel_context_migrate_copy(i915->gt.migrate.context,
						 NULL, src_rsgt->table.sgl,
						 src_level,
						 gpu_binds_iomem(bo->resource),
						 dst_st->sgl, dst_level,
						 gpu_binds_iomem(dst_mem),
						 &rq);
		i915_refct_sgt_put(src_rsgt);
		if (!ret && rq) {
			i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
			i915_request_put(rq);
		}
		intel_engine_pm_put(i915->gt.migrate.context->engine);
	}

	return ret;
}
static void __i915_ttm_move(struct ttm_buffer_object *bo, bool clear,
			    struct ttm_resource *dst_mem,
			    struct ttm_tt *dst_ttm,
			    struct i915_refct_sgt *dst_rsgt,
			    bool allow_accel)
{
	int ret = -EINVAL;

	if (allow_accel)
		ret = i915_ttm_accel_move(bo, clear, dst_mem, dst_ttm,
					  &dst_rsgt->table);
	if (ret) {
		struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
		struct intel_memory_region *dst_reg, *src_reg;
		union {
			struct ttm_kmap_iter_tt tt;
			struct ttm_kmap_iter_iomap io;
		} _dst_iter, _src_iter;
		struct ttm_kmap_iter *dst_iter, *src_iter;

		dst_reg = i915_ttm_region(bo->bdev, dst_mem->mem_type);
		src_reg = i915_ttm_region(bo->bdev, bo->resource->mem_type);
		GEM_BUG_ON(!dst_reg || !src_reg);

		dst_iter = !cpu_maps_iomem(dst_mem) ?
			ttm_kmap_iter_tt_init(&_dst_iter.tt, dst_ttm) :
			ttm_kmap_iter_iomap_init(&_dst_iter.io, &dst_reg->iomap,
						 &dst_rsgt->table,
						 dst_reg->region.start);

		src_iter = !cpu_maps_iomem(bo->resource) ?
			ttm_kmap_iter_tt_init(&_src_iter.tt, bo->ttm) :
			ttm_kmap_iter_iomap_init(&_src_iter.io, &src_reg->iomap,
						 &obj->ttm.cached_io_rsgt->table,
						 src_reg->region.start);

		ttm_move_memcpy(clear, dst_mem->num_pages, dst_iter, src_iter);
	}
}
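/*
 * The fallback path above leans on TTM's kmap iterators so that one memcpy
 * loop handles all four source/destination combinations. A condensed view
 * of the dispatch (illustrative only):
 *
 *	accel (blitter) copy/clear --success--> done
 *	        |
 *	        v  failure, or !allow_accel
 *	ttm_move_memcpy() with ttm_kmap_iter_tt (system pages) or
 *	ttm_kmap_iter_iomap (lmem aperture) on either side
 */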
static int i915_ttm_move(struct ttm_buffer_object *bo, bool evict,
			 struct ttm_operation_ctx *ctx,
			 struct ttm_resource *dst_mem,
			 struct ttm_place *hop)
{
	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
	struct ttm_resource_manager *dst_man =
		ttm_manager_type(bo->bdev, dst_mem->mem_type);
	struct ttm_tt *ttm = bo->ttm;
	struct i915_refct_sgt *dst_rsgt;
	bool clear;
	int ret;

	/* Sync for now. We could do the actual copy async. */
	ret = ttm_bo_wait_ctx(bo, ctx);
	if (ret)
		return ret;

	ret = i915_ttm_move_notify(bo);
	if (ret)
		return ret;

	if (obj->mm.madv != I915_MADV_WILLNEED) {
		i915_ttm_purge(obj);
		ttm_resource_free(bo, &dst_mem);
		return 0;
	}

	/* Populate ttm with pages if needed. Typically system memory. */
	if (ttm && (dst_man->use_tt || (ttm->page_flags & TTM_TT_FLAG_SWAPPED))) {
		ret = ttm_tt_populate(bo->bdev, ttm, ctx);
		if (ret)
			return ret;
	}

	dst_rsgt = i915_ttm_resource_get_st(obj, dst_mem);
	if (IS_ERR(dst_rsgt))
		return PTR_ERR(dst_rsgt);

	clear = !cpu_maps_iomem(bo->resource) && (!ttm || !ttm_tt_is_populated(ttm));
	if (!(clear && ttm && !(ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC)))
		__i915_ttm_move(bo, clear, dst_mem, bo->ttm, dst_rsgt, true);

	ttm_bo_move_sync_cleanup(bo, dst_mem);
	i915_ttm_adjust_domains_after_move(obj);
	i915_ttm_free_cached_io_rsgt(obj);

	if (gpu_binds_iomem(dst_mem) || cpu_maps_iomem(dst_mem)) {
		obj->ttm.cached_io_rsgt = dst_rsgt;
		obj->ttm.get_io_page.sg_pos = dst_rsgt->table.sgl;
		obj->ttm.get_io_page.sg_idx = 0;
	} else {
		i915_refct_sgt_put(dst_rsgt);
	}

	i915_ttm_adjust_lru(obj);
	i915_ttm_adjust_gem_after_move(obj);
	return 0;
}

static int i915_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem)
{
	if (!cpu_maps_iomem(mem))
		return 0;

	mem->bus.caching = ttm_write_combined;
	mem->bus.is_iomem = true;

	return 0;
}

static unsigned long i915_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
					 unsigned long page_offset)
{
	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
	unsigned long base = obj->mm.region->iomap.base - obj->mm.region->region.start;
	struct scatterlist *sg;
	unsigned int ofs;

	GEM_WARN_ON(bo->ttm);

	sg = __i915_gem_object_get_sg(obj, &obj->ttm.get_io_page, page_offset, &ofs, true);

	return ((base + sg_dma_address(sg)) >> PAGE_SHIFT) + ofs;
}
static struct ttm_device_funcs i915_ttm_bo_driver = {
	.ttm_tt_create = i915_ttm_tt_create,
	.ttm_tt_populate = i915_ttm_tt_populate,
	.ttm_tt_unpopulate = i915_ttm_tt_unpopulate,
	.ttm_tt_destroy = i915_ttm_tt_destroy,
	.eviction_valuable = i915_ttm_eviction_valuable,
	.evict_flags = i915_ttm_evict_flags,
	.move = i915_ttm_move,
	.swap_notify = i915_ttm_swap_notify,
	.delete_mem_notify = i915_ttm_delete_mem_notify,
	.io_mem_reserve = i915_ttm_io_mem_reserve,
	.io_mem_pfn = i915_ttm_io_mem_pfn,
};

/**
 * i915_ttm_driver - Return a pointer to the TTM device funcs
 *
 * Return: Pointer to statically allocated TTM device funcs.
 */
struct ttm_device_funcs *i915_ttm_driver(void)
{
	return &i915_ttm_bo_driver;
}
static int __i915_ttm_get_pages(struct drm_i915_gem_object *obj,
				struct ttm_placement *placement)
{
	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false,
	};
	int real_num_busy;
	int ret;

	/* First try only the requested placement. No eviction. */
	real_num_busy = fetch_and_zero(&placement->num_busy_placement);
	ret = ttm_bo_validate(bo, placement, &ctx);
	if (ret) {
		ret = i915_ttm_err_to_gem(ret);
		/*
		 * Anything that wants to restart the operation gets to
		 * do that.
		 */
		if (ret == -EDEADLK || ret == -EINTR || ret == -ERESTARTSYS ||
		    ret == -EAGAIN)
			return ret;

		/*
		 * If the initial attempt fails, allow all accepted placements,
		 * evicting if necessary.
		 */
		placement->num_busy_placement = real_num_busy;
		ret = ttm_bo_validate(bo, placement, &ctx);
		if (ret)
			return i915_ttm_err_to_gem(ret);
	}

	if (bo->ttm && !ttm_tt_is_populated(bo->ttm)) {
		ret = ttm_tt_populate(bo->bdev, bo->ttm, &ctx);
		if (ret)
			return ret;

		i915_ttm_adjust_domains_after_move(obj);
		i915_ttm_adjust_gem_after_move(obj);
	}

	if (!i915_gem_object_has_pages(obj)) {
		struct i915_refct_sgt *rsgt =
			i915_ttm_resource_get_st(obj, bo->resource);

		if (IS_ERR(rsgt))
			return PTR_ERR(rsgt);

		GEM_BUG_ON(obj->mm.rsgt);
		obj->mm.rsgt = rsgt;
		__i915_gem_object_set_pages(obj, &rsgt->table,
					    i915_sg_dma_sizes(rsgt->table.sgl));
	}

	i915_ttm_adjust_lru(obj);
	return ret;
}

static int i915_ttm_get_pages(struct drm_i915_gem_object *obj)
{
	struct ttm_place requested, busy[I915_TTM_MAX_PLACEMENTS];
	struct ttm_placement placement;

	GEM_BUG_ON(obj->mm.n_placements > I915_TTM_MAX_PLACEMENTS);

	/* Move to the requested placement. */
	i915_ttm_placement_from_obj(obj, &requested, busy, &placement);

	return __i915_ttm_get_pages(obj, &placement);
}

/**
 * DOC: Migration vs eviction
 *
 * GEM migration may not be the same as TTM migration / eviction. If
 * the TTM core decides to evict an object it may be evicted to a
 * TTM memory type that is not in the object's allowable GEM regions, or
 * in fact theoretically to a TTM memory type that doesn't correspond to
 * a GEM memory region. In that case the object's GEM region is not
 * updated, and the data is migrated back to the GEM region at
 * get_pages time. TTM may however set up CPU ptes to the object even
 * when it is evicted.
 * Gem forced migration using the i915_ttm_migrate() op is allowed even
 * to regions that are not in the object's list of allowable placements.
 */
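/*
 * Hedged example of the distinction described above (not driver code): a
 * caller can force an object over to system memory with the migrate op,
 * which updates its GEM region, whereas TTM eviction to system would have
 * left obj->mm.region pointing at the original region:
 *
 *	ret = i915_ttm_migrate(obj, i915->mm.regions[INTEL_MEMORY_SYSTEM]);
 *	// on success, obj->mm.region now refers to the system region
 */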
static int i915_ttm_migrate(struct drm_i915_gem_object *obj,
			    struct intel_memory_region *mr)
{
	struct ttm_place requested;
	struct ttm_placement placement;
	int ret;

	i915_ttm_place_from_region(mr, &requested, obj->flags);
	placement.num_placement = 1;
	placement.num_busy_placement = 1;
	placement.placement = &requested;
	placement.busy_placement = &requested;

	ret = __i915_ttm_get_pages(obj, &placement);
	if (ret)
		return ret;

	/*
	 * Reinitialize the region bindings. This is primarily
	 * required for objects where the new region is not in
	 * its allowable placements.
	 */
	if (obj->mm.region != mr) {
		i915_gem_object_release_memory_region(obj);
		i915_gem_object_init_memory_region(obj, mr);
	}

	return 0;
}

static void i915_ttm_put_pages(struct drm_i915_gem_object *obj,
			       struct sg_table *st)
{
	/*
	 * We're currently not called from a shrinker, so put_pages()
	 * typically means the object is about to be destroyed, or called
	 * from move_notify(). So just avoid doing much for now.
	 * If the object is not destroyed next, the TTM eviction logic
	 * and shrinkers will move it out if needed.
	 */

	if (obj->mm.rsgt)
		i915_refct_sgt_put(fetch_and_zero(&obj->mm.rsgt));
}
static void i915_ttm_adjust_lru(struct drm_i915_gem_object *obj)
{
	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
	struct i915_ttm_tt *i915_tt =
		container_of(bo->ttm, typeof(*i915_tt), ttm);
	bool shrinkable =
		bo->ttm && i915_tt->filp && ttm_tt_is_populated(bo->ttm);

	/*
	 * Don't manipulate the TTM LRUs while in TTM bo destruction.
	 * We're called through i915_ttm_delete_mem_notify().
	 */
	if (!kref_read(&bo->kref))
		return;

	/*
	 * We skip managing the shrinker LRU in set_pages() and just manage
	 * everything here. This does at least solve the issue with having
	 * temporary shmem mappings (like with evicted lmem) not being visible
	 * to the shrinker. Only our shmem objects are shrinkable, everything
	 * else we keep as unshrinkable.
	 *
	 * To make sure everything plays nice we keep an extra shrink pin in TTM
	 * if the underlying pages are not currently shrinkable. Once we release
	 * our pin, like when the pages are moved to shmem, the pages will then
	 * be added to the shrinker LRU, assuming the caller isn't also holding
	 * a pin.
	 *
	 * TODO: consider maybe also bumping the shrinker list here when we have
	 * already unpinned it, which should give us something more like an LRU.
	 */
	if (shrinkable != obj->mm.ttm_shrinkable) {
		if (shrinkable) {
			if (obj->mm.madv == I915_MADV_WILLNEED)
				__i915_gem_object_make_shrinkable(obj);
			else
				__i915_gem_object_make_purgeable(obj);
		} else {
			i915_gem_object_make_unshrinkable(obj);
		}

		obj->mm.ttm_shrinkable = shrinkable;
	}

	/*
	 * Put on the correct LRU list depending on the MADV status
	 */
	spin_lock(&bo->bdev->lru_lock);
	if (shrinkable) {
		/* Try to keep shmem_tt from being considered for shrinking. */
		bo->priority = TTM_MAX_BO_PRIORITY - 1;
	} else if (obj->mm.madv != I915_MADV_WILLNEED) {
		bo->priority = I915_TTM_PRIO_PURGE;
	} else if (!i915_gem_object_has_pages(obj)) {
		if (bo->priority < I915_TTM_PRIO_HAS_PAGES)
			bo->priority = I915_TTM_PRIO_HAS_PAGES;
	} else {
		if (bo->priority > I915_TTM_PRIO_NO_PAGES)
			bo->priority = I915_TTM_PRIO_NO_PAGES;
	}

	ttm_bo_move_to_lru_tail(bo, bo->resource, NULL);
	spin_unlock(&bo->bdev->lru_lock);
}
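/*
 * For orientation (illustrative summary, not driver code): TTM walks its
 * per-priority LRU lists from priority 0 upwards when looking for eviction
 * victims, so lower bands are raided first while shrinkable shmem-backed
 * tts are parked in the highest band so the TTM swapper mostly ignores
 * them:
 *
 *	0 = I915_TTM_PRIO_PURGE  (scanned first)
 *	...
 *	TTM_MAX_BO_PRIORITY - 1  (shrinkable shmem, scanned last)
 */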
/*
 * TTM-backed gem object destruction requires some clarification.
 * Basically we have two possibilities here. We can either rely on the
 * i915 delayed destruction and put the TTM object when the object
 * is idle. This would be detected by TTM which would bypass the
 * TTM delayed destroy handling. The other approach is to put the TTM
 * object early and rely on the TTM destroyed handling, and then free
 * the leftover parts of the GEM object once TTM's destroyed list handling is
 * complete. For now, we rely on the latter for two reasons:
 * a) TTM can evict an object even when it's on the delayed destroy list,
 * which in theory allows for complete eviction.
 * b) There is work going on in TTM to allow freeing an object even when
 * it's not idle, and using the TTM destroyed list handling could help us
 * benefit from that.
 */
static void i915_ttm_delayed_free(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!obj->ttm.created);

	ttm_bo_put(i915_gem_to_ttm(obj));
}

static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
{
	struct vm_area_struct *area = vmf->vma;
	struct drm_i915_gem_object *obj =
		i915_ttm_to_gem(area->vm_private_data);
	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
	struct drm_device *dev = bo->base.dev;
	vm_fault_t ret;
	int idx;

	/* Sanity check that we allow writing into this object */
	if (unlikely(i915_gem_object_is_readonly(obj) &&
		     area->vm_flags & VM_WRITE))
		return VM_FAULT_SIGBUS;

	ret = ttm_bo_vm_reserve(bo, vmf);
	if (ret)
		return ret;

	if (drm_dev_enter(dev, &idx)) {
		ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
					       TTM_BO_VM_NUM_PREFAULT, 1);
		drm_dev_exit(idx);
	} else {
		ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
	}
	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
		return ret;

	i915_ttm_adjust_lru(obj);

	dma_resv_unlock(bo->base.resv);
	return ret;
}
1098cf3e3e86SMaarten Lankhorst static int
1099cf3e3e86SMaarten Lankhorst vm_access_ttm(struct vm_area_struct *area, unsigned long addr,
1100cf3e3e86SMaarten Lankhorst	      void *buf, int len, int write)
1101cf3e3e86SMaarten Lankhorst {
1102cf3e3e86SMaarten Lankhorst	struct drm_i915_gem_object *obj =
1103cf3e3e86SMaarten Lankhorst		i915_ttm_to_gem(area->vm_private_data);
1104cf3e3e86SMaarten Lankhorst
1105cf3e3e86SMaarten Lankhorst	if (i915_gem_object_is_readonly(obj) && write)
1106cf3e3e86SMaarten Lankhorst		return -EACCES;
1107cf3e3e86SMaarten Lankhorst
1108cf3e3e86SMaarten Lankhorst	return ttm_bo_vm_access(area, addr, buf, len, write);
1109cf3e3e86SMaarten Lankhorst }
1110cf3e3e86SMaarten Lankhorst
1111cf3e3e86SMaarten Lankhorst static void ttm_vm_open(struct vm_area_struct *vma)
1112cf3e3e86SMaarten Lankhorst {
1113cf3e3e86SMaarten Lankhorst	struct drm_i915_gem_object *obj =
1114cf3e3e86SMaarten Lankhorst		i915_ttm_to_gem(vma->vm_private_data);
1115cf3e3e86SMaarten Lankhorst
1116cf3e3e86SMaarten Lankhorst	GEM_BUG_ON(!obj);
1117cf3e3e86SMaarten Lankhorst	i915_gem_object_get(obj);
1118cf3e3e86SMaarten Lankhorst }
1119cf3e3e86SMaarten Lankhorst
1120cf3e3e86SMaarten Lankhorst static void ttm_vm_close(struct vm_area_struct *vma)
1121cf3e3e86SMaarten Lankhorst {
1122cf3e3e86SMaarten Lankhorst	struct drm_i915_gem_object *obj =
1123cf3e3e86SMaarten Lankhorst		i915_ttm_to_gem(vma->vm_private_data);
1124cf3e3e86SMaarten Lankhorst
1125cf3e3e86SMaarten Lankhorst	GEM_BUG_ON(!obj);
1126cf3e3e86SMaarten Lankhorst	i915_gem_object_put(obj);
1127cf3e3e86SMaarten Lankhorst }
1128cf3e3e86SMaarten Lankhorst
1129cf3e3e86SMaarten Lankhorst static const struct vm_operations_struct vm_ops_ttm = {
1130cf3e3e86SMaarten Lankhorst	.fault = vm_fault_ttm,
1131cf3e3e86SMaarten Lankhorst	.access = vm_access_ttm,
1132cf3e3e86SMaarten Lankhorst	.open = ttm_vm_open,
1133cf3e3e86SMaarten Lankhorst	.close = ttm_vm_close,
1134cf3e3e86SMaarten Lankhorst };
1135cf3e3e86SMaarten Lankhorst
1136cf3e3e86SMaarten Lankhorst static u64 i915_ttm_mmap_offset(struct drm_i915_gem_object *obj)
1137cf3e3e86SMaarten Lankhorst {
1138cf3e3e86SMaarten Lankhorst	/* The ttm_bo must be allocated with I915_BO_ALLOC_USER */
1139cf3e3e86SMaarten Lankhorst	GEM_BUG_ON(!drm_mm_node_allocated(&obj->base.vma_node.vm_node));
1140cf3e3e86SMaarten Lankhorst
1141cf3e3e86SMaarten Lankhorst	return drm_vma_node_offset_addr(&obj->base.vma_node);
1142cf3e3e86SMaarten Lankhorst }
1143cf3e3e86SMaarten Lankhorst
11444bc2d574SMatthew Auld static const struct drm_i915_gem_object_ops i915_gem_ttm_obj_ops = {
1145213d5092SThomas Hellström	.name = "i915_gem_object_ttm",
11465d12ffe6SMatthew Auld	.flags = I915_GEM_OBJECT_IS_SHRINKABLE |
11475d12ffe6SMatthew Auld		 I915_GEM_OBJECT_SELF_MANAGED_SHRINK_LIST,
1148213d5092SThomas Hellström
1149213d5092SThomas Hellström	.get_pages = i915_ttm_get_pages,
1150213d5092SThomas Hellström	.put_pages = i915_ttm_put_pages,
1151213d5092SThomas Hellström	.truncate = i915_ttm_purge,
11527ae03459SMatthew Auld	.shrinker_release_pages = i915_ttm_shrinker_release_pages,
11537ae03459SMatthew Auld
1154213d5092SThomas Hellström	.adjust_lru = i915_ttm_adjust_lru,
1155213d5092SThomas Hellström	.delayed_free = i915_ttm_delayed_free,
1156b6e913e1SThomas Hellström	.migrate = i915_ttm_migrate,
11577ae03459SMatthew Auld
1158cf3e3e86SMaarten Lankhorst	.mmap_offset = i915_ttm_mmap_offset,
1159cf3e3e86SMaarten Lankhorst	.mmap_ops = &vm_ops_ttm,
1160213d5092SThomas Hellström };
1161213d5092SThomas Hellström
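/*
 * For reference, a sketch of how userspace reaches the vm_ops above
 * (the names 'fd', 'handle' and 'size' are assumed; error handling elided):
 *
 *	struct drm_i915_gem_mmap_offset arg = {
 *		.handle = handle,
 *		.flags = I915_MMAP_OFFSET_WB,
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &arg);
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, arg.offset);
 *
 * The returned arg.offset is the value produced by i915_ttm_mmap_offset()
 * above, and the first touch of 'ptr' faults through vm_fault_ttm().
 */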
1162213d5092SThomas Hellström void i915_ttm_bo_destroy(struct ttm_buffer_object *bo)
1163213d5092SThomas Hellström {
1164213d5092SThomas Hellström	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
1165213d5092SThomas Hellström
1166213d5092SThomas Hellström	i915_gem_object_release_memory_region(obj);
1167cf3e3e86SMaarten Lankhorst	mutex_destroy(&obj->ttm.get_io_page.lock);
1168068396bbSThomas Hellström
1169068396bbSThomas Hellström	if (obj->ttm.created) {
1170ebd4a8ecSMatthew Auld		/*
1171ebd4a8ecSMatthew Auld		 * We freely manage the shrinker LRU outside of the mm.pages life
1172ebd4a8ecSMatthew Auld		 * cycle. As a result, when destroying the object we should be
1173ebd4a8ecSMatthew Auld		 * extra paranoid and ensure we remove it from the LRU before
1174ebd4a8ecSMatthew Auld		 * we free the object.
1175ebd4a8ecSMatthew Auld		 *
1176ebd4a8ecSMatthew Auld		 * Touching the ttm_shrinkable outside of the object lock here
1177ebd4a8ecSMatthew Auld		 * should be safe now that the last GEM object ref was dropped.
1178ebd4a8ecSMatthew Auld		 */
1179ebd4a8ecSMatthew Auld		if (obj->mm.ttm_shrinkable)
1180ebd4a8ecSMatthew Auld			i915_gem_object_make_unshrinkable(obj);
1181ebd4a8ecSMatthew Auld
1182c56ce956SThomas Hellström		i915_ttm_backup_free(obj);
1183c56ce956SThomas Hellström
118448b09612SMaarten Lankhorst		/* This releases all gem object bindings to the backend. */
118548b09612SMaarten Lankhorst		__i915_gem_free_object(obj);
118648b09612SMaarten Lankhorst
1187213d5092SThomas Hellström		call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
1188068396bbSThomas Hellström	} else {
1189068396bbSThomas Hellström		__i915_gem_object_fini(obj);
1190068396bbSThomas Hellström	}
1191213d5092SThomas Hellström }
1192213d5092SThomas Hellström
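/*
 * A minimal sketch of the in-kernel creation path that ends up in
 * __i915_gem_ttm_object_init() below, dispatched via mem->ops->init_object().
 * example_create_smem() is a hypothetical helper; it assumes the
 * four-argument i915_gem_object_create_region() (no forced page size,
 * no special flags).
 */
static __maybe_unused struct drm_i915_gem_object *
example_create_smem(struct drm_i915_private *i915, resource_size_t size)
{
	struct intel_memory_region *mr = i915->mm.regions[INTEL_REGION_SMEM];

	return i915_gem_object_create_region(mr, size, 0, 0);
}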
1193213d5092SThomas Hellström /**
1194213d5092SThomas Hellström  * __i915_gem_ttm_object_init - Initialize a ttm-backed i915 gem object
1195213d5092SThomas Hellström  * @mem: The initial memory region for the object.
1196213d5092SThomas Hellström  * @obj: The gem object.
1197213d5092SThomas Hellström  * @size: Object size in bytes.
 * @page_size: The page size to force for the backing store, or 0 for the region default. Kernel internal only.
1198213d5092SThomas Hellström  * @flags: gem object flags.
1199213d5092SThomas Hellström  *
1200213d5092SThomas Hellström  * Return: 0 on success, negative error code on failure.
1201213d5092SThomas Hellström  */
1202213d5092SThomas Hellström int __i915_gem_ttm_object_init(struct intel_memory_region *mem,
1203213d5092SThomas Hellström			       struct drm_i915_gem_object *obj,
1204213d5092SThomas Hellström			       resource_size_t size,
1205d22632c8SMatthew Auld			       resource_size_t page_size,
1206213d5092SThomas Hellström			       unsigned int flags)
1207213d5092SThomas Hellström {
1208213d5092SThomas Hellström	static struct lock_class_key lock_class;
1209213d5092SThomas Hellström	struct drm_i915_private *i915 = mem->i915;
12103c2b8f32SThomas Hellström	struct ttm_operation_ctx ctx = {
12113c2b8f32SThomas Hellström		.interruptible = true,
12123c2b8f32SThomas Hellström		.no_wait_gpu = false,
12133c2b8f32SThomas Hellström	};
1214213d5092SThomas Hellström	enum ttm_bo_type bo_type;
1215213d5092SThomas Hellström	int ret;
1216213d5092SThomas Hellström
1217213d5092SThomas Hellström	drm_gem_private_object_init(&i915->drm, &obj->base, size);
1218213d5092SThomas Hellström	i915_gem_object_init(obj, &i915_gem_ttm_obj_ops, &lock_class, flags);
1219068396bbSThomas Hellström
1220068396bbSThomas Hellström	/* Don't put on a region list until we're either locked or fully initialized. */
1221068396bbSThomas Hellström	obj->mm.region = intel_memory_region_get(mem);
1222068396bbSThomas Hellström	INIT_LIST_HEAD(&obj->mm.region_link);
1223068396bbSThomas Hellström
1224cf3e3e86SMaarten Lankhorst	INIT_RADIX_TREE(&obj->ttm.get_io_page.radix, GFP_KERNEL | __GFP_NOWARN);
1225cf3e3e86SMaarten Lankhorst	mutex_init(&obj->ttm.get_io_page.lock);
1226213d5092SThomas Hellström	bo_type = (obj->flags & I915_BO_ALLOC_USER) ? ttm_bo_type_device :
1227213d5092SThomas Hellström		ttm_bo_type_kernel;
1228213d5092SThomas Hellström
12293c2b8f32SThomas Hellström	obj->base.vma_node.driver_private = i915_gem_to_ttm(obj);
12303c2b8f32SThomas Hellström
1231d22632c8SMatthew Auld	/* Forcing the page size is kernel internal only */
1232d22632c8SMatthew Auld	GEM_BUG_ON(page_size && obj->mm.n_placements);
1233d22632c8SMatthew Auld
1234213d5092SThomas Hellström	/*
1235ebd4a8ecSMatthew Auld	 * Keep an extra shrink pin to prevent the object from being made
1236ebd4a8ecSMatthew Auld	 * shrinkable too early. If the ttm_tt is ever allocated in shmem, we
1237ebd4a8ecSMatthew Auld	 * drop the pin. The TTM backend manages the shrinker LRU itself,
1238ebd4a8ecSMatthew Auld	 * outside of the normal mm.pages life cycle.
1239ebd4a8ecSMatthew Auld	 */
1240ebd4a8ecSMatthew Auld	i915_gem_object_make_unshrinkable(obj);
1241ebd4a8ecSMatthew Auld
1242ebd4a8ecSMatthew Auld	/*
1243213d5092SThomas Hellström	 * If this function fails, it will call the destructor, but
1244213d5092SThomas Hellström	 * our caller still owns the object. So no freeing in the
1245213d5092SThomas Hellström	 * destructor until obj->ttm.created is true.
1246213d5092SThomas Hellström	 * Similarly, in delayed_destroy, we can't call ttm_bo_put()
1247213d5092SThomas Hellström	 * until successful initialization.
1248213d5092SThomas Hellström	 */
12493c2b8f32SThomas Hellström	ret = ttm_bo_init_reserved(&i915->bdev, i915_gem_to_ttm(obj), size,
125013c2ceb6SMatthew Auld				   bo_type, &i915_sys_placement,
1251d22632c8SMatthew Auld				   page_size >> PAGE_SHIFT,
12523c2b8f32SThomas Hellström				   &ctx, NULL, NULL, i915_ttm_bo_destroy);
12533c2b8f32SThomas Hellström	if (ret)
1254b07a6483SThomas Hellström		return i915_ttm_err_to_gem(ret);
1255213d5092SThomas Hellström
1256213d5092SThomas Hellström	obj->ttm.created = true;
1257068396bbSThomas Hellström	i915_gem_object_release_memory_region(obj);
1258068396bbSThomas Hellström	i915_gem_object_init_memory_region(obj, mem);
12593c2b8f32SThomas Hellström	i915_ttm_adjust_domains_after_move(obj);
12603c2b8f32SThomas Hellström	i915_ttm_adjust_gem_after_move(obj);
12613c2b8f32SThomas Hellström	i915_gem_object_unlock(obj);
1262213d5092SThomas Hellström
12633c2b8f32SThomas Hellström	return 0;
1264213d5092SThomas Hellström }
126532b7cf51SThomas Hellström
126632b7cf51SThomas Hellström static const struct intel_memory_region_ops ttm_system_region_ops = {
126732b7cf51SThomas Hellström	.init_object = __i915_gem_ttm_object_init,
126832b7cf51SThomas Hellström };
126932b7cf51SThomas Hellström
127032b7cf51SThomas Hellström struct intel_memory_region *
127132b7cf51SThomas Hellström i915_gem_ttm_system_setup(struct drm_i915_private *i915,
127232b7cf51SThomas Hellström			  u16 type, u16 instance)
127332b7cf51SThomas Hellström {
127432b7cf51SThomas Hellström	struct intel_memory_region *mr;
127532b7cf51SThomas Hellström
127632b7cf51SThomas Hellström	mr = intel_memory_region_create(i915, 0,
127732b7cf51SThomas Hellström					totalram_pages() << PAGE_SHIFT,
127832b7cf51SThomas Hellström					PAGE_SIZE, 0,
127932b7cf51SThomas Hellström					type, instance,
128032b7cf51SThomas Hellström					&ttm_system_region_ops);
128132b7cf51SThomas Hellström	if (IS_ERR(mr))
128232b7cf51SThomas Hellström		return mr;
128332b7cf51SThomas Hellström
128432b7cf51SThomas Hellström	intel_memory_region_set_name(mr, "system-ttm");
128532b7cf51SThomas Hellström	return mr;
1286213d5092SThomas Hellström }
12870d938863SThomas Hellström
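/*
 * i915_gem_ttm_system_setup() above is intended to be called from the
 * memory region probe code, roughly as follows (the exact call site in
 * intel_memory_regions_hw_probe() is assumed here):
 *
 *	mem = i915_gem_ttm_system_setup(i915, INTEL_MEMORY_SYSTEM, 0);
 *	if (IS_ERR(mem))
 *		return PTR_ERR(mem);
 *	i915->mm.regions[INTEL_REGION_SMEM] = mem;
 */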
12880d938863SThomas Hellström /**
12890d938863SThomas Hellström  * i915_gem_obj_copy_ttm - Copy the contents of one ttm-based gem object to
12900d938863SThomas Hellström  * another
12910d938863SThomas Hellström  * @dst: The destination object
12920d938863SThomas Hellström  * @src: The source object
12930d938863SThomas Hellström  * @allow_accel: Allow using the blitter. Otherwise TTM memcpy is used.
12940d938863SThomas Hellström  * @intr: Whether to perform waits interruptibly.
12950d938863SThomas Hellström  *
12960d938863SThomas Hellström  * Note: The caller is responsible for ensuring that the underlying
12970d938863SThomas Hellström  * TTM objects are populated if needed and locked.
12980d938863SThomas Hellström  *
12990d938863SThomas Hellström  * Return: Zero on success. Negative error code on error. If @intr == true,
13000d938863SThomas Hellström  * then it may return -ERESTARTSYS or -EINTR.
13010d938863SThomas Hellström  */
13020d938863SThomas Hellström int i915_gem_obj_copy_ttm(struct drm_i915_gem_object *dst,
13030d938863SThomas Hellström			  struct drm_i915_gem_object *src,
13040d938863SThomas Hellström			  bool allow_accel, bool intr)
13050d938863SThomas Hellström {
13060d938863SThomas Hellström	struct ttm_buffer_object *dst_bo = i915_gem_to_ttm(dst);
13070d938863SThomas Hellström	struct ttm_buffer_object *src_bo = i915_gem_to_ttm(src);
13080d938863SThomas Hellström	struct ttm_operation_ctx ctx = {
13090d938863SThomas Hellström		.interruptible = intr,
13100d938863SThomas Hellström	};
1311*cad7109aSThomas Hellström	struct i915_refct_sgt *dst_rsgt;
13120d938863SThomas Hellström	int ret;
13130d938863SThomas Hellström
13140d938863SThomas Hellström	assert_object_held(dst);
13150d938863SThomas Hellström	assert_object_held(src);
13160d938863SThomas Hellström
13170d938863SThomas Hellström	/*
13180d938863SThomas Hellström	 * Sync for now. This will change with async moves.
13190d938863SThomas Hellström	 */
13200d938863SThomas Hellström	ret = ttm_bo_wait_ctx(dst_bo, &ctx);
13210d938863SThomas Hellström	if (!ret)
13220d938863SThomas Hellström		ret = ttm_bo_wait_ctx(src_bo, &ctx);
13230d938863SThomas Hellström	if (ret)
13240d938863SThomas Hellström		return ret;
13250d938863SThomas Hellström
1326*cad7109aSThomas Hellström	dst_rsgt = i915_ttm_resource_get_st(dst, dst_bo->resource);
13270d938863SThomas Hellström	__i915_ttm_move(src_bo, false, dst_bo->resource, dst_bo->ttm,
1328*cad7109aSThomas Hellström			dst_rsgt, allow_accel);
1329*cad7109aSThomas Hellström
1330*cad7109aSThomas Hellström	i915_refct_sgt_put(dst_rsgt);
13310d938863SThomas Hellström
13320d938863SThomas Hellström	return 0;
13330d938863SThomas Hellström }
1334
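/*
 * A minimal sketch of a caller honouring the locking rules documented
 * above: both objects are locked through a ww transaction before
 * i915_gem_obj_copy_ttm() runs. example_copy() is a hypothetical helper;
 * the backing store of both objects is assumed to be populated.
 */
static __maybe_unused int example_copy(struct drm_i915_gem_object *dst,
				       struct drm_i915_gem_object *src)
{
	struct i915_gem_ww_ctx ww;
	int err;

	for_i915_gem_ww(&ww, err, true) {
		err = i915_gem_object_lock(dst, &ww);
		if (err)
			continue;

		err = i915_gem_object_lock(src, &ww);
		if (err)
			continue;

		/* Prefer the blitter, wait interruptibly. */
		err = i915_gem_obj_copy_ttm(dst, src, true, true);
	}

	return err;
}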