// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include <linux/shmem_fs.h>

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_buddy.h>

#include "i915_drv.h"
#include "i915_ttm_buddy_manager.h"
#include "intel_memory_region.h"
#include "intel_region_ttm.h"

#include "gem/i915_gem_mman.h"
#include "gem/i915_gem_object.h"
#include "gem/i915_gem_region.h"
#include "gem/i915_gem_ttm.h"
#include "gem/i915_gem_ttm_move.h"
#include "gem/i915_gem_ttm_pm.h"
#include "gt/intel_gpu_commands.h"

#define I915_TTM_PRIO_PURGE	0
#define I915_TTM_PRIO_NO_PAGES	1
#define I915_TTM_PRIO_HAS_PAGES	2
#define I915_TTM_PRIO_NEEDS_CPU_ACCESS 3

/*
 * Size of struct ttm_place vector in on-stack struct ttm_placement allocs
 */
#define I915_TTM_MAX_PLACEMENTS INTEL_REGION_UNKNOWN

/**
 * struct i915_ttm_tt - TTM page vector with additional private information
 * @ttm: The base TTM page vector.
 * @dev: The struct device used for dma mapping and unmapping.
 * @cached_rsgt: The cached scatter-gather table.
 * @is_shmem: Set if using shmem.
 * @filp: The shmem file, if using shmem backend.
 *
 * Note that DMA may be going on right up to the point where the page-
 * vector is unpopulated in delayed destroy. Hence keep the
 * scatter-gather table mapped and cached up to that point. This is
 * different from the cached gem object io scatter-gather table which
 * doesn't have an associated dma mapping.
 */
struct i915_ttm_tt {
	struct ttm_tt ttm;
	struct device *dev;
	struct i915_refct_sgt cached_rsgt;

	bool is_shmem;
	struct file *filp;
};

static const struct ttm_place sys_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.mem_type = I915_PL_SYSTEM,
	.flags = 0,
};

static struct ttm_placement i915_sys_placement = {
	.num_placement = 1,
	.placement = &sys_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags,
};

/**
 * i915_ttm_sys_placement - Return the struct ttm_placement to be
 * used for an object in system memory.
 *
 * Rather than making the struct extern, use this
 * function.
 *
 * Return: A pointer to a static variable for sys placement.
 */
struct ttm_placement *i915_ttm_sys_placement(void)
{
	return &i915_sys_placement;
}

static int i915_ttm_err_to_gem(int err)
{
	/* Fastpath */
	if (likely(!err))
		return 0;

	switch (err) {
	case -EBUSY:
		/*
		 * TTM likes to convert -EDEADLK to -EBUSY, and wants us to
		 * restart the operation, since we don't record the contending
		 * lock. We use -EAGAIN to restart.
		 */
		return -EAGAIN;
	case -ENOSPC:
		/*
		 * Memory type / region is full, and we can't evict.
		 * Except possibly system, which returns -ENOMEM instead.
		 */
		return -ENXIO;
	default:
		break;
	}

	return err;
}

static enum ttm_caching
i915_ttm_select_tt_caching(const struct drm_i915_gem_object *obj)
{
	/*
	 * Objects only allowed in system get cached cpu-mappings, as do
	 * lmem-only buffers evicted to system for swapping. Other objects get
	 * WC mapping for now, even if currently in system.
	 */
	if (obj->mm.n_placements <= 1)
		return ttm_cached;

	return ttm_write_combined;
}
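/*
 * Translate an intel_memory_region plus allocation flags into a struct
 * ttm_place: system memory needs no constraints, contiguous allocations set
 * TTM_PL_FLAG_CONTIGUOUS, a fixed offset pins the [fpfn, lpfn) page range,
 * and on small-BAR parts (io_size < total) allocations are either pushed
 * away from the mappable window (GPU-only) or clamped inside it.
 */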
static void
i915_ttm_place_from_region(const struct intel_memory_region *mr,
			   struct ttm_place *place,
			   resource_size_t offset,
			   resource_size_t size,
			   unsigned int flags)
{
	memset(place, 0, sizeof(*place));
	place->mem_type = intel_region_to_ttm_type(mr);

	if (mr->type == INTEL_MEMORY_SYSTEM)
		return;

	if (flags & I915_BO_ALLOC_CONTIGUOUS)
		place->flags |= TTM_PL_FLAG_CONTIGUOUS;
	if (offset != I915_BO_INVALID_OFFSET) {
		place->fpfn = offset >> PAGE_SHIFT;
		place->lpfn = place->fpfn + (size >> PAGE_SHIFT);
	} else if (mr->io_size && mr->io_size < mr->total) {
		if (flags & I915_BO_ALLOC_GPU_ONLY) {
			place->flags |= TTM_PL_FLAG_TOPDOWN;
		} else {
			place->fpfn = 0;
			place->lpfn = mr->io_size >> PAGE_SHIFT;
		}
	}
}

static void
i915_ttm_placement_from_obj(const struct drm_i915_gem_object *obj,
			    struct ttm_place *requested,
			    struct ttm_place *busy,
			    struct ttm_placement *placement)
{
	unsigned int num_allowed = obj->mm.n_placements;
	unsigned int flags = obj->flags;
	unsigned int i;

	placement->num_placement = 1;
	i915_ttm_place_from_region(num_allowed ? obj->mm.placements[0] :
				   obj->mm.region, requested, obj->bo_offset,
				   obj->base.size, flags);

	/* Cache this on object? */
	placement->num_busy_placement = num_allowed;
	for (i = 0; i < placement->num_busy_placement; ++i)
		i915_ttm_place_from_region(obj->mm.placements[i], busy + i,
					   obj->bo_offset, obj->base.size, flags);

	if (num_allowed == 0) {
		*busy = *requested;
		placement->num_busy_placement = 1;
	}

	placement->placement = requested;
	placement->busy_placement = busy;
}
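/*
 * Back a ttm_tt with shmem: lazily create the shmem file on first populate,
 * have shmem allocate the pages into an sg_table, dma-map that table, and
 * mirror the pages into ttm->pages[] so TTM can use them. Clearing
 * TTM_TT_FLAG_SWAPPED marks any previously swapped-out content as resident
 * again.
 */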
static int i915_ttm_tt_shmem_populate(struct ttm_device *bdev,
				      struct ttm_tt *ttm,
				      struct ttm_operation_ctx *ctx)
{
	struct drm_i915_private *i915 = container_of(bdev, typeof(*i915), bdev);
	struct intel_memory_region *mr = i915->mm.regions[INTEL_MEMORY_SYSTEM];
	struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);
	const unsigned int max_segment = i915_sg_segment_size();
	const size_t size = (size_t)ttm->num_pages << PAGE_SHIFT;
	struct file *filp = i915_tt->filp;
	struct sgt_iter sgt_iter;
	struct sg_table *st;
	struct page *page;
	unsigned long i;
	int err;

	if (!filp) {
		struct address_space *mapping;
		gfp_t mask;

		filp = shmem_file_setup("i915-shmem-tt", size, VM_NORESERVE);
		if (IS_ERR(filp))
			return PTR_ERR(filp);

		mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;

		mapping = filp->f_mapping;
		mapping_set_gfp_mask(mapping, mask);
		GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM));

		i915_tt->filp = filp;
	}

	st = &i915_tt->cached_rsgt.table;
	err = shmem_sg_alloc_table(i915, st, size, mr, filp->f_mapping,
				   max_segment);
	if (err)
		return err;

	err = dma_map_sgtable(i915_tt->dev, st, DMA_BIDIRECTIONAL,
			      DMA_ATTR_SKIP_CPU_SYNC);
	if (err)
		goto err_free_st;

	i = 0;
	for_each_sgt_page(page, sgt_iter, st)
		ttm->pages[i++] = page;

	if (ttm->page_flags & TTM_TT_FLAG_SWAPPED)
		ttm->page_flags &= ~TTM_TT_FLAG_SWAPPED;

	return 0;

err_free_st:
	shmem_sg_free_table(st, filp->f_mapping, false, false);

	return err;
}
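/*
 * Note that the cached_rsgt embedded in struct i915_ttm_tt is refcounted:
 * users of the sg-table take a reference via i915_refct_sgt_get(), and the
 * containing i915_ttm_tt is only freed from i915_ttm_tt_release() once the
 * last reference is put, which may be after the ttm_tt itself was destroyed.
 */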
static void i915_ttm_tt_shmem_unpopulate(struct ttm_tt *ttm)
{
	struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);
	bool backup = ttm->page_flags & TTM_TT_FLAG_SWAPPED;
	struct sg_table *st = &i915_tt->cached_rsgt.table;

	shmem_sg_free_table(st, file_inode(i915_tt->filp)->i_mapping,
			    backup, backup);
}

static void i915_ttm_tt_release(struct kref *ref)
{
	struct i915_ttm_tt *i915_tt =
		container_of(ref, typeof(*i915_tt), cached_rsgt.kref);
	struct sg_table *st = &i915_tt->cached_rsgt.table;

	GEM_WARN_ON(st->sgl);

	kfree(i915_tt);
}

static const struct i915_refct_sgt_ops tt_rsgt_ops = {
	.release = i915_ttm_tt_release
};
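/*
 * Create the backing ttm_tt for a buffer object. Shrinkable cached objects
 * are marked TTM_TT_FLAG_EXTERNAL(_MAPPABLE) and backed by shmem so that
 * i915 rather than TTM handles swapping them out. On flat-CCS platforms the
 * page vector is over-allocated to also hold the object's compression state:
 * each CCS byte covers NUM_BYTES_PER_CCS_BYTE bytes of the main surface
 * (256 at the time of writing), so e.g. a 1 MiB object needs
 * 1 MiB / 256 = 4 KiB, i.e. one extra page, of CCS state.
 */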
static struct ttm_tt *i915_ttm_tt_create(struct ttm_buffer_object *bo,
					 uint32_t page_flags)
{
	struct drm_i915_private *i915 = container_of(bo->bdev, typeof(*i915),
						     bdev);
	struct ttm_resource_manager *man =
		ttm_manager_type(bo->bdev, bo->resource->mem_type);
	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
	unsigned long ccs_pages = 0;
	enum ttm_caching caching;
	struct i915_ttm_tt *i915_tt;
	int ret;

	if (!obj)
		return NULL;

	i915_tt = kzalloc(sizeof(*i915_tt), GFP_KERNEL);
	if (!i915_tt)
		return NULL;

	if (obj->flags & I915_BO_ALLOC_CPU_CLEAR &&
	    man->use_tt)
		page_flags |= TTM_TT_FLAG_ZERO_ALLOC;

	caching = i915_ttm_select_tt_caching(obj);
	if (i915_gem_object_is_shrinkable(obj) && caching == ttm_cached) {
		page_flags |= TTM_TT_FLAG_EXTERNAL |
			      TTM_TT_FLAG_EXTERNAL_MAPPABLE;
		i915_tt->is_shmem = true;
	}

	if (HAS_FLAT_CCS(i915) && i915_gem_object_needs_ccs_pages(obj))
		ccs_pages = DIV_ROUND_UP(DIV_ROUND_UP(bo->base.size,
						      NUM_BYTES_PER_CCS_BYTE),
					 PAGE_SIZE);

	ret = ttm_tt_init(&i915_tt->ttm, bo, page_flags, caching, ccs_pages);
	if (ret)
		goto err_free;

	__i915_refct_sgt_init(&i915_tt->cached_rsgt, bo->base.size,
			      &tt_rsgt_ops);

	i915_tt->dev = obj->base.dev->dev;

	return &i915_tt->ttm;

err_free:
	kfree(i915_tt);
	return NULL;
}

static int i915_ttm_tt_populate(struct ttm_device *bdev,
				struct ttm_tt *ttm,
				struct ttm_operation_ctx *ctx)
{
	struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);

	if (i915_tt->is_shmem)
		return i915_ttm_tt_shmem_populate(bdev, ttm, ctx);

	return ttm_pool_alloc(&bdev->pool, ttm, ctx);
}

static void i915_ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm)
{
	struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);
	struct sg_table *st = &i915_tt->cached_rsgt.table;

	if (st->sgl)
		dma_unmap_sgtable(i915_tt->dev, st, DMA_BIDIRECTIONAL, 0);

	if (i915_tt->is_shmem) {
		i915_ttm_tt_shmem_unpopulate(ttm);
	} else {
		sg_free_table(st);
		ttm_pool_free(&bdev->pool, ttm);
	}
}

static void i915_ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{
	struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);

	if (i915_tt->filp)
		fput(i915_tt->filp);

	ttm_tt_fini(ttm);
	i915_refct_sgt_put(&i915_tt->cached_rsgt);
}
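/*
 * Tell TTM whether evicting this BO is worthwhile: ghost objects and
 * shmem-backed (EXTERNAL) page vectors are handled by i915 itself, and
 * unevictable objects must not be touched, so only the remaining objects
 * are passed on to the generic ttm_bo_eviction_valuable() check.
 */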
static bool i915_ttm_eviction_valuable(struct ttm_buffer_object *bo,
				       const struct ttm_place *place)
{
	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);

	if (!obj)
		return false;

	/*
	 * EXTERNAL objects should never be swapped out by TTM, instead we need
	 * to handle that ourselves. TTM will already skip such objects for us,
	 * but we would like to avoid grabbing locks for no good reason.
	 */
	if (bo->ttm && bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL)
		return false;

	/* Will do for now. Our pinned objects are still on TTM's LRU lists */
	if (!i915_gem_object_evictable(obj))
		return false;

	return ttm_bo_eviction_valuable(bo, place);
}

static void i915_ttm_evict_flags(struct ttm_buffer_object *bo,
				 struct ttm_placement *placement)
{
	*placement = i915_sys_placement;
}

/**
 * i915_ttm_free_cached_io_rsgt - Free object cached LMEM information
 * @obj: The GEM object
 *
 * This function frees any LMEM-related information that is cached on
 * the object. For example the radix tree for fast page lookup and the
 * cached refcounted sg-table.
 */
void i915_ttm_free_cached_io_rsgt(struct drm_i915_gem_object *obj)
{
	struct radix_tree_iter iter;
	void __rcu **slot;

	if (!obj->ttm.cached_io_rsgt)
		return;

	rcu_read_lock();
	radix_tree_for_each_slot(slot, &obj->ttm.get_io_page.radix, &iter, 0)
		radix_tree_delete(&obj->ttm.get_io_page.radix, iter.index);
	rcu_read_unlock();

	i915_refct_sgt_put(obj->ttm.cached_io_rsgt);
	obj->ttm.cached_io_rsgt = NULL;
}

/**
 * i915_ttm_purge - Clear an object of its memory
 * @obj: The object
 *
 * This function is called to clear an object of its memory when it is
 * marked as not needed anymore.
 *
 * Return: 0 on success, negative error code on failure.
 */
int i915_ttm_purge(struct drm_i915_gem_object *obj)
{
	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
	struct i915_ttm_tt *i915_tt =
		container_of(bo->ttm, typeof(*i915_tt), ttm);
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false,
	};
	struct ttm_placement place = {};
	int ret;

	if (obj->mm.madv == __I915_MADV_PURGED)
		return 0;

	ret = ttm_bo_validate(bo, &place, &ctx);
	if (ret)
		return ret;

	if (bo->ttm && i915_tt->filp) {
		/*
		 * The below fput() (which eventually calls shmem_truncate())
		 * might be delayed by a worker, so when called directly to
		 * purge the pages (like by the shrinker) we should try to be
		 * more aggressive and release the pages immediately.
		 */
		shmem_truncate_range(file_inode(i915_tt->filp),
				     0, (loff_t)-1);
		fput(fetch_and_zero(&i915_tt->filp));
	}

	obj->write_domain = 0;
	obj->read_domains = 0;
	i915_ttm_adjust_gem_after_move(obj);
	i915_ttm_free_cached_io_rsgt(obj);
	obj->mm.madv = __I915_MADV_PURGED;

	return 0;
}
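/*
 * Shrinker entry point for shmem-backed objects: after waiting for any
 * pending GPU activity, purge the pages entirely if the object is
 * I915_MADV_DONTNEED, otherwise validate against an empty placement so that
 * TTM moves the tt to the swapped state (TTM_TT_FLAG_SWAPPED), optionally
 * starting writeback of the shmem mapping.
 */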
static int i915_ttm_shrink(struct drm_i915_gem_object *obj, unsigned int flags)
{
	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
	struct i915_ttm_tt *i915_tt =
		container_of(bo->ttm, typeof(*i915_tt), ttm);
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = flags & I915_GEM_OBJECT_SHRINK_NO_GPU_WAIT,
	};
	struct ttm_placement place = {};
	int ret;

	if (!bo->ttm || bo->resource->mem_type != TTM_PL_SYSTEM)
		return 0;

	GEM_BUG_ON(!i915_tt->is_shmem);

	if (!i915_tt->filp)
		return 0;

	ret = ttm_bo_wait_ctx(bo, &ctx);
	if (ret)
		return ret;

	switch (obj->mm.madv) {
	case I915_MADV_DONTNEED:
		return i915_ttm_purge(obj);
	case __I915_MADV_PURGED:
		return 0;
	}

	if (bo->ttm->page_flags & TTM_TT_FLAG_SWAPPED)
		return 0;

	bo->ttm->page_flags |= TTM_TT_FLAG_SWAPPED;
	ret = ttm_bo_validate(bo, &place, &ctx);
	if (ret) {
		bo->ttm->page_flags &= ~TTM_TT_FLAG_SWAPPED;
		return ret;
	}

	if (flags & I915_GEM_OBJECT_SHRINK_WRITEBACK)
		__shmem_writeback(obj->base.size, i915_tt->filp->f_mapping);

	return 0;
}

static void i915_ttm_delete_mem_notify(struct ttm_buffer_object *bo)
{
	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);

	if (likely(obj)) {
		__i915_gem_object_pages_fini(obj);
		i915_ttm_free_cached_io_rsgt(obj);
	}
}

static struct i915_refct_sgt *i915_ttm_tt_get_st(struct ttm_tt *ttm)
{
	struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);
	struct sg_table *st;
	int ret;

	if (i915_tt->cached_rsgt.table.sgl)
		return i915_refct_sgt_get(&i915_tt->cached_rsgt);

	st = &i915_tt->cached_rsgt.table;
	ret = sg_alloc_table_from_pages_segment(st,
			ttm->pages, ttm->num_pages,
			0, (unsigned long)ttm->num_pages << PAGE_SHIFT,
			i915_sg_segment_size(), GFP_KERNEL);
	if (ret) {
		st->sgl = NULL;
		return ERR_PTR(ret);
	}

	ret = dma_map_sgtable(i915_tt->dev, st, DMA_BIDIRECTIONAL, 0);
	if (ret) {
		sg_free_table(st);
		return ERR_PTR(ret);
	}

	return i915_refct_sgt_get(&i915_tt->cached_rsgt);
}
/**
 * i915_ttm_resource_get_st - Get a refcounted sg-table pointing to the
 * resource memory
 * @obj: The GEM object used for sg-table caching
 * @res: The struct ttm_resource for which an sg-table is requested.
 *
 * This function returns a refcounted sg-table representing the memory
 * pointed to by @res. If @res is the object's current resource it may also
 * cache the sg_table on the object or attempt to access an already cached
 * sg-table. The refcounted sg-table needs to be put when no longer in use.
 *
 * Return: A valid pointer to a struct i915_refct_sgt or error pointer on
 * failure.
 */
struct i915_refct_sgt *
i915_ttm_resource_get_st(struct drm_i915_gem_object *obj,
			 struct ttm_resource *res)
{
	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
	u32 page_alignment;

	if (!i915_ttm_gtt_binds_lmem(res))
		return i915_ttm_tt_get_st(bo->ttm);

	page_alignment = bo->page_alignment << PAGE_SHIFT;
	if (!page_alignment)
		page_alignment = obj->mm.region->min_page_size;

	/*
	 * If CPU mapping differs, we need to add the ttm_tt pages to
	 * the resulting st. Might make sense for GGTT.
	 */
	GEM_WARN_ON(!i915_ttm_cpu_maps_iomem(res));
	if (bo->resource == res) {
		if (!obj->ttm.cached_io_rsgt) {
			struct i915_refct_sgt *rsgt;

			rsgt = intel_region_ttm_resource_to_rsgt(obj->mm.region,
								 res,
								 page_alignment);
			if (IS_ERR(rsgt))
				return rsgt;

			obj->ttm.cached_io_rsgt = rsgt;
		}
		return i915_refct_sgt_get(obj->ttm.cached_io_rsgt);
	}

	return intel_region_ttm_resource_to_rsgt(obj->mm.region, res,
						 page_alignment);
}

static int i915_ttm_truncate(struct drm_i915_gem_object *obj)
{
	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
	int err;

	WARN_ON_ONCE(obj->mm.madv == I915_MADV_WILLNEED);

	err = i915_ttm_move_notify(bo);
	if (err)
		return err;

	return i915_ttm_purge(obj);
}

static void i915_ttm_swap_notify(struct ttm_buffer_object *bo)
{
	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
	int ret;

	if (!obj)
		return;

	ret = i915_ttm_move_notify(bo);
	GEM_WARN_ON(ret);
	GEM_WARN_ON(obj->ttm.cached_io_rsgt);
	if (!ret && obj->mm.madv != I915_MADV_WILLNEED)
		i915_ttm_purge(obj);
}
/**
 * i915_ttm_resource_mappable - Return true if the ttm resource is CPU
 * accessible.
 * @res: The TTM resource to check.
 *
 * This is interesting on small-BAR systems where we may encounter lmem objects
 * that can't be accessed via the CPU.
 */
bool i915_ttm_resource_mappable(struct ttm_resource *res)
{
	struct i915_ttm_buddy_resource *bman_res = to_ttm_buddy_resource(res);

	if (!i915_ttm_cpu_maps_iomem(res))
		return true;

	return bman_res->used_visible_size == bman_res->base.num_pages;
}

static int i915_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem)
{
	struct drm_i915_gem_object *obj = i915_ttm_to_gem(mem->bo);
	bool unknown_state;

	if (!obj)
		return -EINVAL;

	if (!kref_get_unless_zero(&obj->base.refcount))
		return -EINVAL;

	assert_object_held(obj);

	unknown_state = i915_gem_object_has_unknown_state(obj);
	i915_gem_object_put(obj);
	if (unknown_state)
		return -EINVAL;

	if (!i915_ttm_cpu_maps_iomem(mem))
		return 0;

	if (!i915_ttm_resource_mappable(mem))
		return -EINVAL;

	mem->bus.caching = ttm_write_combined;
	mem->bus.is_iomem = true;

	return 0;
}

static unsigned long i915_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
					 unsigned long page_offset)
{
	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
	struct scatterlist *sg;
	unsigned long base;
	unsigned int ofs;

	GEM_BUG_ON(!obj);
	GEM_WARN_ON(bo->ttm);

	base = obj->mm.region->iomap.base - obj->mm.region->region.start;
	sg = __i915_gem_object_get_sg(obj, &obj->ttm.get_io_page, page_offset, &ofs, true);

	return ((base + sg_dma_address(sg)) >> PAGE_SHIFT) + ofs;
}

/*
 * All callbacks need to take care not to downcast a struct ttm_buffer_object
 * without checking its subclass, since it might be a TTM ghost object.
 */
static struct ttm_device_funcs i915_ttm_bo_driver = {
	.ttm_tt_create = i915_ttm_tt_create,
	.ttm_tt_populate = i915_ttm_tt_populate,
	.ttm_tt_unpopulate = i915_ttm_tt_unpopulate,
	.ttm_tt_destroy = i915_ttm_tt_destroy,
	.eviction_valuable = i915_ttm_eviction_valuable,
	.evict_flags = i915_ttm_evict_flags,
	.move = i915_ttm_move,
	.swap_notify = i915_ttm_swap_notify,
	.delete_mem_notify = i915_ttm_delete_mem_notify,
	.io_mem_reserve = i915_ttm_io_mem_reserve,
	.io_mem_pfn = i915_ttm_io_mem_pfn,
};

/**
 * i915_ttm_driver - Return a pointer to the TTM device funcs
 *
 * Return: Pointer to statically allocated TTM device funcs.
 */
struct ttm_device_funcs *i915_ttm_driver(void)
{
	return &i915_ttm_bo_driver;
}
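/*
 * Validate the object into the given placements and make its pages
 * available. This is done in two passes: first try only the preferred
 * placement with eviction disabled (num_busy_placement temporarily zeroed),
 * and only if that fails retry with all allowed placements, letting TTM
 * evict as needed.
 */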
static int __i915_ttm_get_pages(struct drm_i915_gem_object *obj,
				struct ttm_placement *placement)
{
	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false,
	};
	int real_num_busy;
	int ret;

	/* First try only the requested placement. No eviction. */
	real_num_busy = fetch_and_zero(&placement->num_busy_placement);
	ret = ttm_bo_validate(bo, placement, &ctx);
	if (ret) {
		ret = i915_ttm_err_to_gem(ret);
		/*
		 * Anything that wants to restart the operation gets to
		 * do that.
		 */
		if (ret == -EDEADLK || ret == -EINTR || ret == -ERESTARTSYS ||
		    ret == -EAGAIN)
			return ret;

		/*
		 * If the initial attempt fails, allow all accepted placements,
		 * evicting if necessary.
		 */
		placement->num_busy_placement = real_num_busy;
		ret = ttm_bo_validate(bo, placement, &ctx);
		if (ret)
			return i915_ttm_err_to_gem(ret);
	}

	if (bo->ttm && !ttm_tt_is_populated(bo->ttm)) {
		ret = ttm_tt_populate(bo->bdev, bo->ttm, &ctx);
		if (ret)
			return ret;

		i915_ttm_adjust_domains_after_move(obj);
		i915_ttm_adjust_gem_after_move(obj);
	}

	if (!i915_gem_object_has_pages(obj)) {
		struct i915_refct_sgt *rsgt =
			i915_ttm_resource_get_st(obj, bo->resource);

		if (IS_ERR(rsgt))
			return PTR_ERR(rsgt);

		GEM_BUG_ON(obj->mm.rsgt);
		obj->mm.rsgt = rsgt;
		__i915_gem_object_set_pages(obj, &rsgt->table,
					    i915_sg_dma_sizes(rsgt->table.sgl));
	}

	GEM_BUG_ON(bo->ttm && ((obj->base.size >> PAGE_SHIFT) < bo->ttm->num_pages));
	i915_ttm_adjust_lru(obj);
	return ret;
}

static int i915_ttm_get_pages(struct drm_i915_gem_object *obj)
{
	struct ttm_place requested, busy[I915_TTM_MAX_PLACEMENTS];
	struct ttm_placement placement;

	GEM_BUG_ON(obj->mm.n_placements > I915_TTM_MAX_PLACEMENTS);

	/* Move to the requested placement. */
	i915_ttm_placement_from_obj(obj, &requested, busy, &placement);

	return __i915_ttm_get_pages(obj, &placement);
}
/**
 * DOC: Migration vs eviction
 *
 * GEM migration may not be the same as TTM migration / eviction. If
 * the TTM core decides to evict an object it may be evicted to a
 * TTM memory type that is not in the object's allowable GEM regions, or
 * in fact theoretically to a TTM memory type that doesn't correspond to
 * a GEM memory region. In that case the object's GEM region is not
 * updated, and the data is migrated back to the GEM region at
 * get_pages time. TTM may however set up CPU ptes to the object even
 * when it is evicted.
 * GEM forced migration, using the i915_ttm_migrate() op, is allowed even
 * to regions that are not in the object's list of allowable placements.
 */
static int __i915_ttm_migrate(struct drm_i915_gem_object *obj,
			      struct intel_memory_region *mr,
			      unsigned int flags)
{
	struct ttm_place requested;
	struct ttm_placement placement;
	int ret;

	i915_ttm_place_from_region(mr, &requested, obj->bo_offset,
				   obj->base.size, flags);
	placement.num_placement = 1;
	placement.num_busy_placement = 1;
	placement.placement = &requested;
	placement.busy_placement = &requested;

	ret = __i915_ttm_get_pages(obj, &placement);
	if (ret)
		return ret;

	/*
	 * Reinitialize the region bindings. This is primarily
	 * required for objects where the new region is not in
	 * its allowable placements.
	 */
	if (obj->mm.region != mr) {
		i915_gem_object_release_memory_region(obj);
		i915_gem_object_init_memory_region(obj, mr);
	}

	return 0;
}

static int i915_ttm_migrate(struct drm_i915_gem_object *obj,
			    struct intel_memory_region *mr)
{
	return __i915_ttm_migrate(obj, mr, obj->flags);
}

static void i915_ttm_put_pages(struct drm_i915_gem_object *obj,
			       struct sg_table *st)
{
	/*
	 * We're currently not called from a shrinker, so put_pages()
	 * typically means the object is about to be destroyed, or called
	 * from move_notify(). So just avoid doing much for now.
	 * If the object is not destroyed next, the TTM eviction logic
	 * and shrinkers will move it out if needed.
	 */

	if (obj->mm.rsgt)
		i915_refct_sgt_put(fetch_and_zero(&obj->mm.rsgt));
}
/**
 * i915_ttm_adjust_lru - Adjust an object's position on relevant LRU lists.
 * @obj: The object
 */
void i915_ttm_adjust_lru(struct drm_i915_gem_object *obj)
{
	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
	struct i915_ttm_tt *i915_tt =
		container_of(bo->ttm, typeof(*i915_tt), ttm);
	bool shrinkable =
		bo->ttm && i915_tt->filp && ttm_tt_is_populated(bo->ttm);

	/*
	 * Don't manipulate the TTM LRUs while in TTM bo destruction.
	 * We're called through i915_ttm_delete_mem_notify().
	 */
	if (!kref_read(&bo->kref))
		return;

	/*
	 * We skip managing the shrinker LRU in set_pages() and just manage
	 * everything here. This does at least solve the issue with having
	 * temporary shmem mappings (like with evicted lmem) not being visible
	 * to the shrinker. Only our shmem objects are shrinkable, everything
	 * else we keep as unshrinkable.
	 *
	 * To make sure everything plays nice we keep an extra shrink pin in TTM
	 * if the underlying pages are not currently shrinkable. Once we release
	 * our pin, like when the pages are moved to shmem, the pages will then
	 * be added to the shrinker LRU, assuming the caller isn't also holding
	 * a pin.
	 *
	 * TODO: consider maybe also bumping the shrinker list here when we have
	 * already unpinned it, which should give us something more like an LRU.
	 *
	 * TODO: There is a small window of opportunity for this function to
	 * get called from eviction after we've dropped the last GEM refcount,
	 * but before the TTM deleted flag is set on the object. Avoid
	 * adjusting the shrinker list in such cases, since the object is
	 * not available to the shrinker anyway due to its zero refcount.
	 * To fix this properly we should move to a TTM shrinker LRU list for
	 * these objects.
	 */
	if (kref_get_unless_zero(&obj->base.refcount)) {
		if (shrinkable != obj->mm.ttm_shrinkable) {
			if (shrinkable) {
				if (obj->mm.madv == I915_MADV_WILLNEED)
					__i915_gem_object_make_shrinkable(obj);
				else
					__i915_gem_object_make_purgeable(obj);
			} else {
				i915_gem_object_make_unshrinkable(obj);
			}

			obj->mm.ttm_shrinkable = shrinkable;
		}
		i915_gem_object_put(obj);
	}

	/*
	 * Put on the correct LRU list depending on the MADV status
	 */
	spin_lock(&bo->bdev->lru_lock);
	if (shrinkable) {
		/* Try to keep shmem_tt from being considered for shrinking. */
		bo->priority = TTM_MAX_BO_PRIORITY - 1;
	} else if (obj->mm.madv != I915_MADV_WILLNEED) {
		bo->priority = I915_TTM_PRIO_PURGE;
	} else if (!i915_gem_object_has_pages(obj)) {
		bo->priority = I915_TTM_PRIO_NO_PAGES;
	} else {
		struct ttm_resource_manager *man =
			ttm_manager_type(bo->bdev, bo->resource->mem_type);

		/*
		 * If we need to place an LMEM resource which doesn't need CPU
		 * access then we should try not to victimize mappable objects
		 * first, since we likely end up stealing more of the mappable
		 * portion. And likewise when we try to find space for a
		 * mappable object, we know not to ever victimize objects that
		 * don't occupy any mappable pages.
		 */
		if (i915_ttm_cpu_maps_iomem(bo->resource) &&
		    i915_ttm_buddy_man_visible_size(man) < man->size &&
		    !(obj->flags & I915_BO_ALLOC_GPU_ONLY))
			bo->priority = I915_TTM_PRIO_NEEDS_CPU_ACCESS;
		else
			bo->priority = I915_TTM_PRIO_HAS_PAGES;
	}

	ttm_bo_move_to_lru_tail(bo);
	spin_unlock(&bo->bdev->lru_lock);
}
/*
 * TTM-backed gem object destruction requires some clarification.
 * Basically we have two possibilities here. We can either rely on the
 * i915 delayed destruction and put the TTM object when the object
 * is idle. This would be detected by TTM which would bypass the
 * TTM delayed destroy handling. The other approach is to put the TTM
 * object early and rely on the TTM destroyed handling, and then free
 * the leftover parts of the GEM object once TTM's destroyed list handling is
 * complete. For now, we rely on the latter for two reasons:
 * a) TTM can evict an object even when it's on the delayed destroy list,
 * which in theory allows for complete eviction.
 * b) There is work going on in TTM to allow freeing an object even when
 * it's not idle, and using the TTM destroyed list handling could help us
 * benefit from that.
 */
static void i915_ttm_delayed_free(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!obj->ttm.created);

	ttm_bo_put(i915_gem_to_ttm(obj));
}
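/*
 * CPU fault handler. Besides the usual sanity checks (writable mapping,
 * object still I915_MADV_WILLNEED), a fault on an lmem object that is not
 * currently CPU-visible (small BAR) triggers a migration to the first
 * placement that is mappable, dropping I915_BO_ALLOC_GPU_ONLY for the
 * attempt, before the PTEs are actually inserted.
 */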
static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
{
	struct vm_area_struct *area = vmf->vma;
	struct ttm_buffer_object *bo = area->vm_private_data;
	struct drm_device *dev = bo->base.dev;
	struct drm_i915_gem_object *obj;
	vm_fault_t ret;
	int idx;

	obj = i915_ttm_to_gem(bo);
	if (!obj)
		return VM_FAULT_SIGBUS;

	/* Sanity check that we allow writing into this object */
	if (unlikely(i915_gem_object_is_readonly(obj) &&
		     area->vm_flags & VM_WRITE))
		return VM_FAULT_SIGBUS;

	ret = ttm_bo_vm_reserve(bo, vmf);
	if (ret)
		return ret;

	if (obj->mm.madv != I915_MADV_WILLNEED) {
		dma_resv_unlock(bo->base.resv);
		return VM_FAULT_SIGBUS;
	}

	if (!i915_ttm_resource_mappable(bo->resource)) {
		int err = -ENODEV;
		int i;

		for (i = 0; i < obj->mm.n_placements; i++) {
			struct intel_memory_region *mr = obj->mm.placements[i];
			unsigned int flags;

			if (!mr->io_size && mr->type != INTEL_MEMORY_SYSTEM)
				continue;

			flags = obj->flags;
			flags &= ~I915_BO_ALLOC_GPU_ONLY;
			err = __i915_ttm_migrate(obj, mr, flags);
			if (!err)
				break;
		}

		if (err) {
			drm_dbg(dev, "Unable to make resource CPU accessible\n");
			dma_resv_unlock(bo->base.resv);
			return VM_FAULT_SIGBUS;
		}
	}

	if (drm_dev_enter(dev, &idx)) {
		ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
					       TTM_BO_VM_NUM_PREFAULT);
		drm_dev_exit(idx);
	} else {
		ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
	}
	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
		return ret;

	i915_ttm_adjust_lru(obj);

	dma_resv_unlock(bo->base.resv);
	return ret;
}

static int
vm_access_ttm(struct vm_area_struct *area, unsigned long addr,
	      void *buf, int len, int write)
{
	struct drm_i915_gem_object *obj =
		i915_ttm_to_gem(area->vm_private_data);

	if (i915_gem_object_is_readonly(obj) && write)
		return -EACCES;

	return ttm_bo_vm_access(area, addr, buf, len, write);
}

static void ttm_vm_open(struct vm_area_struct *vma)
{
	struct drm_i915_gem_object *obj =
		i915_ttm_to_gem(vma->vm_private_data);

	GEM_BUG_ON(!obj);
	i915_gem_object_get(obj);
}

static void ttm_vm_close(struct vm_area_struct *vma)
{
	struct drm_i915_gem_object *obj =
		i915_ttm_to_gem(vma->vm_private_data);

	GEM_BUG_ON(!obj);
	i915_gem_object_put(obj);
}

static const struct vm_operations_struct vm_ops_ttm = {
	.fault = vm_fault_ttm,
	.access = vm_access_ttm,
	.open = ttm_vm_open,
	.close = ttm_vm_close,
};

static u64 i915_ttm_mmap_offset(struct drm_i915_gem_object *obj)
{
	/* The ttm_bo must be allocated with I915_BO_ALLOC_USER */
	GEM_BUG_ON(!drm_mm_node_allocated(&obj->base.vma_node.vm_node));

	return drm_vma_node_offset_addr(&obj->base.vma_node);
}

static void i915_ttm_unmap_virtual(struct drm_i915_gem_object *obj)
{
	ttm_bo_unmap_virtual(i915_gem_to_ttm(obj));
}
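/*
 * GEM object ops for TTM-backed objects: page allocation, shrinking,
 * truncation, migration and mmap are all routed through the TTM backend
 * above, with the shrinker LRU managed by i915_ttm_adjust_lru() rather than
 * the common GEM shrink list (I915_GEM_OBJECT_SELF_MANAGED_SHRINK_LIST).
 */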
10974bc2d574SMatthew Auld static const struct drm_i915_gem_object_ops i915_gem_ttm_obj_ops = {
1098213d5092SThomas Hellström 	.name = "i915_gem_object_ttm",
10995d12ffe6SMatthew Auld 	.flags = I915_GEM_OBJECT_IS_SHRINKABLE |
11005d12ffe6SMatthew Auld 		 I915_GEM_OBJECT_SELF_MANAGED_SHRINK_LIST,
1101213d5092SThomas Hellström 
1102213d5092SThomas Hellström 	.get_pages = i915_ttm_get_pages,
1103213d5092SThomas Hellström 	.put_pages = i915_ttm_put_pages,
11046ef295e3SMatthew Auld 	.truncate = i915_ttm_truncate,
1105ffa3fe08SMatthew Auld 	.shrink = i915_ttm_shrink,
11067ae03459SMatthew Auld 
1107213d5092SThomas Hellström 	.adjust_lru = i915_ttm_adjust_lru,
1108213d5092SThomas Hellström 	.delayed_free = i915_ttm_delayed_free,
1109b6e913e1SThomas Hellström 	.migrate = i915_ttm_migrate,
11107ae03459SMatthew Auld 
1111cf3e3e86SMaarten Lankhorst 	.mmap_offset = i915_ttm_mmap_offset,
11128ee262baSMatthew Auld 	.unmap_virtual = i915_ttm_unmap_virtual,
1113cf3e3e86SMaarten Lankhorst 	.mmap_ops = &vm_ops_ttm,
1114213d5092SThomas Hellström };
1115213d5092SThomas Hellström 
1116213d5092SThomas Hellström void i915_ttm_bo_destroy(struct ttm_buffer_object *bo)
1117213d5092SThomas Hellström {
1118213d5092SThomas Hellström 	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
1119213d5092SThomas Hellström 
1120213d5092SThomas Hellström 	i915_gem_object_release_memory_region(obj);
1121cf3e3e86SMaarten Lankhorst 	mutex_destroy(&obj->ttm.get_io_page.lock);
1122068396bbSThomas Hellström 
1123068396bbSThomas Hellström 	if (obj->ttm.created) {
1124ebd4a8ecSMatthew Auld 		/*
1125ebd4a8ecSMatthew Auld 		 * We freely manage the shrinker LRU outside of the mm.pages life
1126ebd4a8ecSMatthew Auld 		 * cycle. As a result when destroying the object we should be
1127ebd4a8ecSMatthew Auld 		 * extra paranoid and ensure we remove it from the LRU, before
1128ebd4a8ecSMatthew Auld 		 * we free the object.
1129ebd4a8ecSMatthew Auld 		 *
1130ebd4a8ecSMatthew Auld 		 * Touching the ttm_shrinkable outside of the object lock here
1131ebd4a8ecSMatthew Auld 		 * should be safe now that the last GEM object ref was dropped.
1132ebd4a8ecSMatthew Auld 		 */
1133ebd4a8ecSMatthew Auld 		if (obj->mm.ttm_shrinkable)
1134ebd4a8ecSMatthew Auld 			i915_gem_object_make_unshrinkable(obj);
1135ebd4a8ecSMatthew Auld 
1136c56ce956SThomas Hellström 		i915_ttm_backup_free(obj);
1137c56ce956SThomas Hellström 
113848b09612SMaarten Lankhorst 		/* This releases all gem object bindings to the backend. */
113948b09612SMaarten Lankhorst 		__i915_gem_free_object(obj);
114048b09612SMaarten Lankhorst 
1141213d5092SThomas Hellström 		call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
1142068396bbSThomas Hellström 	} else {
1143068396bbSThomas Hellström 		__i915_gem_object_fini(obj);
1144068396bbSThomas Hellström 	}
1145213d5092SThomas Hellström }
1146213d5092SThomas Hellström 
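The obj->ttm.created check above gives the destructor two personalities: full teardown once the object was completely initialized, and a minimal fini when ttm_bo_init_reserved() fails in __i915_gem_ttm_object_init() below while the caller still owns the object. A minimal sketch of the same pattern, with all names hypothetical:

#include <stdbool.h>
#include <stdlib.h>

struct demo_obj {
	bool created;	/* set only after initialization fully succeeded */
	void *backing;	/* owned only once created */
};

static void demo_destroy(struct demo_obj *obj)
{
	if (obj->created) {
		/* Last reference dropped: tear down everything we own. */
		free(obj->backing);
		free(obj);
		return;
	}
	/* Init failed part-way: the caller still owns and frees obj,
	 * so only the base initialization may be undone here. */
}
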
1147213d5092SThomas Hellström /**
1148213d5092SThomas Hellström  * __i915_gem_ttm_object_init - Initialize a ttm-backed i915 gem object
1149213d5092SThomas Hellström  * @mem: The initial memory region for the object.
1150213d5092SThomas Hellström  * @obj: The gem object.
 * @offset: Offset of the object within the region, in bytes.
1151213d5092SThomas Hellström  * @size: Object size in bytes.
 * @page_size: Page size to force for the backing store, or 0 for the
 * region default; forcing the page size is kernel internal only.
1152213d5092SThomas Hellström  * @flags: gem object flags.
1153213d5092SThomas Hellström  *
1154213d5092SThomas Hellström  * Return: 0 on success, negative error code on failure.
1155213d5092SThomas Hellström  */
1156213d5092SThomas Hellström int __i915_gem_ttm_object_init(struct intel_memory_region *mem,
1157213d5092SThomas Hellström 			       struct drm_i915_gem_object *obj,
11589b78b5daSMatthew Auld 			       resource_size_t offset,
1159213d5092SThomas Hellström 			       resource_size_t size,
1160d22632c8SMatthew Auld 			       resource_size_t page_size,
1161213d5092SThomas Hellström 			       unsigned int flags)
1162213d5092SThomas Hellström {
1163213d5092SThomas Hellström 	static struct lock_class_key lock_class;
1164213d5092SThomas Hellström 	struct drm_i915_private *i915 = mem->i915;
11653c2b8f32SThomas Hellström 	struct ttm_operation_ctx ctx = {
11663c2b8f32SThomas Hellström 		.interruptible = true,
11673c2b8f32SThomas Hellström 		.no_wait_gpu = false,
11683c2b8f32SThomas Hellström 	};
1169213d5092SThomas Hellström 	enum ttm_bo_type bo_type;
1170213d5092SThomas Hellström 	int ret;
1171213d5092SThomas Hellström 
1172213d5092SThomas Hellström 	drm_gem_private_object_init(&i915->drm, &obj->base, size);
1173213d5092SThomas Hellström 	i915_gem_object_init(obj, &i915_gem_ttm_obj_ops, &lock_class, flags);
1174068396bbSThomas Hellström 
1175ecbf2060SMatthew Auld 	obj->bo_offset = offset;
1176ecbf2060SMatthew Auld 
1177068396bbSThomas Hellström 	/* Don't put on a region list until we're either locked or fully initialized. */
11788b1f7f92SThomas Hellström 	obj->mm.region = mem;
1179068396bbSThomas Hellström 	INIT_LIST_HEAD(&obj->mm.region_link);
1180068396bbSThomas Hellström 
1181cf3e3e86SMaarten Lankhorst 	INIT_RADIX_TREE(&obj->ttm.get_io_page.radix, GFP_KERNEL | __GFP_NOWARN);
1182cf3e3e86SMaarten Lankhorst 	mutex_init(&obj->ttm.get_io_page.lock);
1183213d5092SThomas Hellström 	bo_type = (obj->flags & I915_BO_ALLOC_USER) ? ttm_bo_type_device :
1184213d5092SThomas Hellström 		ttm_bo_type_kernel;
1185213d5092SThomas Hellström 
11863c2b8f32SThomas Hellström 	obj->base.vma_node.driver_private = i915_gem_to_ttm(obj);
11873c2b8f32SThomas Hellström 
1188d22632c8SMatthew Auld 	/* Forcing the page size is kernel internal only */
1189d22632c8SMatthew Auld 	GEM_BUG_ON(page_size && obj->mm.n_placements);
1190d22632c8SMatthew Auld 
1191213d5092SThomas Hellström 	/*
1192ebd4a8ecSMatthew Auld 	 * Keep an extra shrink pin to prevent the object from being made
1193ebd4a8ecSMatthew Auld 	 * shrinkable too early. If the ttm_tt is ever allocated in shmem, we
1194ebd4a8ecSMatthew Auld 	 * drop the pin. The TTM backend manages the shrinker LRU itself,
1195ebd4a8ecSMatthew Auld 	 * outside of the normal mm.pages life cycle.
1196ebd4a8ecSMatthew Auld 	 */
1197ebd4a8ecSMatthew Auld 	i915_gem_object_make_unshrinkable(obj);
1198ebd4a8ecSMatthew Auld 
1199ebd4a8ecSMatthew Auld 	/*
1200213d5092SThomas Hellström 	 * If this function fails, it will call the destructor, but
1201213d5092SThomas Hellström 	 * our caller still owns the object. So no freeing in the
1202213d5092SThomas Hellström 	 * destructor until obj->ttm.created is true.
1203213d5092SThomas Hellström 	 * Similarly, in delayed_destroy, we can't call ttm_bo_put()
1204213d5092SThomas Hellström 	 * until successful initialization.
1205213d5092SThomas Hellström 	 */
1206347987a2SChristian König 	ret = ttm_bo_init_reserved(&i915->bdev, i915_gem_to_ttm(obj), bo_type,
1207347987a2SChristian König 				   &i915_sys_placement, page_size >> PAGE_SHIFT,
12083c2b8f32SThomas Hellström 				   &ctx, NULL, NULL, i915_ttm_bo_destroy);
12093c2b8f32SThomas Hellström 	if (ret)
1210b07a6483SThomas Hellström 		return i915_ttm_err_to_gem(ret);
1211213d5092SThomas Hellström 
1212213d5092SThomas Hellström 	obj->ttm.created = true;
1213068396bbSThomas Hellström 	i915_gem_object_release_memory_region(obj);
1214068396bbSThomas Hellström 	i915_gem_object_init_memory_region(obj, mem);
12153c2b8f32SThomas Hellström 	i915_ttm_adjust_domains_after_move(obj);
12163c2b8f32SThomas Hellström 	i915_ttm_adjust_gem_after_move(obj);
12173c2b8f32SThomas Hellström 	i915_gem_object_unlock(obj);
1218213d5092SThomas Hellström 
12193c2b8f32SThomas Hellström 	return 0;
1220213d5092SThomas Hellström }
122132b7cf51SThomas Hellström 
122232b7cf51SThomas Hellström static const struct intel_memory_region_ops ttm_system_region_ops = {
122332b7cf51SThomas Hellström 	.init_object = __i915_gem_ttm_object_init,
12248b1f7f92SThomas Hellström 	.release = intel_region_ttm_fini,
122532b7cf51SThomas Hellström };
122632b7cf51SThomas Hellström 
122732b7cf51SThomas Hellström struct intel_memory_region *
122832b7cf51SThomas Hellström i915_gem_ttm_system_setup(struct drm_i915_private *i915,
122932b7cf51SThomas Hellström 			  u16 type, u16 instance)
123032b7cf51SThomas Hellström {
123132b7cf51SThomas Hellström 	struct intel_memory_region *mr;
123232b7cf51SThomas Hellström 
123332b7cf51SThomas Hellström 	mr = intel_memory_region_create(i915, 0,
123432b7cf51SThomas Hellström 					totalram_pages() << PAGE_SHIFT,
1235235582caSMatthew Auld 					PAGE_SIZE, 0, 0,
123632b7cf51SThomas Hellström 					type, instance,
123732b7cf51SThomas Hellström 					&ttm_system_region_ops);
123832b7cf51SThomas Hellström 	if (IS_ERR(mr))
123932b7cf51SThomas Hellström 		return mr;
124032b7cf51SThomas Hellström 
124132b7cf51SThomas Hellström 	intel_memory_region_set_name(mr, "system-ttm");
124232b7cf51SThomas Hellström 	return mr;
1243213d5092SThomas Hellström }
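For context, objects normally reach __i915_gem_ttm_object_init() through the region ops table above: i915_gem_object_create_region() resolves mem->ops->init_object for the chosen region. A hedged kernel-side sketch of that path; demo_create_bo() is hypothetical and error handling is elided:

/* Hypothetical caller showing the path into
 * ttm_system_region_ops.init_object, i.e. __i915_gem_ttm_object_init(). */
static struct drm_i915_gem_object *
demo_create_bo(struct intel_memory_region *mem, resource_size_t size)
{
	/* page_size == 0 keeps the backend default; I915_BO_ALLOC_USER
	 * selects ttm_bo_type_device so userspace may mmap the object. */
	return i915_gem_object_create_region(mem, size, 0,
					     I915_BO_ALLOC_USER);
}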
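Similarly, a sketch of how the TTM-backed system region itself might be instantiated during driver probe; demo_setup_system_region() is hypothetical, and the real probe code derives type and instance from the hardware region tables rather than hardcoding them:

static int demo_setup_system_region(struct drm_i915_private *i915)
{
	struct intel_memory_region *mr;

	mr = i915_gem_ttm_system_setup(i915, INTEL_MEMORY_SYSTEM, 0);
	if (IS_ERR(mr))
		return PTR_ERR(mr);

	/* Objects placed in this region are now created through
	 * ttm_system_region_ops.init_object. */
	i915->mm.regions[INTEL_REGION_SMEM] = mr;
	return 0;
}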