// SPDX-License-Identifier: MIT
/*
 * Copyright © 2010 Daniel Vetter
 * Copyright © 2020 Intel Corporation
 */

#include <linux/slab.h> /* fault-inject.h is not standalone! */

#include <linux/fault-inject.h>
#include <linux/log2.h>
#include <linux/random.h>
#include <linux/seq_file.h>
#include <linux/stop_machine.h>

#include <asm/set_memory.h>
#include <asm/smp.h>

#include "display/intel_frontbuffer.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_requests.h"

#include "i915_drv.h"
#include "i915_scatterlist.h"
#include "i915_trace.h"
#include "i915_vgpu.h"

int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
			       struct sg_table *pages)
{
	do {
		if (dma_map_sg_attrs(&obj->base.dev->pdev->dev,
				     pages->sgl, pages->nents,
				     PCI_DMA_BIDIRECTIONAL,
				     DMA_ATTR_SKIP_CPU_SYNC |
				     DMA_ATTR_NO_KERNEL_MAPPING |
				     DMA_ATTR_NO_WARN))
			return 0;

		/*
		 * If the DMA remap fails, one cause can be that we have
		 * too many objects pinned in a small remapping table,
		 * such as swiotlb. Incrementally purge all other objects and
		 * try again - if there are no more pages to remove from
		 * the DMA remapper, i915_gem_shrink will return 0.
		 */
		GEM_BUG_ON(obj->mm.pages == pages);
	} while (i915_gem_shrink(to_i915(obj->base.dev),
				 obj->base.size >> PAGE_SHIFT, NULL,
				 I915_SHRINK_BOUND |
				 I915_SHRINK_UNBOUND));

	return -ENOSPC;
}

void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
			       struct sg_table *pages)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct device *kdev = &dev_priv->drm.pdev->dev;
	struct i915_ggtt *ggtt = &dev_priv->ggtt;

	if (unlikely(ggtt->do_idle_maps)) {
		/* XXX This does not prevent more requests being submitted! */
		if (intel_gt_retire_requests_timeout(ggtt->vm.gt,
						     -MAX_SCHEDULE_TIMEOUT)) {
			drm_err(&dev_priv->drm,
				"Failed to wait for idle; VT'd may hang.\n");
			/* Wait a bit, in hopes it avoids the hang */
			udelay(10);
		}
	}

	dma_unmap_sg(kdev, pages->sgl, pages->nents, PCI_DMA_BIDIRECTIONAL);
}

/**
 * i915_gem_gtt_reserve - reserve a node in an address_space (GTT)
 * @vm: the &struct i915_address_space
 * @node: the &struct drm_mm_node (typically i915_vma.node)
 * @size: how much space to allocate inside the GTT,
 *        must be #I915_GTT_PAGE_SIZE aligned
 * @offset: where to insert inside the GTT,
 *          must be #I915_GTT_MIN_ALIGNMENT aligned, and the node
 *          (@offset + @size) must fit within the address space
 * @color: color to apply to node, if this node is not from a VMA,
 *         color must be #I915_COLOR_UNEVICTABLE
 * @flags: control search and eviction behaviour
 *
 * i915_gem_gtt_reserve() tries to insert the @node at the exact @offset inside
 * the address space (using @size and @color). If the @node does not fit, it
 * tries to evict any overlapping nodes from the GTT, including any
 * neighbouring nodes if the colors do not match (to ensure guard pages between
 * differing domains). See i915_gem_evict_for_node() for the gory details
 * on the eviction algorithm. #PIN_NONBLOCK may be used to prevent waiting on
 * evicting active overlapping objects, and any overlapping node that is pinned
 * or marked as unevictable will also result in failure.
 *
 * Returns: 0 on success, -ENOSPC if no suitable hole is found, -EINTR if
 * asked to wait for eviction and interrupted.
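 *
 * A minimal usage sketch (illustrative only, not lifted from an actual
 * caller; @vm, @node, @size, @offset and @color are assumed to be set up
 * by the caller, which also holds whatever locking its address space
 * requires, e.g. vm->mutex):
 *
 * .. code-block:: c
 *
 *	err = i915_gem_gtt_reserve(vm, node, size, offset, color, PIN_NONBLOCK);
 *	if (err)
 *		return err;
 *
 * On success, @node is allocated at exactly [@offset, @offset + @size).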
 */
int i915_gem_gtt_reserve(struct i915_address_space *vm,
			 struct drm_mm_node *node,
			 u64 size, u64 offset, unsigned long color,
			 unsigned int flags)
{
	int err;

	GEM_BUG_ON(!size);
	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(!IS_ALIGNED(offset, I915_GTT_MIN_ALIGNMENT));
	GEM_BUG_ON(range_overflows(offset, size, vm->total));
	GEM_BUG_ON(vm == &vm->i915->ggtt.alias->vm);
	GEM_BUG_ON(drm_mm_node_allocated(node));

	node->size = size;
	node->start = offset;
	node->color = color;

	err = drm_mm_reserve_node(&vm->mm, node);
	if (err != -ENOSPC)
		return err;

	if (flags & PIN_NOEVICT)
		return -ENOSPC;

	err = i915_gem_evict_for_node(vm, node, flags);
	if (err == 0)
		err = drm_mm_reserve_node(&vm->mm, node);

	return err;
}

static u64 random_offset(u64 start, u64 end, u64 len, u64 align)
{
	u64 range, addr;

	GEM_BUG_ON(range_overflows(start, len, end));
	GEM_BUG_ON(round_up(start, align) > round_down(end - len, align));

	range = round_down(end - len, align) - round_up(start, align);
	if (range) {
		if (sizeof(unsigned long) == sizeof(u64)) {
			addr = get_random_long();
		} else {
			addr = get_random_int();
			if (range > U32_MAX) {
				addr <<= 32;
				addr |= get_random_int();
			}
		}
		div64_u64_rem(addr, range, &addr);
		start += addr;
	}

	return round_up(start, align);
}

/**
 * i915_gem_gtt_insert - insert a node into an address_space (GTT)
 * @vm: the &struct i915_address_space
 * @node: the &struct drm_mm_node (typically i915_vma.node)
 * @size: how much space to allocate inside the GTT,
 *        must be #I915_GTT_PAGE_SIZE aligned
 * @alignment: required alignment of starting offset, may be 0 but
 *             if specified, this must be a power-of-two and at least
 *             #I915_GTT_MIN_ALIGNMENT
 * @color: color to apply to node
 * @start: start of any range restriction inside GTT (0 for all),
 *         must be #I915_GTT_PAGE_SIZE aligned
 * @end: end of any range restriction inside GTT (U64_MAX for all),
 *       must be #I915_GTT_PAGE_SIZE aligned if not U64_MAX
 * @flags: control search and eviction behaviour
 *
 * i915_gem_gtt_insert() first searches for an available hole into which
 * it can insert the node. The hole address is aligned to @alignment and
 * its @size must then fit entirely within the [@start, @end] bounds. The
 * nodes on either side of the hole must match @color, or else a guard page
 * will be inserted between the two nodes (or the node evicted). If no
 * suitable hole is found, first a victim is randomly selected and tested
 * for eviction, otherwise the LRU list of objects within the GTT
 * is scanned to find the first set of replacement nodes to create the hole.
 * Those old overlapping nodes are evicted from the GTT (and so must be
 * rebound before any future use). Any node that is currently pinned cannot
 * be evicted (see i915_vma_pin()). Similarly, if the node's VMA is currently
 * active and #PIN_NONBLOCK is specified, that node is also skipped when
 * searching for an eviction candidate. See i915_gem_evict_something() for
 * the gory details on the eviction algorithm.
 *
 * Returns: 0 on success, -ENOSPC if no suitable hole is found, -EINTR if
 * asked to wait for eviction and interrupted.
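 *
 * A minimal usage sketch (illustrative only; the caller is assumed to hold
 * @vm->mutex, to own @node, and to choose @size, @color and @flags itself,
 * the values below being just one plausible combination):
 *
 * .. code-block:: c
 *
 *	err = i915_gem_gtt_insert(vm, node, size, 0, color,
 *				  0, vm->total, PIN_HIGH);
 *	if (err)
 *		return err;
 *
 * On success, node->start holds the offset chosen inside the GTT.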
 */
int i915_gem_gtt_insert(struct i915_address_space *vm,
			struct drm_mm_node *node,
			u64 size, u64 alignment, unsigned long color,
			u64 start, u64 end, unsigned int flags)
{
	enum drm_mm_insert_mode mode;
	u64 offset;
	int err;

	lockdep_assert_held(&vm->mutex);

	GEM_BUG_ON(!size);
	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(alignment && !is_power_of_2(alignment));
	GEM_BUG_ON(alignment && !IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
	GEM_BUG_ON(start >= end);
	GEM_BUG_ON(start > 0  && !IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(end < U64_MAX && !IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(vm == &vm->i915->ggtt.alias->vm);
	GEM_BUG_ON(drm_mm_node_allocated(node));

	if (unlikely(range_overflows(start, size, end)))
		return -ENOSPC;

	if (unlikely(round_up(start, alignment) > round_down(end - size, alignment)))
		return -ENOSPC;

	mode = DRM_MM_INSERT_BEST;
	if (flags & PIN_HIGH)
		mode = DRM_MM_INSERT_HIGHEST;
	if (flags & PIN_MAPPABLE)
		mode = DRM_MM_INSERT_LOW;

	/* We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks,
	 * so we know that we always have a minimum alignment of 4096.
	 * The drm_mm range manager is optimised to return results
	 * with zero alignment, so where possible use the optimal
	 * path.
	 */
	BUILD_BUG_ON(I915_GTT_MIN_ALIGNMENT > I915_GTT_PAGE_SIZE);
	if (alignment <= I915_GTT_MIN_ALIGNMENT)
		alignment = 0;

	err = drm_mm_insert_node_in_range(&vm->mm, node,
					  size, alignment, color,
					  start, end, mode);
	if (err != -ENOSPC)
		return err;

	if (mode & DRM_MM_INSERT_ONCE) {
		err = drm_mm_insert_node_in_range(&vm->mm, node,
						  size, alignment, color,
						  start, end,
						  DRM_MM_INSERT_BEST);
		if (err != -ENOSPC)
			return err;
	}

	if (flags & PIN_NOEVICT)
		return -ENOSPC;

	/*
	 * No free space, pick a slot at random.
	 *
	 * There is a pathological case here using a GTT shared between
	 * mmap and GPU (i.e. ggtt/aliasing_ppgtt but not full-ppgtt):
	 *
	 *    |<-- 256 MiB aperture -->||<-- 1792 MiB unmappable -->|
	 *         (64k objects)            (448k objects)
	 *
	 * Now imagine that the eviction LRU is ordered top-down (just because
	 * pathology meets real life), and that we need to evict an object to
	 * make room inside the aperture. The eviction scan then has to walk
	 * the 448k list before it finds one within range. And now imagine that
	 * it has to search for a new hole between every byte inside the memcpy,
	 * for several simultaneous clients.
	 *
	 * On a full-ppgtt system, if we have run out of available space, there
	 * will be lots and lots of objects in the eviction list! Again,
	 * searching that LRU list may be slow if we are also applying any
	 * range restrictions (e.g. restriction to low 4GiB) and so, for
	 * simplicity and similarity between different GTTs, try the single
	 * random replacement first.
	 */
	offset = random_offset(start, end,
			       size, alignment ?: I915_GTT_MIN_ALIGNMENT);
	err = i915_gem_gtt_reserve(vm, node, size, offset, color, flags);
	if (err != -ENOSPC)
		return err;

	if (flags & PIN_NOSEARCH)
		return -ENOSPC;

	/* Randomly selected placement is pinned, do a search */
	err = i915_gem_evict_something(vm, size, alignment, color,
				       start, end, flags);
	if (err)
		return err;

	return drm_mm_insert_node_in_range(&vm->mm, node,
					   size, alignment, color,
					   start, end, DRM_MM_INSERT_EVICT);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_gem_gtt.c"
#endif