/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2008 Intel Corporation
 */

#include <linux/string.h>
#include <linux/bitops.h>

#include "i915_drv.h"
#include "i915_gem.h"
#include "i915_gem_ioctls.h"
#include "i915_gem_mman.h"
#include "i915_gem_object.h"
#include "i915_gem_tiling.h"
#include "i915_reg.h"

/**
 * DOC: buffer object tiling
 *
 * i915_gem_set_tiling_ioctl() and i915_gem_get_tiling_ioctl() are the
 * userspace interface to declare fence register requirements.
 *
 * In principle GEM doesn't care at all about the internal data layout of an
 * object, and hence it also doesn't care about tiling or swizzling. There are
 * two exceptions:
 *
 * - For X and Y tiling the hardware provides detilers for CPU access, the
 *   so-called fences. Since there is only a limited number of them the kernel
 *   must manage these, and therefore userspace must tell the kernel the
 *   object tiling if it wants to use fences for detiling.
 * - Gen3 and gen4 platforms have a swizzling pattern for tiled objects which
 *   depends upon the physical page frame number. When swapping such objects
 *   the page frame number might change and the kernel must be able to fix
 *   this up, and hence must know the tiling. Note that on a subset of
 *   platforms with asymmetric memory channel population the swizzling pattern
 *   changes in an unknown way, and for those the kernel simply forbids
 *   swapping completely.
 *
 * Since neither of these applies to new tiling layouts on modern platforms,
 * like W, Ys and Yf tiling, GEM only allows object tiling to be set to X or Y
 * tiled. Anything else can be handled in userspace entirely, without the
 * kernel's involvement.
 */
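/*
 * For illustration, userspace typically drives this interface through
 * libdrm's drmIoctl(). A minimal sketch, with hypothetical fd, handle and
 * stride values and all error handling elided:
 *
 *	struct drm_i915_gem_set_tiling set = {
 *		.handle = handle,
 *		.tiling_mode = I915_TILING_X,
 *		.stride = 4096,
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &set);
 *
 * On return, set.swizzle_mode reports the bit 6 swizzling in effect, and
 * set.tiling_mode/set.stride reflect what was actually applied (the kernel
 * may force I915_TILING_NONE when the swizzling is unknown).
 */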
/**
 * i915_gem_fence_size - required global GTT size for a fence
 * @i915: i915 device
 * @size: object size
 * @tiling: tiling mode
 * @stride: tiling stride
 *
 * Return the required global GTT size for a fence (view of a tiled object),
 * taking into account potential fence register mapping.
 */
u32 i915_gem_fence_size(struct drm_i915_private *i915,
			u32 size, unsigned int tiling, unsigned int stride)
{
	u32 ggtt_size;

	GEM_BUG_ON(!size);

	if (tiling == I915_TILING_NONE)
		return size;

	GEM_BUG_ON(!stride);

	if (GRAPHICS_VER(i915) >= 4) {
		stride *= i915_gem_tile_height(tiling);
		GEM_BUG_ON(!IS_ALIGNED(stride, I965_FENCE_PAGE));
		return roundup(size, stride);
	}

	/* Previous chips need a power-of-two fence region when tiling */
	if (GRAPHICS_VER(i915) == 3)
		ggtt_size = 1024 * 1024;
	else
		ggtt_size = 512 * 1024;

	while (ggtt_size < size)
		ggtt_size <<= 1;

	return ggtt_size;
}
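/*
 * A worked example with illustrative numbers: a 600KiB X-tiled object on
 * gen3 fits within the 1MiB minimum, so the fence region is 1MiB; on gen2
 * the 512KiB minimum is doubled once to cover it, also yielding 1MiB. On
 * gen4+ the same object with a 4096 byte stride instead needs
 * roundup(600KiB, 4096 * 8) = 608KiB, since X tiles are 8 rows tall.
 */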
/**
 * i915_gem_fence_alignment - required global GTT alignment for a fence
 * @i915: i915 device
 * @size: object size
 * @tiling: tiling mode
 * @stride: tiling stride
 *
 * Return the required global GTT alignment for a fence (a view of a tiled
 * object), taking into account potential fence register mapping.
 */
u32 i915_gem_fence_alignment(struct drm_i915_private *i915, u32 size,
			     unsigned int tiling, unsigned int stride)
{
	GEM_BUG_ON(!size);

	/*
	 * Minimum alignment is 4k (GTT page size), but might be greater
	 * if a fence register is needed for the object.
	 */
	if (tiling == I915_TILING_NONE)
		return I915_GTT_MIN_ALIGNMENT;

	if (GRAPHICS_VER(i915) >= 4)
		return I965_FENCE_PAGE;

	/*
	 * Previous chips need to be aligned to the size of the smallest
	 * fence register that can contain the object.
	 */
	return i915_gem_fence_size(i915, size, tiling, stride);
}

/* Check pitch constraints for all chips & tiling formats */
static bool
i915_tiling_ok(struct drm_i915_gem_object *obj,
	       unsigned int tiling, unsigned int stride)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	unsigned int tile_width;

	/* Linear is always fine */
	if (tiling == I915_TILING_NONE)
		return true;

	if (tiling > I915_TILING_LAST)
		return false;

	/*
	 * Check maximum stride & object size: i965+ stores the end address
	 * of the GTT mapping in the fence reg, so don't bother to check
	 * the size.
	 */
	if (GRAPHICS_VER(i915) >= 7) {
		if (stride / 128 > GEN7_FENCE_MAX_PITCH_VAL)
			return false;
	} else if (GRAPHICS_VER(i915) >= 4) {
		if (stride / 128 > I965_FENCE_MAX_PITCH_VAL)
			return false;
	} else {
		if (stride > 8192)
			return false;

		if (!is_power_of_2(stride))
			return false;
	}

	if (GRAPHICS_VER(i915) == 2 ||
	    (tiling == I915_TILING_Y && HAS_128_BYTE_Y_TILING(i915)))
		tile_width = 128;
	else
		tile_width = 512;

	if (!stride || !IS_ALIGNED(stride, tile_width))
		return false;

	return true;
}
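/*
 * For example (illustrative strides): with the 512 byte X tile width,
 * gen3 accepts a stride of 2048 (tile aligned, a power of two and below
 * the 8192 limit) but rejects 1536, which is tile aligned yet not a
 * power of two; from gen4 onwards the power-of-two restriction is
 * lifted, so 1536 becomes legal while the tile-width alignment and the
 * fence pitch limits still apply.
 */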
static bool i915_vma_fence_prepare(struct i915_vma *vma,
				   int tiling_mode, unsigned int stride)
{
	struct drm_i915_private *i915 = vma->vm->i915;
	u32 size, alignment;

	if (!i915_vma_is_map_and_fenceable(vma))
		return true;

	size = i915_gem_fence_size(i915, vma->size, tiling_mode, stride);
	if (vma->node.size < size)
		return false;

	alignment = i915_gem_fence_alignment(i915, vma->size, tiling_mode, stride);
	if (!IS_ALIGNED(vma->node.start, alignment))
		return false;

	return true;
}

/* Make the current GTT allocation valid for the change in tiling. */
static int
i915_gem_object_fence_prepare(struct drm_i915_gem_object *obj,
			      int tiling_mode, unsigned int stride)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
	struct i915_vma *vma, *vn;
	LIST_HEAD(unbind);
	int ret = 0;

	if (tiling_mode == I915_TILING_NONE)
		return 0;

	mutex_lock(&ggtt->vm.mutex);

	spin_lock(&obj->vma.lock);
	for_each_ggtt_vma(vma, obj) {
		GEM_BUG_ON(vma->vm != &ggtt->vm);

		if (i915_vma_fence_prepare(vma, tiling_mode, stride))
			continue;

		list_move(&vma->vm_link, &unbind);
	}
	spin_unlock(&obj->vma.lock);

	list_for_each_entry_safe(vma, vn, &unbind, vm_link) {
		ret = __i915_vma_unbind(vma);
		if (ret) {
			/* Restore the remaining vma on an error */
			list_splice(&unbind, &ggtt->vm.bound_list);
			break;
		}
	}

	mutex_unlock(&ggtt->vm.mutex);

	return ret;
}

int
i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
			   unsigned int tiling, unsigned int stride)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_vma *vma;
	int err;

	/* Make sure we don't cross-contaminate obj->tiling_and_stride */
	BUILD_BUG_ON(I915_TILING_LAST & STRIDE_MASK);

	GEM_BUG_ON(!i915_tiling_ok(obj, tiling, stride));
	GEM_BUG_ON(!stride ^ (tiling == I915_TILING_NONE));

	if ((tiling | stride) == obj->tiling_and_stride)
		return 0;

	if (i915_gem_object_is_framebuffer(obj))
		return -EBUSY;

	/*
	 * We need to rebind the object if its current allocation
	 * no longer meets the alignment restrictions for its new
	 * tiling mode. Otherwise we can just leave it alone, but
	 * need to ensure that any fence register is updated before
	 * the next fenced (either through the GTT or by the BLT unit
	 * on older GPUs) access.
	 *
	 * After updating the tiling parameters, we then flag whether
	 * we need to update an associated fence register. Note this
	 * has to also include the unfenced register the GPU uses
	 * whilst executing a fenced command for an untiled object.
	 */

	i915_gem_object_lock(obj, NULL);
	if (i915_gem_object_is_framebuffer(obj)) {
		i915_gem_object_unlock(obj);
		return -EBUSY;
	}

	err = i915_gem_object_fence_prepare(obj, tiling, stride);
	if (err) {
		i915_gem_object_unlock(obj);
		return err;
	}

	/*
	 * If the memory has unknown (i.e. varying) swizzling, we pin the
	 * pages to prevent them being swapped out and causing corruption
	 * due to the change in swizzling.
	 */
	if (i915_gem_object_has_pages(obj) &&
	    obj->mm.madv == I915_MADV_WILLNEED &&
	    i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
		if (tiling == I915_TILING_NONE) {
			GEM_BUG_ON(!i915_gem_object_has_tiling_quirk(obj));
			i915_gem_object_clear_tiling_quirk(obj);
			i915_gem_object_make_shrinkable(obj);
		}
		if (!i915_gem_object_is_tiled(obj)) {
			GEM_BUG_ON(i915_gem_object_has_tiling_quirk(obj));
			i915_gem_object_make_unshrinkable(obj);
			i915_gem_object_set_tiling_quirk(obj);
		}
	}

	spin_lock(&obj->vma.lock);
	for_each_ggtt_vma(vma, obj) {
		vma->fence_size =
			i915_gem_fence_size(i915, vma->size, tiling, stride);
		vma->fence_alignment =
			i915_gem_fence_alignment(i915,
						 vma->size, tiling, stride);

		if (vma->fence)
			vma->fence->dirty = true;
	}
	spin_unlock(&obj->vma.lock);

	obj->tiling_and_stride = tiling | stride;
	i915_gem_object_unlock(obj);

	/* Force the fence to be reacquired for GTT access */
	i915_gem_object_release_mmap_gtt(obj);

	/* Try to preallocate memory required to save swizzling on put-pages */
	if (i915_gem_object_needs_bit17_swizzle(obj)) {
		if (!obj->bit_17) {
			obj->bit_17 = bitmap_zalloc(obj->base.size >> PAGE_SHIFT,
						    GFP_KERNEL);
		}
	} else {
		bitmap_free(obj->bit_17);
		obj->bit_17 = NULL;
	}

	return 0;
}
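/*
 * As a sketch of why the bit_17 bitmap above is needed: with
 * I915_BIT_6_SWIZZLE_9_10_17, bit 6 of a physical address is XORed with
 * bits 9, 10 and 17, roughly (illustrative, following the uapi
 * description of the swizzle modes):
 *
 *	swizzled = addr ^ (((addr >> 3) ^ (addr >> 4) ^ (addr >> 11)) & BIT(6));
 *
 * Bits 9 and 10 lie within a 4KiB page, but bit 17 is a property of the
 * physical page frame, which may change when a page is swapped out and
 * back in. Recording bit 17 per page lets the get-pages/put-pages paths
 * re-swizzle the data when that happens.
 */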
/**
 * i915_gem_set_tiling_ioctl - IOCTL handler to set tiling mode
 * @dev: DRM device
 * @data: data pointer for the ioctl
 * @file: DRM file for the ioctl call
 *
 * Sets the tiling mode of an object, returning the required swizzling of
 * bit 6 of addresses in the object.
 *
 * Called by the user via ioctl.
 *
 * Returns:
 * Zero on success, negative errno on failure.
 */
int
i915_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_set_tiling *args = data;
	struct drm_i915_gem_object *obj;
	int err;

	if (!to_gt(dev_priv)->ggtt->num_fences)
		return -EOPNOTSUPP;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/*
	 * The tiling mode of proxy objects is handled by their generator,
	 * and is not allowed to be changed by userspace.
	 */
	if (i915_gem_object_is_proxy(obj)) {
		err = -ENXIO;
		goto err;
	}

	if (!i915_tiling_ok(obj, args->tiling_mode, args->stride)) {
		err = -EINVAL;
		goto err;
	}

	if (args->tiling_mode == I915_TILING_NONE) {
		args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
		args->stride = 0;
	} else {
		if (args->tiling_mode == I915_TILING_X)
			args->swizzle_mode = to_gt(dev_priv)->ggtt->bit_6_swizzle_x;
		else
			args->swizzle_mode = to_gt(dev_priv)->ggtt->bit_6_swizzle_y;

		/*
		 * Hide bit 17 swizzling from the user. This prevents old Mesa
		 * from aborting the application on sw fallbacks to bit 17,
		 * and we use the pread/pwrite bit17 paths to swizzle for it.
		 * If there was a user that was relying on the swizzle
		 * information for drm_intel_bo_map()ed reads/writes this would
		 * break it, but we don't have any of those.
		 */
		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
			args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
			args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;

		/* If we can't handle the swizzling, make it untiled. */
		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) {
			args->tiling_mode = I915_TILING_NONE;
			args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
			args->stride = 0;
		}
	}

	err = i915_gem_object_set_tiling(obj, args->tiling_mode, args->stride);

	/* We have to maintain this existing ABI... */
	args->stride = i915_gem_object_get_stride(obj);
	args->tiling_mode = i915_gem_object_get_tiling(obj);

err:
	i915_gem_object_put(obj);
	return err;
}

/**
 * i915_gem_get_tiling_ioctl - IOCTL handler to get tiling mode
 * @dev: DRM device
 * @data: data pointer for the ioctl
 * @file: DRM file for the ioctl call
 *
 * Returns the current tiling mode and required bit 6 swizzling for the object.
 *
 * Called by the user via ioctl.
 *
 * Returns:
 * Zero on success, negative errno on failure.
 */
int
i915_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	struct drm_i915_gem_get_tiling *args = data;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj;
	int err = -ENOENT;

	if (!to_gt(dev_priv)->ggtt->num_fences)
		return -EOPNOTSUPP;

	rcu_read_lock();
	obj = i915_gem_object_lookup_rcu(file, args->handle);
	if (obj) {
		args->tiling_mode =
			READ_ONCE(obj->tiling_and_stride) & TILING_MASK;
		err = 0;
	}
	rcu_read_unlock();
	if (unlikely(err))
		return err;

	switch (args->tiling_mode) {
	case I915_TILING_X:
		args->swizzle_mode = to_gt(dev_priv)->ggtt->bit_6_swizzle_x;
		break;
	case I915_TILING_Y:
		args->swizzle_mode = to_gt(dev_priv)->ggtt->bit_6_swizzle_y;
		break;
	default:
	case I915_TILING_NONE:
		args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
		break;
	}

	/* Hide bit 17 from the user -- see comment in i915_gem_set_tiling */
	if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
		args->phys_swizzle_mode = I915_BIT_6_SWIZZLE_UNKNOWN;
	else
		args->phys_swizzle_mode = args->swizzle_mode;
	if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
		args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
	if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
		args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;

	return 0;
}
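/*
 * For illustration, the matching userspace query, again only a sketch
 * with a hypothetical fd and handle and no error handling:
 *
 *	struct drm_i915_gem_get_tiling get = { .handle = handle };
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_GET_TILING, &get);
 *
 * get.tiling_mode and get.swizzle_mode then mirror what set_tiling
 * reported, while get.phys_swizzle_mode differing from get.swizzle_mode
 * tells userspace that the kernel is hiding bit 17 swizzling behind the
 * pread/pwrite paths.
 */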