/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include <linux/highmem.h>
#include <linux/shmem_fs.h>
#include <linux/swap.h>

#include <drm/drm_cache.h>

#include "gt/intel_gt.h"
#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_gem_region.h"
#include "i915_scatterlist.h"

static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
{
	struct address_space *mapping = obj->base.filp->f_mapping;
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct scatterlist *sg;
	struct sg_table *st;
	dma_addr_t dma;
	void *vaddr;
	void *dst;
	int i;

	if (GEM_WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
		return -EINVAL;

	/*
	 * Always aligning to the object size, allows a single allocation
	 * to handle all possible callers, and given typical object sizes,
	 * the alignment of the buddy allocation will naturally match.
	 */
	vaddr = dma_alloc_coherent(obj->base.dev->dev,
				   roundup_pow_of_two(obj->base.size),
				   &dma, GFP_KERNEL);
	if (!vaddr)
		return -ENOMEM;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		goto err_pci;

	if (sg_alloc_table(st, 1, GFP_KERNEL))
		goto err_st;

	sg = st->sgl;
	sg->offset = 0;
	sg->length = obj->base.size;

	sg_assign_page(sg, (struct page *)vaddr);
	sg_dma_address(sg) = dma;
	sg_dma_len(sg) = obj->base.size;

	/* Copy the existing shmem contents into the contiguous buffer */
	dst = vaddr;
	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
		struct page *page;
		void *src;

		page = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(page))
			goto err_st;

		src = kmap_atomic(page);
		memcpy(dst, src, PAGE_SIZE);
		drm_clflush_virt_range(dst, PAGE_SIZE);
		kunmap_atomic(src);

		put_page(page);
		dst += PAGE_SIZE;
	}

	intel_gt_chipset_flush(to_gt(i915));

	/* We're no longer struct page backed */
	obj->mem_flags &= ~I915_BO_FLAG_STRUCT_PAGE;
	__i915_gem_object_set_pages(obj, st, sg->length);

	return 0;

err_st:
	kfree(st);
err_pci:
	dma_free_coherent(obj->base.dev->dev,
			  roundup_pow_of_two(obj->base.size),
			  vaddr, dma);
	return -ENOMEM;
}

void
i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
			       struct sg_table *pages)
{
	dma_addr_t dma = sg_dma_address(pages->sgl);
	void *vaddr = sg_page(pages->sgl);

	__i915_gem_object_release_shmem(obj, pages, false);

	if (obj->mm.dirty) {
		struct address_space *mapping = obj->base.filp->f_mapping;
		void *src = vaddr;
		int i;

		/* Write the modified contents back out to the shmem pages */
		for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
			struct page *page;
			char *dst;

			page = shmem_read_mapping_page(mapping, i);
			if (IS_ERR(page))
				continue;

			dst = kmap_atomic(page);
			drm_clflush_virt_range(src, PAGE_SIZE);
			memcpy(dst, src, PAGE_SIZE);
			kunmap_atomic(dst);

			set_page_dirty(page);
			if (obj->mm.madv == I915_MADV_WILLNEED)
				mark_page_accessed(page);
			put_page(page);

			src += PAGE_SIZE;
		}
		obj->mm.dirty = false;
	}

	sg_free_table(pages);
	kfree(pages);

	dma_free_coherent(obj->base.dev->dev,
			  roundup_pow_of_two(obj->base.size),
			  vaddr, dma);
}

int i915_gem_object_pwrite_phys(struct drm_i915_gem_object *obj,
				const struct drm_i915_gem_pwrite *args)
{
	void *vaddr = sg_page(obj->mm.pages->sgl) + args->offset;
	char __user *user_data = u64_to_user_ptr(args->data_ptr);
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	int err;

	err = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_ALL,
				   MAX_SCHEDULE_TIMEOUT);
	if (err)
		return err;

	/*
	 * We manually control the domain here and pretend that it
	 * remains coherent i.e. in the GTT domain, like shmem_pwrite.
	 */
	i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);

	if (copy_from_user(vaddr, user_data, args->size))
		return -EFAULT;

	drm_clflush_virt_range(vaddr, args->size);
	intel_gt_chipset_flush(to_gt(i915));

	i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
	return 0;
}

int i915_gem_object_pread_phys(struct drm_i915_gem_object *obj,
			       const struct drm_i915_gem_pread *args)
{
	void *vaddr = sg_page(obj->mm.pages->sgl) + args->offset;
	char __user *user_data = u64_to_user_ptr(args->data_ptr);
	int err;

	err = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT);
	if (err)
		return err;

	drm_clflush_virt_range(vaddr, args->size);
	if (copy_to_user(user_data, vaddr, args->size))
		return -EFAULT;

	return 0;
}

static int i915_gem_object_shmem_to_phys(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;
	int err;

	pages = __i915_gem_object_unset_pages(obj);

	err = i915_gem_object_get_pages_phys(obj);
	if (err)
		goto err_xfer;

	/* Perma-pin (until release) the physical set of pages */
	__i915_gem_object_pin_pages(obj);

	if (!IS_ERR_OR_NULL(pages))
		i915_gem_object_put_pages_shmem(obj, pages);

	i915_gem_object_release_memory_region(obj);
	return 0;

err_xfer:
	if (!IS_ERR_OR_NULL(pages)) {
		unsigned int sg_page_sizes = i915_sg_dma_sizes(pages->sgl);

		__i915_gem_object_set_pages(obj, pages, sg_page_sizes);
	}
	return err;
}

int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
{
	int err;

	assert_object_held(obj);

	if (align > obj->base.size)
		return -EINVAL;

	if (!i915_gem_object_is_shmem(obj))
		return -EINVAL;

	if (!i915_gem_object_has_struct_page(obj))
		return 0;

	err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
	if (err)
		return err;

	if (obj->mm.madv != I915_MADV_WILLNEED)
		return -EFAULT;

	if (i915_gem_object_has_tiling_quirk(obj))
		return -EFAULT;

	if (obj->mm.mapping || i915_gem_object_has_pinned_pages(obj))
		return -EBUSY;

	if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
		drm_dbg(obj->base.dev,
			"Attempting to obtain a purgeable object\n");
		return -EFAULT;
	}

	return i915_gem_object_shmem_to_phys(obj);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_gem_phys.c"
#endif