/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include <linux/highmem.h>
#include <linux/shmem_fs.h>
#include <linux/swap.h>

#include <drm/drm.h> /* for drm_legacy.h! */
#include <drm/drm_cache.h>
#include <drm/drm_legacy.h> /* for drm_pci.h! */
#include <drm/drm_pci.h>

#include "gt/intel_gt.h"
#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"

static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
{
	struct address_space *mapping = obj->base.filp->f_mapping;
	struct drm_dma_handle *phys;
	struct sg_table *st;
	struct scatterlist *sg;
	char *vaddr;
	int i;
	int err;

	if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
		return -EINVAL;

	/* Always align to the object size: this allows a single allocation
	 * to handle all possible callers, and given typical object sizes,
	 * the alignment of the buddy allocation will naturally match.
	 */
	phys = drm_pci_alloc(obj->base.dev,
			     roundup_pow_of_two(obj->base.size),
			     roundup_pow_of_two(obj->base.size));
	if (!phys)
		return -ENOMEM;

	/* Copy the object's shmem pages into the contiguous DMA buffer,
	 * flushing the CPU caches as we go.
	 */
	vaddr = phys->vaddr;
	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
		struct page *page;
		char *src;

		page = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			goto err_phys;
		}

		src = kmap_atomic(page);
		memcpy(vaddr, src, PAGE_SIZE);
		drm_clflush_virt_range(vaddr, PAGE_SIZE);
		kunmap_atomic(src);

		put_page(page);
		vaddr += PAGE_SIZE;
	}

	intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt);

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st) {
		err = -ENOMEM;
		goto err_phys;
	}

	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
		kfree(st);
		err = -ENOMEM;
		goto err_phys;
	}

	/* A single scatterlist entry describes the whole object, since the
	 * backing store is one contiguous DMA allocation.
	 */
	sg = st->sgl;
	sg->offset = 0;
	sg->length = obj->base.size;

	sg_dma_address(sg) = phys->busaddr;
	sg_dma_len(sg) = obj->base.size;

	obj->phys_handle = phys;

	__i915_gem_object_set_pages(obj, st, sg->length);

	return 0;

err_phys:
	drm_pci_free(obj->base.dev, phys);

	return err;
}

static void
i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
			       struct sg_table *pages)
{
	__i915_gem_object_release_shmem(obj, pages, false);

	/* If the object was written while it owned the phys backing store,
	 * copy the contents back out to the shmem pages before releasing.
	 */
	if (obj->mm.dirty) {
		struct address_space *mapping = obj->base.filp->f_mapping;
		char *vaddr = obj->phys_handle->vaddr;
		int i;

		for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
			struct page *page;
			char *dst;

			page = shmem_read_mapping_page(mapping, i);
			if (IS_ERR(page))
				continue;

			dst = kmap_atomic(page);
			drm_clflush_virt_range(vaddr, PAGE_SIZE);
			memcpy(dst, vaddr, PAGE_SIZE);
			kunmap_atomic(dst);

			set_page_dirty(page);
			if (obj->mm.madv == I915_MADV_WILLNEED)
				mark_page_accessed(page);
			put_page(page);
			vaddr += PAGE_SIZE;
		}
		obj->mm.dirty = false;
	}

	sg_free_table(pages);
	kfree(pages);

	drm_pci_free(obj->base.dev, obj->phys_handle);
}

static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
	.get_pages = i915_gem_object_get_pages_phys,
	.put_pages = i915_gem_object_put_pages_phys,
};

int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
{
	struct sg_table *pages;
	int err;

	if (align > obj->base.size)
		return -EINVAL;

	if (obj->ops == &i915_gem_phys_ops)
		return 0;

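	/* Only shmem-backed objects can be converted: the copy in
	 * i915_gem_object_get_pages_phys() pulls the pages out of the
	 * object's shmem filp, which other backends do not provide.
	 */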
	if (obj->ops != &i915_gem_shmem_ops)
		return -EINVAL;

	err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
	if (err)
		return err;

	mutex_lock(&obj->mm.lock);

	if (obj->mm.madv != I915_MADV_WILLNEED) {
		err = -EFAULT;
		goto err_unlock;
	}

	if (obj->mm.quirked) {
		err = -EFAULT;
		goto err_unlock;
	}

	if (obj->mm.mapping) {
		err = -EBUSY;
		goto err_unlock;
	}

	pages = __i915_gem_object_unset_pages(obj);

	obj->ops = &i915_gem_phys_ops;

	err = ____i915_gem_object_get_pages(obj);
	if (err)
		goto err_xfer;

	/* Perma-pin (until release) the physical set of pages */
	__i915_gem_object_pin_pages(obj);

	if (!IS_ERR_OR_NULL(pages))
		i915_gem_shmem_ops.put_pages(obj, pages);
	mutex_unlock(&obj->mm.lock);
	return 0;

err_xfer:
	/* On failure, restore the original shmem backing store. */
	obj->ops = &i915_gem_shmem_ops;
	if (!IS_ERR_OR_NULL(pages)) {
		unsigned int sg_page_sizes = i915_sg_page_sizes(pages->sgl);

		__i915_gem_object_set_pages(obj, pages, sg_page_sizes);
	}
err_unlock:
	mutex_unlock(&obj->mm.lock);
	return err;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_gem_phys.c"
#endif