/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include <linux/highmem.h>
#include <linux/shmem_fs.h>
#include <linux/swap.h>

#include <drm/drm_cache.h>

#include "gt/intel_gt.h"
#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_gem_region.h"
#include "i915_gem_tiling.h"
#include "i915_scatterlist.h"

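/*
 * "Phys" objects swap an object's shmem pages for a single physically
 * contiguous, CPU-coherent chunk obtained from dma_alloc_coherent().
 * A few legacy hardware interfaces (e.g. cursor planes on very old
 * platforms) require one contiguous physical allocation rather than an
 * arbitrary set of pages.
 */
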
static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
{
	struct address_space *mapping = obj->base.filp->f_mapping;
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct scatterlist *sg;
	struct sg_table *st;
	dma_addr_t dma;
	void *vaddr;
	void *dst;
	int i;

	/* Contiguous chunk, with a single scatterlist element */
	if (overflows_type(obj->base.size, sg->length))
		return -E2BIG;

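	/*
	 * Objects that need bit17 swizzling would require a swizzle-aware
	 * copy, which this path does not implement.
	 */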
	if (GEM_WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
		return -EINVAL;

	/*
	 * Always aligning to the object size allows a single allocation
	 * to handle all possible callers, and given typical object sizes,
	 * the alignment of the buddy allocation will naturally match.
	 */
	vaddr = dma_alloc_coherent(obj->base.dev->dev,
				   roundup_pow_of_two(obj->base.size),
				   &dma, GFP_KERNEL);
	if (!vaddr)
		return -ENOMEM;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		goto err_pci;

	if (sg_alloc_table(st, 1, GFP_KERNEL))
		goto err_st;

	sg = st->sgl;
	sg->offset = 0;
	sg->length = obj->base.size;

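	/*
	 * There is no struct page behind a coherent allocation, so stash
	 * the kernel virtual address in the sg page slot instead; the
	 * object is flagged as not struct-page backed further below.
	 */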
	sg_assign_page(sg, (struct page *)vaddr);
	sg_dma_address(sg) = dma;
	sg_dma_len(sg) = obj->base.size;

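	/*
	 * Copy the existing shmem contents into the contiguous chunk,
	 * flushing each destination page out of the CPU caches as we go.
	 */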
	dst = vaddr;
	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
		struct page *page;
		void *src;

		page = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(page))
			goto err_sg;

		src = kmap_atomic(page);
		memcpy(dst, src, PAGE_SIZE);
		drm_clflush_virt_range(dst, PAGE_SIZE);
		kunmap_atomic(src);

		put_page(page);
		dst += PAGE_SIZE;
	}

	intel_gt_chipset_flush(to_gt(i915));

	/* We're no longer struct page backed */
	obj->mem_flags &= ~I915_BO_FLAG_STRUCT_PAGE;
	__i915_gem_object_set_pages(obj, st);

	return 0;

err_sg:
	/* sg_alloc_table() succeeded, so release the table entries too */
	sg_free_table(st);
err_st:
	kfree(st);
err_pci:
	dma_free_coherent(obj->base.dev->dev,
			  roundup_pow_of_two(obj->base.size),
			  vaddr, dma);
	return -ENOMEM;
}

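/*
 * Undo i915_gem_object_get_pages_phys(): write any dirty contents back
 * to the object's shmem pages, then release the contiguous allocation.
 */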
void
i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
			       struct sg_table *pages)
{
	dma_addr_t dma = sg_dma_address(pages->sgl);
	void *vaddr = sg_page(pages->sgl);

	__i915_gem_object_release_shmem(obj, pages, false);

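	/*
	 * If the object was written while in the phys backing store, copy
	 * the contents back out to the shmem pages so that nothing is
	 * lost when the contiguous chunk is released.
	 */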
	if (obj->mm.dirty) {
		struct address_space *mapping = obj->base.filp->f_mapping;
		void *src = vaddr;
		int i;

		for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
			struct page *page;
			char *dst;

			page = shmem_read_mapping_page(mapping, i);
			if (IS_ERR(page))
				continue;

			dst = kmap_atomic(page);
			drm_clflush_virt_range(src, PAGE_SIZE);
			memcpy(dst, src, PAGE_SIZE);
			kunmap_atomic(dst);

			set_page_dirty(page);
			if (obj->mm.madv == I915_MADV_WILLNEED)
				mark_page_accessed(page);
			put_page(page);

			src += PAGE_SIZE;
		}
		obj->mm.dirty = false;
	}

	sg_free_table(pages);
	kfree(pages);

	dma_free_coherent(obj->base.dev->dev,
			  roundup_pow_of_two(obj->base.size),
			  vaddr, dma);
}

int i915_gem_object_pwrite_phys(struct drm_i915_gem_object *obj,
				const struct drm_i915_gem_pwrite *args)
{
	void *vaddr = sg_page(obj->mm.pages->sgl) + args->offset;
	char __user *user_data = u64_to_user_ptr(args->data_ptr);
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	int err;

	err = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_ALL,
				   MAX_SCHEDULE_TIMEOUT);
	if (err)
		return err;

	/*
	 * We manually control the domain here and pretend that it
	 * remains coherent i.e. in the GTT domain, like shmem_pwrite.
	 */
	i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);

	if (copy_from_user(vaddr, user_data, args->size))
		return -EFAULT;

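	/*
	 * Flush the written range out of the CPU caches, then flush the
	 * chipset write buffers, so the update is visible to any device
	 * reading the physical pages directly.
	 */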
	drm_clflush_virt_range(vaddr, args->size);
	intel_gt_chipset_flush(to_gt(i915));

	i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
	return 0;
}

int i915_gem_object_pread_phys(struct drm_i915_gem_object *obj,
			       const struct drm_i915_gem_pread *args)
{
	void *vaddr = sg_page(obj->mm.pages->sgl) + args->offset;
	char __user *user_data = u64_to_user_ptr(args->data_ptr);
	int err;

	err = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT);
	if (err)
		return err;

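	/* Flush any stale cachelines so we read what the device wrote */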
	drm_clflush_virt_range(vaddr, args->size);
	if (copy_to_user(user_data, vaddr, args->size))
		return -EFAULT;

	return 0;
}

static int i915_gem_object_shmem_to_phys(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;
	int err;

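	/*
	 * Detach the current shmem page array first; if allocating the
	 * phys backing store fails, the original pages are reinstalled
	 * below.
	 */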
	pages = __i915_gem_object_unset_pages(obj);

	err = i915_gem_object_get_pages_phys(obj);
	if (err)
		goto err_xfer;

	/* Perma-pin (until release) the physical set of pages */
	__i915_gem_object_pin_pages(obj);

	if (!IS_ERR_OR_NULL(pages))
		i915_gem_object_put_pages_shmem(obj, pages);

	i915_gem_object_release_memory_region(obj);
	return 0;

err_xfer:
	if (!IS_ERR_OR_NULL(pages))
		__i915_gem_object_set_pages(obj, pages);
	return err;
}

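/**
 * i915_gem_object_attach_phys - swap shmem pages for a contiguous backing store
 * @obj: the shmem-backed object to migrate
 * @align: required physical alignment of the backing store, in bytes
 *
 * Replaces the object's shmem pages with a single physically contiguous,
 * CPU-coherent allocation. The allocation is rounded up to a power of two
 * of the object size and the DMA API aligns it accordingly, so any @align
 * up to the object size is satisfied. The caller must hold the object lock.
 *
 * Returns: 0 on success, negative error code on failure.
 */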
int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
{
	int err;

	assert_object_held(obj);

	if (align > obj->base.size)
		return -EINVAL;

	if (!i915_gem_object_is_shmem(obj))
		return -EINVAL;

	if (!i915_gem_object_has_struct_page(obj))
		return 0;

	err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
	if (err)
		return err;

	if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
		drm_dbg(obj->base.dev,
			"Attempting to obtain a purgeable object\n");
		return -EFAULT;
	}

	if (i915_gem_object_has_tiling_quirk(obj))
		return -EFAULT;

	if (obj->mm.mapping || i915_gem_object_has_pinned_pages(obj))
		return -EBUSY;

	return i915_gem_object_shmem_to_phys(obj);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_gem_phys.c"
#endif