/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/swiotlb.h>

#include "i915_drv.h"
#include "i915_gem.h"
#include "i915_gem_internal.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
#include "i915_utils.h"

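/*
 * Flags for opportunistic page allocations: anything above order 0 should
 * fail fast and silently (QUIET), while the final order-0 attempt is allowed
 * to enter reclaim before giving up (MAYFAIL).
 */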
#define QUIET (__GFP_NORETRY | __GFP_NOWARN)
#define MAYFAIL (__GFP_RETRY_MAYFAIL | __GFP_NOWARN)

static void internal_free_pages(struct sg_table *st)
{
	struct scatterlist *sg;

	for (sg = st->sgl; sg; sg = __sg_next(sg)) {
		if (sg_page(sg))
			__free_pages(sg_page(sg), get_order(sg->length));
	}

	sg_free_table(st);
	kfree(st);
}

static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct sg_table *st;
	struct scatterlist *sg;
	unsigned int sg_page_sizes;
	unsigned int npages;
	int max_order;
	gfp_t gfp;

	max_order = MAX_ORDER;
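	/*
	 * With swiotlb bounce buffering active, a single dma segment must fit
	 * within the swiotlb limit, so clamp the largest page order we will
	 * request to keep every sg segment mappable.
	 */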
#ifdef CONFIG_SWIOTLB
	if (is_swiotlb_active(obj->base.dev->dev)) {
		unsigned int max_segment;

		max_segment = swiotlb_max_segment();
		if (max_segment) {
			max_segment = max_t(unsigned int, max_segment,
					    PAGE_SIZE) >> PAGE_SHIFT;
			max_order = min(max_order, ilog2(max_segment));
		}
	}
#endif

	gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_RECLAIMABLE;
	if (IS_I965GM(i915) || IS_I965G(i915)) {
		/* 965gm cannot relocate objects above 4GiB. */
		gfp &= ~__GFP_HIGHMEM;
		gfp |= __GFP_DMA32;
	}

create_st:
	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		return -ENOMEM;

	npages = obj->base.size / PAGE_SIZE;
	if (sg_alloc_table(st, npages, GFP_KERNEL)) {
		kfree(st);
		return -ENOMEM;
	}

	sg = st->sgl;
	st->nents = 0;
	sg_page_sizes = 0;

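	/*
	 * Fill the sg_table with the largest page order that still fits the
	 * remaining page count, stepping down an order on each allocation
	 * failure; only the final order-0 attempt may enter reclaim.
	 */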
	do {
		int order = min(fls(npages) - 1, max_order);
		struct page *page;

		do {
			page = alloc_pages(gfp | (order ? QUIET : MAYFAIL),
					   order);
			if (page)
				break;
			if (!order--)
				goto err;

			/* Limit subsequent allocations as well */
			max_order = order;
		} while (1);

		sg_set_page(sg, page, PAGE_SIZE << order, 0);
		sg_page_sizes |= PAGE_SIZE << order;
		st->nents++;

		npages -= 1 << order;
		if (!npages) {
			sg_mark_end(sg);
			break;
		}

		sg = __sg_next(sg);
	} while (1);

	if (i915_gem_gtt_prepare_pages(obj, st)) {
		/* Failed to dma-map; try again with single-page sg segments */
		if (get_order(st->sgl->length)) {
			internal_free_pages(st);
			max_order = 0;
			goto create_st;
		}
		goto err;
	}

	__i915_gem_object_set_pages(obj, st, sg_page_sizes);

	return 0;

err:
	/*
	 * Clear the failed entry and terminate the table there, so that
	 * internal_free_pages() only walks and releases the pages we
	 * actually allocated.
	 */
	sg_set_page(sg, NULL, 0, 0);
	sg_mark_end(sg);
	internal_free_pages(st);

	return -ENOMEM;
}

static void i915_gem_object_put_pages_internal(struct drm_i915_gem_object *obj,
					       struct sg_table *pages)
{
	i915_gem_gtt_finish_pages(obj, pages);
	internal_free_pages(pages);

	obj->mm.dirty = false;

	__start_cpu_write(obj);
}

static const struct drm_i915_gem_object_ops i915_gem_object_internal_ops = {
	.name = "i915_gem_object_internal",
	.flags = I915_GEM_OBJECT_IS_SHRINKABLE,
	.get_pages = i915_gem_object_get_pages_internal,
	.put_pages = i915_gem_object_put_pages_internal,
};

struct drm_i915_gem_object *
__i915_gem_object_create_internal(struct drm_i915_private *i915,
				  const struct drm_i915_gem_object_ops *ops,
				  phys_addr_t size)
{
	static struct lock_class_key lock_class;
	struct drm_i915_gem_object *obj;
	unsigned int cache_level;

	GEM_BUG_ON(!size);
	GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));

	if (overflows_type(size, obj->base.size))
		return ERR_PTR(-E2BIG);

	obj = i915_gem_object_alloc();
	if (!obj)
		return ERR_PTR(-ENOMEM);

	drm_gem_private_object_init(&i915->drm, &obj->base, size);
	i915_gem_object_init(obj, ops, &lock_class, 0);
	obj->mem_flags |= I915_BO_FLAG_STRUCT_PAGE;

	/*
	 * Mark the object as volatile, such that the pages are marked as
	 * dontneed whilst they are still pinned. As soon as they are unpinned
	 * they are allowed to be reaped by the shrinker, and the caller is
	 * expected to repopulate - the contents of this object are only valid
	 * whilst active and pinned.
	 */
	i915_gem_object_set_volatile(obj);

	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->write_domain = I915_GEM_DOMAIN_CPU;

	cache_level = HAS_LLC(i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
	i915_gem_object_set_cache_coherency(obj, cache_level);

	return obj;
}

/**
 * i915_gem_object_create_internal: create an object with volatile pages
 * @i915: the i915 device
 * @size: the size in bytes of backing storage to allocate for the object
 *
 * Creates a new object that wraps some internal memory for private use.
 * This object is not backed by swappable storage, and as such its contents
 * are volatile and only valid whilst active and pinned. If the object is
 * reaped by the shrinker, its pages and data will be discarded. Equally, it
 * is not a full GEM object and so not valid for access from userspace. This
 * makes it useful for hardware interfaces like ringbuffers (which are pinned
 * from the time the request is written to the time the hardware stops
 * accessing it), but not for contexts (which need to be preserved when not
 * active for later reuse). Note that it is not cleared upon allocation.
 */
struct drm_i915_gem_object *
i915_gem_object_create_internal(struct drm_i915_private *i915,
				phys_addr_t size)
{
	return __i915_gem_object_create_internal(i915, &i915_gem_object_internal_ops, size);
}
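
/*
 * Example usage (a hypothetical caller, not part of this file): the object
 * must stay pinned for as long as its contents matter, and must be treated
 * as scratch once unpinned, since the shrinker may reap the volatile pages
 * at any point thereafter.
 *
 *	struct drm_i915_gem_object *obj;
 *	struct i915_vma *vma;
 *
 *	obj = i915_gem_object_create_internal(i915, SZ_64K);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 *
 *	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_HIGH);
 *	if (IS_ERR(vma)) {
 *		i915_gem_object_put(obj);
 *		return PTR_ERR(vma);
 *	}
 *
 *	... write commands, let the hardware consume them ...
 *
 *	i915_vma_unpin(vma);
 *	i915_gem_object_put(obj);
 */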