/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include <linux/scatterlist.h>
#include <linux/slab.h>

#include "i915_drv.h"
#include "i915_gem.h"
#include "i915_gem_internal.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
#include "i915_utils.h"

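/*
 * High-order allocations use QUIET (fail fast, no warnings) so that we can
 * silently fall back to smaller orders; the final order-0 attempt uses
 * MAYFAIL to retry reclaim harder but still give up without invoking the
 * OOM killer.
 */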
#define QUIET (__GFP_NORETRY | __GFP_NOWARN)
#define MAYFAIL (__GFP_RETRY_MAYFAIL | __GFP_NOWARN)

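/* Release each chunk at the order it was allocated, then the table itself. */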
static void internal_free_pages(struct sg_table *st)
{
	struct scatterlist *sg;

	for (sg = st->sgl; sg; sg = __sg_next(sg)) {
		if (sg_page(sg))
			__free_pages(sg_page(sg), get_order(sg->length));
	}

	sg_free_table(st);
	kfree(st);
}

static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct sg_table *st;
	struct scatterlist *sg;
	unsigned int npages;
	int max_order = MAX_ORDER;
	unsigned int max_segment;
	gfp_t gfp;

	max_segment = i915_sg_segment_size(i915->drm.dev) >> PAGE_SHIFT;
	max_order = min(max_order, get_order(max_segment));

	gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_RECLAIMABLE;
	if (IS_I965GM(i915) || IS_I965G(i915)) {
		/* 965gm cannot relocate objects above 4GiB. */
		gfp &= ~__GFP_HIGHMEM;
		gfp |= __GFP_DMA32;
	}

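	/*
	 * We may restart here, with max_order forced to zero, if the
	 * resulting sg-table cannot be dma-mapped with large segments.
	 */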
create_st:
	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		return -ENOMEM;

	npages = obj->base.size / PAGE_SIZE;
	if (sg_alloc_table(st, npages, GFP_KERNEL)) {
		kfree(st);
		return -ENOMEM;
	}

	sg = st->sgl;
	st->nents = 0;

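	/*
	 * Greedily grab the largest power-of-two chunk that fits in the
	 * remaining space (and under max_order), stepping the order down
	 * whenever an allocation fails.
	 */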
	do {
		int order = min(fls(npages) - 1, max_order);
		struct page *page;

		do {
			page = alloc_pages(gfp | (order ? QUIET : MAYFAIL),
					   order);
			if (page)
				break;
			if (!order--)
				goto err;

			/* Limit subsequent allocations as well */
			max_order = order;
		} while (1);

		sg_set_page(sg, page, PAGE_SIZE << order, 0);
		st->nents++;

		npages -= 1 << order;
		if (!npages) {
			sg_mark_end(sg);
			break;
		}

		sg = __sg_next(sg);
	} while (1);

	if (i915_gem_gtt_prepare_pages(obj, st)) {
		/* Failed to dma-map; try again with single-page sg segments */
		if (get_order(st->sgl->length)) {
			internal_free_pages(st);
			max_order = 0;
			goto create_st;
		}
		goto err;
	}

	__i915_gem_object_set_pages(obj, st);

	return 0;

err:
	sg_set_page(sg, NULL, 0, 0);
	sg_mark_end(sg);
	internal_free_pages(st);

	return -ENOMEM;
}

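/*
 * Unmap and free the backing store; the object is left clean and in the
 * CPU write domain, ready to be repopulated by the next get_pages.
 */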
static void i915_gem_object_put_pages_internal(struct drm_i915_gem_object *obj,
					       struct sg_table *pages)
{
	i915_gem_gtt_finish_pages(obj, pages);
	internal_free_pages(pages);

	obj->mm.dirty = false;

	__start_cpu_write(obj);
}

static const struct drm_i915_gem_object_ops i915_gem_object_internal_ops = {
	.name = "i915_gem_object_internal",
	.flags = I915_GEM_OBJECT_IS_SHRINKABLE,
	.get_pages = i915_gem_object_get_pages_internal,
	.put_pages = i915_gem_object_put_pages_internal,
};

struct drm_i915_gem_object *
__i915_gem_object_create_internal(struct drm_i915_private *i915,
				  const struct drm_i915_gem_object_ops *ops,
				  phys_addr_t size)
{
	static struct lock_class_key lock_class;
	struct drm_i915_gem_object *obj;
	unsigned int cache_level;

	GEM_BUG_ON(!size);
	GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));

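	/* size is phys_addr_t, which may be wider than obj->base.size. */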
	if (overflows_type(size, obj->base.size))
		return ERR_PTR(-E2BIG);

	obj = i915_gem_object_alloc();
	if (!obj)
		return ERR_PTR(-ENOMEM);

	drm_gem_private_object_init(&i915->drm, &obj->base, size);
	i915_gem_object_init(obj, ops, &lock_class, 0);
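	/* Internal objects are always backed by individual struct pages. */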
	obj->mem_flags |= I915_BO_FLAG_STRUCT_PAGE;

	/*
	 * Mark the object as volatile, such that the pages are marked as
	 * dontneed whilst they are still pinned. As soon as they are unpinned
	 * they are allowed to be reaped by the shrinker, and the caller is
	 * expected to repopulate - the contents of this object are only valid
	 * whilst active and pinned.
	 */
	i915_gem_object_set_volatile(obj);

	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->write_domain = I915_GEM_DOMAIN_CPU;

	cache_level = HAS_LLC(i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
	i915_gem_object_set_cache_coherency(obj, cache_level);

	return obj;
}

/**
 * i915_gem_object_create_internal: create an object with volatile pages
 * @i915: the i915 device
 * @size: the size in bytes of backing storage to allocate for the object
 *
 * Creates a new object that wraps some internal memory for private use.
 * This object is not backed by swappable storage, and as such its contents
 * are volatile and only valid whilst pinned. If the object is reaped by the
 * shrinker, its pages and data will be discarded. Equally, it is not a full
 * GEM object and so not valid for access from userspace. This makes it useful
 * for hardware interfaces like ringbuffers (which are pinned from the time
 * the request is written to the time the hardware stops accessing it), but
 * not for contexts (which need to be preserved when not active for later
 * reuse). Note that it is not cleared upon allocation.
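 *
 * A minimal usage sketch (illustrative only; error handling is elided and
 * the 64KiB size is an arbitrary example):
 *
 *	obj = i915_gem_object_create_internal(i915, SZ_64K);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 *	err = i915_gem_object_pin_pages_unlocked(obj);
 *	...
 *	i915_gem_object_unpin_pages(obj);
 *	i915_gem_object_put(obj);
 *
 * Returns: the new object on success, or an ERR_PTR() on failure.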
 */
struct drm_i915_gem_object *
i915_gem_object_create_internal(struct drm_i915_private *i915,
				phys_addr_t size)
{
	return __i915_gem_object_create_internal(i915, &i915_gem_object_internal_ops, size);
}