1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2014-2018 Intel Corporation
4  */
5 
6 #include "gem/i915_gem_object.h"
7 
8 #include "i915_drv.h"
9 #include "intel_engine_pm.h"
10 #include "intel_gt_buffer_pool.h"
11 
12 static struct intel_gt *to_gt(struct intel_gt_buffer_pool *pool)
13 {
14 	return container_of(pool, struct intel_gt, buffer_pool);
15 }
16 
17 static struct list_head *
18 bucket_for_size(struct intel_gt_buffer_pool *pool, size_t sz)
19 {
20 	int n;
21 
22 	/*
23 	 * Compute a power-of-two bucket, but throw everything greater than
24 	 * 16KiB into the same bucket: i.e. the buckets hold objects of
25 	 * (1 page, 2 pages, 4 pages, 8+ pages).
26 	 */
27 	n = fls(sz >> PAGE_SHIFT) - 1;
28 	if (n >= ARRAY_SIZE(pool->cache_list))
29 		n = ARRAY_SIZE(pool->cache_list) - 1;
30 
31 	return &pool->cache_list[n];
32 }
33 
34 static void node_free(struct intel_gt_buffer_pool_node *node)
35 {
36 	i915_gem_object_put(node->obj);
37 	i915_active_fini(&node->active);
38 	kfree_rcu(node, rcu);
39 }
40 
/*
 * Reap cached nodes whose last use is at least @keep jiffies old.
 *
 * Walks every size bucket from the oldest entry (list tail) towards the
 * newest, claiming stale nodes onto a local singly-linked chain so the
 * actual freeing happens outside the pool spinlock. Returns true if any
 * bucket still holds nodes afterwards, i.e. the caller should try again
 * later.
 */
static bool pool_free_older_than(struct intel_gt_buffer_pool *pool, long keep)
{
	struct intel_gt_buffer_pool_node *node, *stale = NULL;
	bool active = false;
	int n;

	/* Free buffers that have not been used in the past second */
	for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) {
		struct list_head *list = &pool->cache_list[n];

		if (list_empty(list))
			continue;

		/* Best effort: skip this bucket if the lock is contended */
		if (spin_trylock_irq(&pool->lock)) {
			struct list_head *pos;

			/* Most recent at head; oldest at tail */
			list_for_each_prev(pos, list) {
				unsigned long age;

				node = list_entry(pos, typeof(*node), link);

				/* age == 0 marks a node already claimed/in use */
				age = READ_ONCE(node->age);
				if (!age || jiffies - age < keep)
					break;

				/* Check we are the first to claim this node */
				if (!xchg(&node->age, 0))
					break;

				/* Chain onto the local to-free list via ->free */
				node->free = stale;
				stale = node;
			}
			/*
			 * pos is now the newest node we did NOT claim (or the
			 * head sentinel if we claimed them all); everything
			 * between pos and the head — the stale tail segment —
			 * is ours, so unlink it in a single splice-out.
			 */
			if (!list_is_last(pos, list))
				__list_del_many(pos, list);

			spin_unlock_irq(&pool->lock);
		}

		active |= !list_empty(list);
	}

	/* Free the claimed nodes now that the spinlock is dropped */
	while ((node = stale)) {
		stale = stale->free;
		node_free(node);
	}

	return active;
}
90 
91 static void pool_free_work(struct work_struct *wrk)
92 {
93 	struct intel_gt_buffer_pool *pool =
94 		container_of(wrk, typeof(*pool), work.work);
95 
96 	if (pool_free_older_than(pool, HZ))
97 		schedule_delayed_work(&pool->work,
98 				      round_jiffies_up_relative(HZ));
99 }
100 
/*
 * i915_active acquire callback: prepare a (possibly recycled) node's
 * object for reuse by pinning its backing pages.
 */
static int pool_active(struct i915_active *ref)
{
	struct intel_gt_buffer_pool_node *node =
		container_of(ref, typeof(*node), active);
	struct dma_resv *resv = node->obj->base.resv;
	int err;

	/*
	 * Replace any exclusive fence left over from the node's previous
	 * use with NULL. Best effort only: skip if the reservation lock
	 * is contended.
	 */
	if (dma_resv_trylock(resv)) {
		dma_resv_add_excl_fence(resv, NULL);
		dma_resv_unlock(resv);
	}

	err = i915_gem_object_pin_pages(node->obj);
	if (err)
		return err;

	/* Hide this pinned object from the shrinker until retired */
	i915_gem_object_make_unshrinkable(node->obj);

	return 0;
}
122 
/*
 * i915_active retire callback: the GPU is done with the node, so unpin
 * it, return it to its size bucket for reuse, and kick the reaper in
 * case it is never reused. The node's age is published last, under the
 * pool lock, so a concurrent lookup cannot claim it before it is fully
 * back on the list.
 */
__i915_active_call
static void pool_retire(struct i915_active *ref)
{
	struct intel_gt_buffer_pool_node *node =
		container_of(ref, typeof(*node), active);
	struct intel_gt_buffer_pool *pool = node->pool;
	struct list_head *list = bucket_for_size(pool, node->obj->base.size);
	unsigned long flags;

	i915_gem_object_unpin_pages(node->obj);

	/* Return this object to the shrinker pool */
	i915_gem_object_make_purgeable(node->obj);

	GEM_BUG_ON(node->age);
	spin_lock_irqsave(&pool->lock, flags);
	list_add_rcu(&node->link, list);
	WRITE_ONCE(node->age, jiffies ?: 1); /* 0 reserved for active nodes */
	spin_unlock_irqrestore(&pool->lock, flags);

	/* Arrange for the node to be reaped if it stays idle for ~1s */
	schedule_delayed_work(&pool->work,
			      round_jiffies_up_relative(HZ));
}
146 
147 static struct intel_gt_buffer_pool_node *
148 node_create(struct intel_gt_buffer_pool *pool, size_t sz,
149 	    enum i915_map_type type)
150 {
151 	struct intel_gt *gt = to_gt(pool);
152 	struct intel_gt_buffer_pool_node *node;
153 	struct drm_i915_gem_object *obj;
154 
155 	node = kmalloc(sizeof(*node),
156 		       GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
157 	if (!node)
158 		return ERR_PTR(-ENOMEM);
159 
160 	node->age = 0;
161 	node->pool = pool;
162 	i915_active_init(&node->active, pool_active, pool_retire);
163 
164 	obj = i915_gem_object_create_internal(gt->i915, sz);
165 	if (IS_ERR(obj)) {
166 		i915_active_fini(&node->active);
167 		kfree(node);
168 		return ERR_CAST(obj);
169 	}
170 
171 	i915_gem_object_set_readonly(obj);
172 
173 	node->type = type;
174 	node->obj = obj;
175 	return node;
176 }
177 
/*
 * Acquire a pool object of at least @size bytes (rounded up to page
 * granularity) with the requested map @type, reusing a cached node when
 * one is available and creating a fresh one otherwise. The returned
 * node has its i915_active acquired; on failure an ERR_PTR is returned.
 */
struct intel_gt_buffer_pool_node *
intel_gt_get_buffer_pool(struct intel_gt *gt, size_t size,
			 enum i915_map_type type)
{
	struct intel_gt_buffer_pool *pool = &gt->buffer_pool;
	struct intel_gt_buffer_pool_node *node;
	struct list_head *list;
	int ret;

	size = PAGE_ALIGN(size);
	list = bucket_for_size(pool, size);

	/* Lockless lookup: claim a node by racing its age down to 0 */
	rcu_read_lock();
	list_for_each_entry_rcu(node, list, link) {
		unsigned long age;

		if (node->obj->base.size < size)
			continue;

		if (node->type != type)
			continue;

		/* age == 0 means someone else already owns this node */
		age = READ_ONCE(node->age);
		if (!age)
			continue;

		if (cmpxchg(&node->age, age, 0) == age) {
			/* We won the claim; unlink under the pool lock */
			spin_lock_irq(&pool->lock);
			list_del_rcu(&node->link);
			spin_unlock_irq(&pool->lock);
			break;
		}
	}
	rcu_read_unlock();

	/* Walked off the end of the bucket without claiming anything? */
	if (&node->link == list) {
		node = node_create(pool, size, type);
		if (IS_ERR(node))
			return node;
	}

	ret = i915_active_acquire(&node->active);
	if (ret) {
		node_free(node);
		return ERR_PTR(ret);
	}

	return node;
}
227 
228 void intel_gt_init_buffer_pool(struct intel_gt *gt)
229 {
230 	struct intel_gt_buffer_pool *pool = &gt->buffer_pool;
231 	int n;
232 
233 	spin_lock_init(&pool->lock);
234 	for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
235 		INIT_LIST_HEAD(&pool->cache_list[n]);
236 	INIT_DELAYED_WORK(&pool->work, pool_free_work);
237 }
238 
/*
 * Drain the pool: free every idle node regardless of age (keep == 0),
 * and keep retrying for as long as the delayed worker was still queued,
 * since a concurrent retire can repopulate a bucket and rearm the work
 * between passes.
 */
void intel_gt_flush_buffer_pool(struct intel_gt *gt)
{
	struct intel_gt_buffer_pool *pool = &gt->buffer_pool;

	do {
		while (pool_free_older_than(pool, 0))
			;
	} while (cancel_delayed_work_sync(&pool->work));
}
248 
249 void intel_gt_fini_buffer_pool(struct intel_gt *gt)
250 {
251 	struct intel_gt_buffer_pool *pool = &gt->buffer_pool;
252 	int n;
253 
254 	intel_gt_flush_buffer_pool(gt);
255 
256 	for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
257 		GEM_BUG_ON(!list_empty(&pool->cache_list[n]));
258 }
259