// SPDX-License-Identifier: MIT
/*
 * Copyright © 2014-2018 Intel Corporation
 */

#include "gem/i915_gem_object.h"

#include "i915_drv.h"
#include "intel_engine_pm.h"
#include "intel_gt_buffer_pool.h"

static struct list_head *
bucket_for_size(struct intel_gt_buffer_pool *pool, size_t sz)
{
	int n;

	/*
	 * Compute a power-of-two bucket, but throw everything greater than
	 * 16KiB into the same bucket: i.e. the buckets hold objects of
	 * (1 page, 2 pages, 4 pages, 8+ pages).
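	 *
	 * For example, assuming 4KiB pages: a 4KiB request lands in
	 * bucket 0, 8KiB in bucket 1, 16KiB in bucket 2, and anything
	 * of 32KiB or more falls into the final 8+ pages bucket.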
	 */
	n = fls(sz >> PAGE_SHIFT) - 1;
	if (n >= ARRAY_SIZE(pool->cache_list))
		n = ARRAY_SIZE(pool->cache_list) - 1;

	return &pool->cache_list[n];
}

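/*
 * Free a node once it has been unlinked: the object reference is
 * dropped immediately, but the node itself is freed via kfree_rcu() so
 * that a concurrent lockless walk in intel_gt_get_buffer_pool() may
 * still safely dereference it.
 */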
static void node_free(struct intel_gt_buffer_pool_node *node)
{
	i915_gem_object_put(node->obj);
	i915_active_fini(&node->active);
	kfree_rcu(node, rcu);
}

static bool pool_free_older_than(struct intel_gt_buffer_pool *pool, long keep)
{
	struct intel_gt_buffer_pool_node *node, *stale = NULL;
	bool active = false;
	int n;

	/* Free buffers that have not been used in the past second */
	for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) {
		struct list_head *list = &pool->cache_list[n];

		if (list_empty(list))
			continue;

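		/*
		 * Reap opportunistically: if the pool lock is contended,
		 * skip this bucket and let the rescheduled worker retry.
		 */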
		if (spin_trylock_irq(&pool->lock)) {
			struct list_head *pos;

			/* Most recent at head; oldest at tail */
			list_for_each_prev(pos, list) {
				unsigned long age;

				node = list_entry(pos, typeof(*node), link);

				age = READ_ONCE(node->age);
				if (!age || jiffies - age < keep)
					break;

				/* Check we are the first to claim this node */
				if (!xchg(&node->age, 0))
					break;

				node->free = stale;
				stale = node;
			}
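			/*
			 * Everything between pos and the tail was claimed
			 * above; unlink that whole stale segment from the
			 * bucket in a single operation.
			 */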
			if (!list_is_last(pos, list))
				__list_del_many(pos, list);

			spin_unlock_irq(&pool->lock);
		}

		active |= !list_empty(list);
	}

	while ((node = stale)) {
		stale = stale->free;
		node_free(node);
	}

	return active;
}

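/*
 * Worker that ages out idle nodes, rescheduling itself roughly once a
 * second for as long as any bucket still holds cached buffers.
 */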
static void pool_free_work(struct work_struct *wrk)
{
	struct intel_gt_buffer_pool *pool =
		container_of(wrk, typeof(*pool), work.work);

	if (pool_free_older_than(pool, HZ))
		schedule_delayed_work(&pool->work,
				      round_jiffies_up_relative(HZ));
}

static void pool_retire(struct i915_active *ref)
{
	struct intel_gt_buffer_pool_node *node =
		container_of(ref, typeof(*node), active);
	struct intel_gt_buffer_pool *pool = node->pool;
	struct list_head *list = bucket_for_size(pool, node->obj->base.size);
	unsigned long flags;

	if (node->pinned) {
		i915_gem_object_unpin_pages(node->obj);

		/* Return this object to the shrinker pool */
		i915_gem_object_make_purgeable(node->obj);
		node->pinned = false;
	}

	GEM_BUG_ON(node->age);
	spin_lock_irqsave(&pool->lock, flags);
	list_add_rcu(&node->link, list);
	WRITE_ONCE(node->age, jiffies ?: 1); /* 0 reserved for active nodes */
	spin_unlock_irqrestore(&pool->lock, flags);

	schedule_delayed_work(&pool->work,
			      round_jiffies_up_relative(HZ));
}

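/*
 * Pin the node's backing pages before actual use; paired with the unpin
 * and make-purgeable in pool_retire() once the node idles again. The
 * caller must hold the object lock.
 */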
void intel_gt_buffer_pool_mark_used(struct intel_gt_buffer_pool_node *node)
{
	assert_object_held(node->obj);

	if (node->pinned)
		return;

	__i915_gem_object_pin_pages(node->obj);
	/* Hide this pinned object from the shrinker until retired */
	i915_gem_object_make_unshrinkable(node->obj);
	node->pinned = true;
}

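/*
 * Allocate a fresh node when no suitable cached buffer exists. The
 * kmalloc() may fail benignly (__GFP_RETRY_MAYFAIL | __GFP_NOWARN);
 * the caller just propagates -ENOMEM.
 */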
static struct intel_gt_buffer_pool_node *
node_create(struct intel_gt_buffer_pool *pool, size_t sz,
	    enum i915_map_type type)
{
	struct intel_gt *gt = container_of(pool, struct intel_gt, buffer_pool);
	struct intel_gt_buffer_pool_node *node;
	struct drm_i915_gem_object *obj;

	node = kmalloc(sizeof(*node),
		       GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
	if (!node)
		return ERR_PTR(-ENOMEM);

	node->age = 0;
	node->pool = pool;
	node->pinned = false;
	i915_active_init(&node->active, NULL, pool_retire, 0);

	obj = i915_gem_object_create_internal(gt->i915, sz);
	if (IS_ERR(obj)) {
		i915_active_fini(&node->active);
		kfree(node);
		return ERR_CAST(obj);
	}

	i915_gem_object_set_readonly(obj);

	node->type = type;
	node->obj = obj;
	return node;
}

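/*
 * Look up (or create) a buffer of at least @size bytes. The bucket walk
 * is lockless: a node is claimed by racing a cmpxchg() on node->age
 * against other lookups and the reaper, so only the list removal itself
 * needs the pool spinlock.
 */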
struct intel_gt_buffer_pool_node *
intel_gt_get_buffer_pool(struct intel_gt *gt, size_t size,
			 enum i915_map_type type)
{
	struct intel_gt_buffer_pool *pool = &gt->buffer_pool;
	struct intel_gt_buffer_pool_node *node;
	struct list_head *list;
	int ret;

	size = PAGE_ALIGN(size);
	list = bucket_for_size(pool, size);

	rcu_read_lock();
	list_for_each_entry_rcu(node, list, link) {
		unsigned long age;

		if (node->obj->base.size < size)
			continue;

		if (node->type != type)
			continue;

		age = READ_ONCE(node->age);
		if (!age)
			continue;

		if (cmpxchg(&node->age, age, 0) == age) {
			spin_lock_irq(&pool->lock);
			list_del_rcu(&node->link);
			spin_unlock_irq(&pool->lock);
			break;
		}
	}
	rcu_read_unlock();

	if (&node->link == list) {
		node = node_create(pool, size, type);
		if (IS_ERR(node))
			return node;
	}

	ret = i915_active_acquire(&node->active);
	if (ret) {
		node_free(node);
		return ERR_PTR(ret);
	}

	return node;
}

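/*
 * Release the reference taken in intel_gt_get_buffer_pool(); once the
 * last reference is dropped and any outstanding work has retired,
 * pool_retire() returns the node to its bucket.
 */
void intel_gt_buffer_pool_put(struct intel_gt_buffer_pool_node *node)
{
	i915_active_release(&node->active);
}
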
void intel_gt_init_buffer_pool(struct intel_gt *gt)
{
	struct intel_gt_buffer_pool *pool = &gt->buffer_pool;
	int n;

	spin_lock_init(&pool->lock);
	for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
		INIT_LIST_HEAD(&pool->cache_list[n]);
	INIT_DELAYED_WORK(&pool->work, pool_free_work);
}

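/*
 * Drain the pool: free every cached node regardless of age, looping
 * until the delayed worker can be cancelled without having re-armed
 * itself in the meantime.
 */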
void intel_gt_flush_buffer_pool(struct intel_gt *gt)
{
	struct intel_gt_buffer_pool *pool = &gt->buffer_pool;

	do {
		while (pool_free_older_than(pool, 0))
			;
	} while (cancel_delayed_work_sync(&pool->work));
}

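/* By this point the pool must have been flushed and left empty. */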
void intel_gt_fini_buffer_pool(struct intel_gt *gt)
{
	struct intel_gt_buffer_pool *pool = &gt->buffer_pool;
	int n;

	for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
		GEM_BUG_ON(!list_empty(&pool->cache_list[n]));
}