// SPDX-License-Identifier: MIT
/*
 * Copyright © 2014-2018 Intel Corporation
 */

#include "gem/i915_gem_object.h"

#include "i915_drv.h"
#include "intel_engine_pm.h"
#include "intel_gt_buffer_pool.h"

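/*
 * A per-gt cache of recycled buffer objects, bucketed by size. Nodes are
 * tracked with i915_active: while in use they are pinned and hidden from
 * the shrinker, and on retirement they are marked purgeable and parked in
 * their size bucket until reused, or reaped by the delayed worker once
 * they have sat idle for a second.
 */
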
static struct intel_gt *to_gt(struct intel_gt_buffer_pool *pool)
{
	return container_of(pool, struct intel_gt, buffer_pool);
}

static struct list_head *
bucket_for_size(struct intel_gt_buffer_pool *pool, size_t sz)
{
	int n;

	/*
	 * Compute a power-of-two bucket from the floor of log2(npages),
	 * but throw everything of 8 pages (32KiB with 4KiB pages) or more
	 * into the last bucket: i.e. the buckets hold objects of
	 * (1 page, 2-3 pages, 4-7 pages, 8+ pages).
	 */
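	/*
	 * For example, a 3-page (12KiB) object gives fls(3) - 1 = 1 and
	 * so shares the 2-page bucket; intel_gt_get_buffer_pool() must
	 * therefore recheck node->obj->base.size before reusing a node.
	 */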
	n = fls(sz >> PAGE_SHIFT) - 1;
	if (n >= ARRAY_SIZE(pool->cache_list))
		n = ARRAY_SIZE(pool->cache_list) - 1;

	return &pool->cache_list[n];
}

static void node_free(struct intel_gt_buffer_pool_node *node)
{
	i915_gem_object_put(node->obj);
	i915_active_fini(&node->active);
	kfree(node);
}

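/*
 * Reap the nodes that have sat in their bucket for more than a second.
 * Stale entries are collected on a local list so that they can be freed
 * after dropping the irq-disabled spinlock, and the worker re-arms itself
 * for as long as any bucket remains populated.
 */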
static void pool_free_work(struct work_struct *wrk)
{
	struct intel_gt_buffer_pool *pool =
		container_of(wrk, typeof(*pool), work.work);
	struct intel_gt_buffer_pool_node *node, *next;
	unsigned long old = jiffies - HZ;
	bool active = false;
	LIST_HEAD(stale);
	int n;

	/* Free buffers that have not been used in the past second */
	spin_lock_irq(&pool->lock);
	for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) {
		struct list_head *list = &pool->cache_list[n];

		/* Most recent at head; oldest at tail */
		list_for_each_entry_safe_reverse(node, next, list, link) {
			if (time_before(old, node->age))
				break;

			list_move(&node->link, &stale);
		}
		active |= !list_empty(list);
	}
	spin_unlock_irq(&pool->lock);

	list_for_each_entry_safe(node, next, &stale, link)
		node_free(node);

	if (active)
		schedule_delayed_work(&pool->work,
				      round_jiffies_up_relative(HZ));
}

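/*
 * pool_active() and pool_retire() are the i915_active callbacks of a node:
 * the first acquire prepares the object for reuse and pins its pages, and
 * once the last request using the node is retired, the node is returned
 * to its size bucket with a fresh timestamp for the reaper.
 */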
static int pool_active(struct i915_active *ref)
{
	struct intel_gt_buffer_pool_node *node =
		container_of(ref, typeof(*node), active);
	struct dma_resv *resv = node->obj->base.resv;
	int err;

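	/*
	 * Best effort: swap the exclusive fence for NULL, discarding any
	 * stale fences left over from the previous user of this object.
	 * If the trylock fails, the old fences are simply left to retire
	 * in their own time.
	 */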
	if (dma_resv_trylock(resv)) {
		dma_resv_add_excl_fence(resv, NULL);
		dma_resv_unlock(resv);
	}

	err = i915_gem_object_pin_pages(node->obj);
	if (err)
		return err;

	/* Hide this pinned object from the shrinker until retired */
	i915_gem_object_make_unshrinkable(node->obj);

	return 0;
}

__i915_active_call
static void pool_retire(struct i915_active *ref)
{
	struct intel_gt_buffer_pool_node *node =
		container_of(ref, typeof(*node), active);
	struct intel_gt_buffer_pool *pool = node->pool;
	struct list_head *list = bucket_for_size(pool, node->obj->base.size);
	unsigned long flags;

	i915_gem_object_unpin_pages(node->obj);

	/* Return this object to the shrinker pool */
	i915_gem_object_make_purgeable(node->obj);

	spin_lock_irqsave(&pool->lock, flags);
	node->age = jiffies;
	list_add(&node->link, list);
	spin_unlock_irqrestore(&pool->lock, flags);

	schedule_delayed_work(&pool->work,
			      round_jiffies_up_relative(HZ));
}

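/*
 * node_create: allocate a fresh node and its backing object. The kmalloc
 * is deliberately quiet: __GFP_RETRY_MAYFAIL | __GFP_NOWARN prefers
 * returning -ENOMEM for the caller to handle over invoking the oom-killer
 * or logging an allocation-failure warning.
 */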
static struct intel_gt_buffer_pool_node *
node_create(struct intel_gt_buffer_pool *pool, size_t sz)
{
	struct intel_gt *gt = to_gt(pool);
	struct intel_gt_buffer_pool_node *node;
	struct drm_i915_gem_object *obj;

	node = kmalloc(sizeof(*node),
		       GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
	if (!node)
		return ERR_PTR(-ENOMEM);

	node->pool = pool;
	i915_active_init(&node->active, pool_active, pool_retire);

	obj = i915_gem_object_create_internal(gt->i915, sz);
	if (IS_ERR(obj)) {
		i915_active_fini(&node->active);
		kfree(node);
		return ERR_CAST(obj);
	}

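	/*
	 * Pool buffers are expected to be written by the CPU and only
	 * read by the GPU (e.g. as batch buffers), so keep the object
	 * read-only in the GPU's page tables where the hardware allows.
	 */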
	i915_gem_object_set_readonly(obj);

	node->obj = obj;
	return node;
}

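/*
 * intel_gt_get_buffer_pool: grab a node backed by an object of at least
 * @size bytes, preferring to recycle a cached buffer over allocating a
 * new one. A sketch of the expected call pattern, using the helpers from
 * intel_gt_buffer_pool.h:
 *
 *	node = intel_gt_get_buffer_pool(gt, size);
 *	if (IS_ERR(node))
 *		return PTR_ERR(node);
 *	... fill node->obj with commands ...
 *	err = intel_gt_buffer_pool_mark_active(node, rq);
 *	intel_gt_buffer_pool_put(node);
 *
 * Marking the node active keeps it alive until the request is retired,
 * whereupon pool_retire() returns it to its bucket.
 */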
struct intel_gt_buffer_pool_node *
intel_gt_get_buffer_pool(struct intel_gt *gt, size_t size)
{
	struct intel_gt_buffer_pool *pool = &gt->buffer_pool;
	struct intel_gt_buffer_pool_node *node;
	struct list_head *list;
	unsigned long flags;
	int ret;

	size = PAGE_ALIGN(size);
	list = bucket_for_size(pool, size);

	spin_lock_irqsave(&pool->lock, flags);
	list_for_each_entry(node, list, link) {
		if (node->obj->base.size < size)
			continue;
		list_del(&node->link);
		break;
	}
	spin_unlock_irqrestore(&pool->lock, flags);

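	/*
	 * If the search ran off the end of the bucket without claiming a
	 * large enough node, the iterator now points at the list head
	 * itself and we must allocate a new node.
	 */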
	if (&node->link == list) {
		node = node_create(pool, size);
		if (IS_ERR(node))
			return node;
	}

	ret = i915_active_acquire(&node->active);
	if (ret) {
		node_free(node);
		return ERR_PTR(ret);
	}

	return node;
}

void intel_gt_init_buffer_pool(struct intel_gt *gt)
{
	struct intel_gt_buffer_pool *pool = &gt->buffer_pool;
	int n;

	spin_lock_init(&pool->lock);
	for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
		INIT_LIST_HEAD(&pool->cache_list[n]);
	INIT_DELAYED_WORK(&pool->work, pool_free_work);
}

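/* Free every cached node right away, rather than waiting for the reaper. */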
static void pool_free_imm(struct intel_gt_buffer_pool *pool)
{
	int n;

	spin_lock_irq(&pool->lock);
	for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) {
		struct intel_gt_buffer_pool_node *node, *next;
		struct list_head *list = &pool->cache_list[n];

		list_for_each_entry_safe(node, next, list, link)
			node_free(node);
		INIT_LIST_HEAD(list);
	}
	spin_unlock_irq(&pool->lock);
}

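/*
 * A delayed worker is pending whenever a bucket holds a node (pool_retire()
 * schedules it and pool_free_work() re-arms itself while any bucket remains
 * populated), so if there was no work to cancel, the caches should already
 * be empty.
 */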
void intel_gt_flush_buffer_pool(struct intel_gt *gt)
{
	struct intel_gt_buffer_pool *pool = &gt->buffer_pool;

	if (cancel_delayed_work_sync(&pool->work))
		pool_free_imm(pool);
}

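/*
 * By the time the pool is torn down, every node must have been retired
 * and reclaimed; assert that each bucket is now empty.
 */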
void intel_gt_fini_buffer_pool(struct intel_gt *gt)
{
	struct intel_gt_buffer_pool *pool = &gt->buffer_pool;
	int n;

	intel_gt_flush_buffer_pool(gt);

	for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
		GEM_BUG_ON(!list_empty(&pool->cache_list[n]));
}