// SPDX-License-Identifier: MIT
/*
 * Copyright © 2014-2018 Intel Corporation
 */

#include "gem/i915_gem_object.h"

#include "i915_drv.h"
#include "intel_engine_pm.h"
#include "intel_gt_buffer_pool.h"

static struct intel_gt *to_gt(struct intel_gt_buffer_pool *pool)
{
	return container_of(pool, struct intel_gt, buffer_pool);
}

static struct list_head *
bucket_for_size(struct intel_gt_buffer_pool *pool, size_t sz)
{
	int n;

	/*
	 * Compute a power-of-two bucket, but throw everything greater than
	 * 16KiB into the same bucket: i.e. the buckets hold objects of
	 * (1 page, 2 pages, 4 pages, 8+ pages).
	 */
	n = fls(sz >> PAGE_SHIFT) - 1;
	if (n >= ARRAY_SIZE(pool->cache_list))
		n = ARRAY_SIZE(pool->cache_list) - 1;

	return &pool->cache_list[n];
}

static void node_free(struct intel_gt_buffer_pool_node *node)
{
	i915_gem_object_put(node->obj);
	i915_active_fini(&node->active);
	kfree_rcu(node, rcu);
}

static bool pool_free_older_than(struct intel_gt_buffer_pool *pool, long keep)
{
	struct intel_gt_buffer_pool_node *node, *stale = NULL;
	bool active = false;
	int n;

	/* Free buffers that have not been used in the past second */
	for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) {
		struct list_head *list = &pool->cache_list[n];

		if (list_empty(list))
			continue;

		if (spin_trylock_irq(&pool->lock)) {
			struct list_head *pos;

			/* Most recent at head; oldest at tail */
			list_for_each_prev(pos, list) {
				unsigned long age;

				node = list_entry(pos, typeof(*node), link);

				age = READ_ONCE(node->age);
				if (!age || jiffies - age < keep)
					break;

				/* Check we are the first to claim this node */
				if (!xchg(&node->age, 0))
					break;

				node->free = stale;
				stale = node;
			}
			if (!list_is_last(pos, list))
				__list_del_many(pos, list);

			spin_unlock_irq(&pool->lock);
		}

		active |= !list_empty(list);
	}

	while ((node = stale)) {
		stale = stale->free;
		node_free(node);
	}

	return active;
}

static void pool_free_work(struct work_struct *wrk)
{
	struct intel_gt_buffer_pool *pool =
		container_of(wrk, typeof(*pool), work.work);

	if (pool_free_older_than(pool, HZ))
		schedule_delayed_work(&pool->work,
				      round_jiffies_up_relative(HZ));
}

static int pool_active(struct i915_active *ref)
{
	struct intel_gt_buffer_pool_node *node =
		container_of(ref, typeof(*node), active);
	struct dma_resv *resv = node->obj->base.resv;
	int err;

	if (dma_resv_trylock(resv)) {
		dma_resv_add_excl_fence(resv, NULL);
		dma_resv_unlock(resv);
	}

	err = i915_gem_object_pin_pages(node->obj);
	if (err)
		return err;

	/* Hide this pinned object from the shrinker until retired */
	i915_gem_object_make_unshrinkable(node->obj);

	return 0;
}

__i915_active_call
static void pool_retire(struct i915_active *ref)
{
	struct intel_gt_buffer_pool_node *node =
		container_of(ref, typeof(*node), active);
	struct intel_gt_buffer_pool *pool = node->pool;
	struct list_head *list = bucket_for_size(pool, node->obj->base.size);
	unsigned long flags;

	i915_gem_object_unpin_pages(node->obj);

	/* Return this object to the shrinker pool */
	i915_gem_object_make_purgeable(node->obj);

	GEM_BUG_ON(node->age);
	spin_lock_irqsave(&pool->lock, flags);
	list_add_rcu(&node->link, list);
	WRITE_ONCE(node->age, jiffies ?: 1); /* 0 reserved for active nodes */
	spin_unlock_irqrestore(&pool->lock, flags);

	schedule_delayed_work(&pool->work,
			      round_jiffies_up_relative(HZ));
}
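
/*
 * Lifecycle overview (annotation added for exposition; not part of the
 * original driver source). A node cycles between a size bucket and
 * active use:
 *
 *	node_create()		fresh node, age == 0 (i.e. "in use")
 *	i915_active_acquire()	pool_active(): pins pages and hides the
 *				object from the shrinker
 *	request retires		pool_retire(): unpins, marks the object
 *				purgeable, re-lists it with age = jiffies
 *	pool_free_work()	reaps nodes left idle for more than ~1s
 *
 * Since age == 0 marks a node as claimed, the xchg()/cmpxchg() on
 * node->age above and below act as the ownership handoff between the
 * reaper and the allocator.
 */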

static struct intel_gt_buffer_pool_node *
node_create(struct intel_gt_buffer_pool *pool, size_t sz)
{
	struct intel_gt *gt = to_gt(pool);
	struct intel_gt_buffer_pool_node *node;
	struct drm_i915_gem_object *obj;

	node = kmalloc(sizeof(*node),
		       GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
	if (!node)
		return ERR_PTR(-ENOMEM);

	node->age = 0;
	node->pool = pool;
	i915_active_init(&node->active, pool_active, pool_retire);

	obj = i915_gem_object_create_internal(gt->i915, sz);
	if (IS_ERR(obj)) {
		i915_active_fini(&node->active);
		kfree(node);
		return ERR_CAST(obj);
	}

	i915_gem_object_set_readonly(obj);

	node->obj = obj;
	return node;
}

struct intel_gt_buffer_pool_node *
intel_gt_get_buffer_pool(struct intel_gt *gt, size_t size)
{
	struct intel_gt_buffer_pool *pool = &gt->buffer_pool;
	struct intel_gt_buffer_pool_node *node;
	struct list_head *list;
	int ret;

	size = PAGE_ALIGN(size);
	list = bucket_for_size(pool, size);

	rcu_read_lock();
	list_for_each_entry_rcu(node, list, link) {
		unsigned long age;

		if (node->obj->base.size < size)
			continue;

		age = READ_ONCE(node->age);
		if (!age)
			continue;

		/* Claim the idle node by zeroing its age before unlinking */
		if (cmpxchg(&node->age, age, 0) == age) {
			spin_lock_irq(&pool->lock);
			list_del_rcu(&node->link);
			spin_unlock_irq(&pool->lock);
			break;
		}
	}
	rcu_read_unlock();

	if (&node->link == list) {
		node = node_create(pool, size);
		if (IS_ERR(node))
			return node;
	}

	ret = i915_active_acquire(&node->active);
	if (ret) {
		node_free(node);
		return ERR_PTR(ret);
	}

	return node;
}

void intel_gt_init_buffer_pool(struct intel_gt *gt)
{
	struct intel_gt_buffer_pool *pool = &gt->buffer_pool;
	int n;

	spin_lock_init(&pool->lock);
	for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
		INIT_LIST_HEAD(&pool->cache_list[n]);
	INIT_DELAYED_WORK(&pool->work, pool_free_work);
}

void intel_gt_flush_buffer_pool(struct intel_gt *gt)
{
	struct intel_gt_buffer_pool *pool = &gt->buffer_pool;

	do {
		while (pool_free_older_than(pool, 0))
			;
	} while (cancel_delayed_work_sync(&pool->work));
}

void intel_gt_fini_buffer_pool(struct intel_gt *gt)
{
	struct intel_gt_buffer_pool *pool = &gt->buffer_pool;
	int n;

	intel_gt_flush_buffer_pool(gt);

	for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
		GEM_BUG_ON(!list_empty(&pool->cache_list[n]));
}
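
/*
 * Usage sketch (illustrative only; not part of this file). A caller
 * typically pins a node for the duration of one request and lets
 * pool_retire() recycle it once that request completes. The
 * intel_gt_buffer_pool_mark_active()/intel_gt_buffer_pool_put()
 * helpers are assumed to be the inline wrappers around
 * i915_active_add_request()/i915_active_release() from
 * intel_gt_buffer_pool.h:
 *
 *	struct intel_gt_buffer_pool_node *node;
 *	int err;
 *
 *	node = intel_gt_get_buffer_pool(gt, SZ_64K);
 *	if (IS_ERR(node))
 *		return PTR_ERR(node);
 *
 *	... emit commands using node->obj as scratch ...
 *
 *	err = intel_gt_buffer_pool_mark_active(node, rq);
 *	intel_gt_buffer_pool_put(node);
 *	return err;
 */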