// SPDX-License-Identifier: MIT
/*
 * Copyright © 2014-2018 Intel Corporation
 */

#include "gem/i915_gem_object.h"

#include "i915_drv.h"
#include "intel_engine_pm.h"
#include "intel_gt_buffer_pool.h"

static struct intel_gt *to_gt(struct intel_gt_buffer_pool *pool)
{
	return container_of(pool, struct intel_gt, buffer_pool);
}

static struct list_head *
bucket_for_size(struct intel_gt_buffer_pool *pool, size_t sz)
{
	int n;

	/*
	 * Compute a power-of-two bucket, but throw everything greater than
	 * 16KiB into the same bucket: i.e. the buckets hold objects of
	 * (1 page, 2 pages, 4 pages, 8+ pages).
	 */
	n = fls(sz >> PAGE_SHIFT) - 1;
	if (n >= ARRAY_SIZE(pool->cache_list))
		n = ARRAY_SIZE(pool->cache_list) - 1;

	return &pool->cache_list[n];
}

static void node_free(struct intel_gt_buffer_pool_node *node)
{
	i915_gem_object_put(node->obj);
	i915_active_fini(&node->active);
	kfree_rcu(node, rcu);
}

static bool pool_free_older_than(struct intel_gt_buffer_pool *pool, long keep)
{
	struct intel_gt_buffer_pool_node *node, *stale = NULL;
	bool active = false;
	int n;

	/* Free buffers that have not been used in the past second */
	for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) {
		struct list_head *list = &pool->cache_list[n];

		if (list_empty(list))
			continue;

		if (spin_trylock_irq(&pool->lock)) {
			struct list_head *pos;

			/* Most recent at head; oldest at tail */
			list_for_each_prev(pos, list) {
				unsigned long age;

				node = list_entry(pos, typeof(*node), link);

				age = READ_ONCE(node->age);
				if (!age || jiffies - age < keep)
					break;

				/* Check we are the first to claim this node */
				if (!xchg(&node->age, 0))
					break;

				node->free = stale;
				stale = node;
			}
			if (!list_is_last(pos, list))
				__list_del_many(pos, list);

			spin_unlock_irq(&pool->lock);
		}

		active |= !list_empty(list);
	}

	while ((node = stale)) {
		stale = stale->free;
		node_free(node);
	}

	return active;
}

static void pool_free_work(struct work_struct *wrk)
{
	struct intel_gt_buffer_pool *pool =
		container_of(wrk, typeof(*pool), work.work);

	if (pool_free_older_than(pool, HZ))
		schedule_delayed_work(&pool->work,
				      round_jiffies_up_relative(HZ));
}

static int pool_active(struct i915_active *ref)
{
	struct intel_gt_buffer_pool_node *node =
		container_of(ref, typeof(*node), active);
	struct dma_resv *resv = node->obj->base.resv;
	int err;

	if (dma_resv_trylock(resv)) {
		dma_resv_add_excl_fence(resv, NULL);
		dma_resv_unlock(resv);
	}

	err = i915_gem_object_pin_pages(node->obj);
	if (err)
		return err;

	/* Hide this pinned object from the shrinker until retired */
	i915_gem_object_make_unshrinkable(node->obj);

	return 0;
}

__i915_active_call
static void pool_retire(struct i915_active *ref)
{
	struct intel_gt_buffer_pool_node *node =
		container_of(ref, typeof(*node), active);
	struct intel_gt_buffer_pool *pool = node->pool;
	struct list_head *list = bucket_for_size(pool, node->obj->base.size);
	unsigned long flags;

	i915_gem_object_unpin_pages(node->obj);

	/* Return this object to the shrinker pool */
	i915_gem_object_make_purgeable(node->obj);

	spin_lock_irqsave(&pool->lock, flags);
	list_add_rcu(&node->link, list);
	WRITE_ONCE(node->age, jiffies ?: 1); /* 0 reserved for active nodes */
	spin_unlock_irqrestore(&pool->lock, flags);

	schedule_delayed_work(&pool->work,
			      round_jiffies_up_relative(HZ));
}
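
/*
 * Illustrative sketch, not driver code: node->age doubles as a timestamp
 * and a claim token. A non-zero age marks an idle node parked in its
 * bucket; both the reaper above and intel_gt_get_buffer_pool() below must
 * atomically swap the age to 0 before unlinking the node, so each node
 * has exactly one claimant even though the lookup runs under RCU only:
 *
 *	age = READ_ONCE(node->age);
 *	if (age && cmpxchg(&node->age, age, 0) == age) {
 *		// we won the claim; unlink under pool->lock
 *	}
 *
 * pool_retire() re-arms the token with "jiffies ?: 1" so a node parked
 * exactly when jiffies wraps to 0 is not mistaken for a claimed one, and
 * node_free() uses kfree_rcu() so a concurrent RCU list walker never
 * touches freed memory.
 */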

static struct intel_gt_buffer_pool_node *
node_create(struct intel_gt_buffer_pool *pool, size_t sz)
{
	struct intel_gt *gt = to_gt(pool);
	struct intel_gt_buffer_pool_node *node;
	struct drm_i915_gem_object *obj;

	node = kmalloc(sizeof(*node),
		       GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
	if (!node)
		return ERR_PTR(-ENOMEM);

	node->pool = pool;
	i915_active_init(&node->active, pool_active, pool_retire);

	obj = i915_gem_object_create_internal(gt->i915, sz);
	if (IS_ERR(obj)) {
		i915_active_fini(&node->active);
		kfree(node);
		return ERR_CAST(obj);
	}

	i915_gem_object_set_readonly(obj);

	node->obj = obj;
	return node;
}

struct intel_gt_buffer_pool_node *
intel_gt_get_buffer_pool(struct intel_gt *gt, size_t size)
{
	struct intel_gt_buffer_pool *pool = &gt->buffer_pool;
	struct intel_gt_buffer_pool_node *node;
	struct list_head *list;
	int ret;

	size = PAGE_ALIGN(size);
	list = bucket_for_size(pool, size);

	rcu_read_lock();
	list_for_each_entry_rcu(node, list, link) {
		unsigned long age;

		if (node->obj->base.size < size)
			continue;

		age = READ_ONCE(node->age);
		if (!age)
			continue;

		if (cmpxchg(&node->age, age, 0) == age) {
			spin_lock_irq(&pool->lock);
			list_del_rcu(&node->link);
			spin_unlock_irq(&pool->lock);
			break;
		}
	}
	rcu_read_unlock();

	/* The walk ran to completion: no cached node was claimed */
	if (&node->link == list) {
		node = node_create(pool, size);
		if (IS_ERR(node))
			return node;
	}

	ret = i915_active_acquire(&node->active);
	if (ret) {
		node_free(node);
		return ERR_PTR(ret);
	}

	return node;
}

void intel_gt_init_buffer_pool(struct intel_gt *gt)
{
	struct intel_gt_buffer_pool *pool = &gt->buffer_pool;
	int n;

	spin_lock_init(&pool->lock);
	for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
		INIT_LIST_HEAD(&pool->cache_list[n]);
	INIT_DELAYED_WORK(&pool->work, pool_free_work);
}

void intel_gt_flush_buffer_pool(struct intel_gt *gt)
{
	struct intel_gt_buffer_pool *pool = &gt->buffer_pool;

	do {
		while (pool_free_older_than(pool, 0))
			;
	} while (cancel_delayed_work_sync(&pool->work));
}

void intel_gt_fini_buffer_pool(struct intel_gt *gt)
{
	struct intel_gt_buffer_pool *pool = &gt->buffer_pool;
	int n;

	intel_gt_flush_buffer_pool(gt);

	for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
		GEM_BUG_ON(!list_empty(&pool->cache_list[n]));
}
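
/*
 * Usage sketch (illustrative only; i915_active_add_request() and
 * i915_active_release() are helpers from elsewhere in the driver, and
 * "rq" stands for the request that will consume the buffer):
 *
 *	struct intel_gt_buffer_pool_node *node;
 *
 *	node = intel_gt_get_buffer_pool(gt, SZ_4K);
 *	if (IS_ERR(node))
 *		return PTR_ERR(node);
 *
 *	// ... fill node->obj with transient commands/data ...
 *
 *	err = i915_active_add_request(&node->active, rq);
 *	i915_active_release(&node->active);
 *
 * Once rq retires, pool_retire() parks the object back in its size bucket
 * for reuse, until pool_free_work() reaps it after ~1s of idleness.
 */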