1d38ceaf9SAlex Deucher /* 2d38ceaf9SAlex Deucher * Copyright 2011 Red Hat Inc. 3d38ceaf9SAlex Deucher * All Rights Reserved. 4d38ceaf9SAlex Deucher * 5d38ceaf9SAlex Deucher * Permission is hereby granted, free of charge, to any person obtaining a 6d38ceaf9SAlex Deucher * copy of this software and associated documentation files (the 7d38ceaf9SAlex Deucher * "Software"), to deal in the Software without restriction, including 8d38ceaf9SAlex Deucher * without limitation the rights to use, copy, modify, merge, publish, 9d38ceaf9SAlex Deucher * distribute, sub license, and/or sell copies of the Software, and to 10d38ceaf9SAlex Deucher * permit persons to whom the Software is furnished to do so, subject to 11d38ceaf9SAlex Deucher * the following conditions: 12d38ceaf9SAlex Deucher * 13d38ceaf9SAlex Deucher * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14d38ceaf9SAlex Deucher * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15d38ceaf9SAlex Deucher * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL 16d38ceaf9SAlex Deucher * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, 17d38ceaf9SAlex Deucher * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 18d38ceaf9SAlex Deucher * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE 19d38ceaf9SAlex Deucher * USE OR OTHER DEALINGS IN THE SOFTWARE. 20d38ceaf9SAlex Deucher * 21d38ceaf9SAlex Deucher * The above copyright notice and this permission notice (including the 22d38ceaf9SAlex Deucher * next paragraph) shall be included in all copies or substantial portions 23d38ceaf9SAlex Deucher * of the Software. 
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
/* Algorithm:
 *
 * We store the last allocated bo in "hole"; we always try to allocate
 * after the last allocated bo.  The principle is that in a linear GPU
 * ring progression, what is after "last" is the oldest bo we allocated
 * and thus the first one that should no longer be in use by the GPU.
 *
 * If that is not the case we skip over the bo after "last" to the
 * closest done bo, if one exists.  If none exists and we are not asked
 * to block, we report failure to allocate.
 *
 * If we are asked to block, we wait on the oldest fence of each ring
 * and return as soon as any of those fences completes.
43d38ceaf9SAlex Deucher */ 44d38ceaf9SAlex Deucher #include <drm/drmP.h> 45d38ceaf9SAlex Deucher #include "amdgpu.h" 46d38ceaf9SAlex Deucher 47d38ceaf9SAlex Deucher static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo); 48d38ceaf9SAlex Deucher static void amdgpu_sa_bo_try_free(struct amdgpu_sa_manager *sa_manager); 49d38ceaf9SAlex Deucher 50d38ceaf9SAlex Deucher int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev, 51d38ceaf9SAlex Deucher struct amdgpu_sa_manager *sa_manager, 52d38ceaf9SAlex Deucher unsigned size, u32 align, u32 domain) 53d38ceaf9SAlex Deucher { 54d38ceaf9SAlex Deucher int i, r; 55d38ceaf9SAlex Deucher 56d38ceaf9SAlex Deucher init_waitqueue_head(&sa_manager->wq); 57d38ceaf9SAlex Deucher sa_manager->bo = NULL; 58d38ceaf9SAlex Deucher sa_manager->size = size; 59d38ceaf9SAlex Deucher sa_manager->domain = domain; 60d38ceaf9SAlex Deucher sa_manager->align = align; 61d38ceaf9SAlex Deucher sa_manager->hole = &sa_manager->olist; 62d38ceaf9SAlex Deucher INIT_LIST_HEAD(&sa_manager->olist); 636ba60b89SChristian König for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i) 64d38ceaf9SAlex Deucher INIT_LIST_HEAD(&sa_manager->flist[i]); 65d38ceaf9SAlex Deucher 6672d7668bSChristian König r = amdgpu_bo_create(adev, size, align, true, domain, 6772d7668bSChristian König 0, NULL, NULL, &sa_manager->bo); 68d38ceaf9SAlex Deucher if (r) { 69d38ceaf9SAlex Deucher dev_err(adev->dev, "(%d) failed to allocate bo for manager\n", r); 70d38ceaf9SAlex Deucher return r; 71d38ceaf9SAlex Deucher } 72d38ceaf9SAlex Deucher 73d38ceaf9SAlex Deucher return r; 74d38ceaf9SAlex Deucher } 75d38ceaf9SAlex Deucher 76d38ceaf9SAlex Deucher void amdgpu_sa_bo_manager_fini(struct amdgpu_device *adev, 77d38ceaf9SAlex Deucher struct amdgpu_sa_manager *sa_manager) 78d38ceaf9SAlex Deucher { 79d38ceaf9SAlex Deucher struct amdgpu_sa_bo *sa_bo, *tmp; 80d38ceaf9SAlex Deucher 81d38ceaf9SAlex Deucher if (!list_empty(&sa_manager->olist)) { 82d38ceaf9SAlex Deucher sa_manager->hole = 
&sa_manager->olist, 83d38ceaf9SAlex Deucher amdgpu_sa_bo_try_free(sa_manager); 84d38ceaf9SAlex Deucher if (!list_empty(&sa_manager->olist)) { 85d38ceaf9SAlex Deucher dev_err(adev->dev, "sa_manager is not empty, clearing anyway\n"); 86d38ceaf9SAlex Deucher } 87d38ceaf9SAlex Deucher } 88d38ceaf9SAlex Deucher list_for_each_entry_safe(sa_bo, tmp, &sa_manager->olist, olist) { 89d38ceaf9SAlex Deucher amdgpu_sa_bo_remove_locked(sa_bo); 90d38ceaf9SAlex Deucher } 91d38ceaf9SAlex Deucher amdgpu_bo_unref(&sa_manager->bo); 92d38ceaf9SAlex Deucher sa_manager->size = 0; 93d38ceaf9SAlex Deucher } 94d38ceaf9SAlex Deucher 95d38ceaf9SAlex Deucher int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev, 96d38ceaf9SAlex Deucher struct amdgpu_sa_manager *sa_manager) 97d38ceaf9SAlex Deucher { 98d38ceaf9SAlex Deucher int r; 99d38ceaf9SAlex Deucher 100d38ceaf9SAlex Deucher if (sa_manager->bo == NULL) { 101d38ceaf9SAlex Deucher dev_err(adev->dev, "no bo for sa manager\n"); 102d38ceaf9SAlex Deucher return -EINVAL; 103d38ceaf9SAlex Deucher } 104d38ceaf9SAlex Deucher 105d38ceaf9SAlex Deucher /* map the buffer */ 106d38ceaf9SAlex Deucher r = amdgpu_bo_reserve(sa_manager->bo, false); 107d38ceaf9SAlex Deucher if (r) { 108d38ceaf9SAlex Deucher dev_err(adev->dev, "(%d) failed to reserve manager bo\n", r); 109d38ceaf9SAlex Deucher return r; 110d38ceaf9SAlex Deucher } 111d38ceaf9SAlex Deucher r = amdgpu_bo_pin(sa_manager->bo, sa_manager->domain, &sa_manager->gpu_addr); 112d38ceaf9SAlex Deucher if (r) { 113d38ceaf9SAlex Deucher amdgpu_bo_unreserve(sa_manager->bo); 114d38ceaf9SAlex Deucher dev_err(adev->dev, "(%d) failed to pin manager bo\n", r); 115d38ceaf9SAlex Deucher return r; 116d38ceaf9SAlex Deucher } 117d38ceaf9SAlex Deucher r = amdgpu_bo_kmap(sa_manager->bo, &sa_manager->cpu_ptr); 118d38ceaf9SAlex Deucher amdgpu_bo_unreserve(sa_manager->bo); 119d38ceaf9SAlex Deucher return r; 120d38ceaf9SAlex Deucher } 121d38ceaf9SAlex Deucher 122d38ceaf9SAlex Deucher int amdgpu_sa_bo_manager_suspend(struct 
amdgpu_device *adev, 123d38ceaf9SAlex Deucher struct amdgpu_sa_manager *sa_manager) 124d38ceaf9SAlex Deucher { 125d38ceaf9SAlex Deucher int r; 126d38ceaf9SAlex Deucher 127d38ceaf9SAlex Deucher if (sa_manager->bo == NULL) { 128d38ceaf9SAlex Deucher dev_err(adev->dev, "no bo for sa manager\n"); 129d38ceaf9SAlex Deucher return -EINVAL; 130d38ceaf9SAlex Deucher } 131d38ceaf9SAlex Deucher 132d38ceaf9SAlex Deucher r = amdgpu_bo_reserve(sa_manager->bo, false); 133d38ceaf9SAlex Deucher if (!r) { 134d38ceaf9SAlex Deucher amdgpu_bo_kunmap(sa_manager->bo); 135d38ceaf9SAlex Deucher amdgpu_bo_unpin(sa_manager->bo); 136d38ceaf9SAlex Deucher amdgpu_bo_unreserve(sa_manager->bo); 137d38ceaf9SAlex Deucher } 138d38ceaf9SAlex Deucher return r; 139d38ceaf9SAlex Deucher } 140d38ceaf9SAlex Deucher 141d38ceaf9SAlex Deucher static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo) 142d38ceaf9SAlex Deucher { 143d38ceaf9SAlex Deucher struct amdgpu_sa_manager *sa_manager = sa_bo->manager; 144d38ceaf9SAlex Deucher if (sa_manager->hole == &sa_bo->olist) { 145d38ceaf9SAlex Deucher sa_manager->hole = sa_bo->olist.prev; 146d38ceaf9SAlex Deucher } 147d38ceaf9SAlex Deucher list_del_init(&sa_bo->olist); 148d38ceaf9SAlex Deucher list_del_init(&sa_bo->flist); 1494ce9891eSChunming Zhou fence_put(sa_bo->fence); 150d38ceaf9SAlex Deucher kfree(sa_bo); 151d38ceaf9SAlex Deucher } 152d38ceaf9SAlex Deucher 153d38ceaf9SAlex Deucher static void amdgpu_sa_bo_try_free(struct amdgpu_sa_manager *sa_manager) 154d38ceaf9SAlex Deucher { 155d38ceaf9SAlex Deucher struct amdgpu_sa_bo *sa_bo, *tmp; 156d38ceaf9SAlex Deucher 157d38ceaf9SAlex Deucher if (sa_manager->hole->next == &sa_manager->olist) 158d38ceaf9SAlex Deucher return; 159d38ceaf9SAlex Deucher 160d38ceaf9SAlex Deucher sa_bo = list_entry(sa_manager->hole->next, struct amdgpu_sa_bo, olist); 161d38ceaf9SAlex Deucher list_for_each_entry_safe_from(sa_bo, tmp, &sa_manager->olist, olist) { 1623cdb8119SChristian König if (sa_bo->fence == NULL || 
1634ce9891eSChunming Zhou !fence_is_signaled(sa_bo->fence)) { 164d38ceaf9SAlex Deucher return; 165d38ceaf9SAlex Deucher } 166d38ceaf9SAlex Deucher amdgpu_sa_bo_remove_locked(sa_bo); 167d38ceaf9SAlex Deucher } 168d38ceaf9SAlex Deucher } 169d38ceaf9SAlex Deucher 170d38ceaf9SAlex Deucher static inline unsigned amdgpu_sa_bo_hole_soffset(struct amdgpu_sa_manager *sa_manager) 171d38ceaf9SAlex Deucher { 172d38ceaf9SAlex Deucher struct list_head *hole = sa_manager->hole; 173d38ceaf9SAlex Deucher 174d38ceaf9SAlex Deucher if (hole != &sa_manager->olist) { 175d38ceaf9SAlex Deucher return list_entry(hole, struct amdgpu_sa_bo, olist)->eoffset; 176d38ceaf9SAlex Deucher } 177d38ceaf9SAlex Deucher return 0; 178d38ceaf9SAlex Deucher } 179d38ceaf9SAlex Deucher 180d38ceaf9SAlex Deucher static inline unsigned amdgpu_sa_bo_hole_eoffset(struct amdgpu_sa_manager *sa_manager) 181d38ceaf9SAlex Deucher { 182d38ceaf9SAlex Deucher struct list_head *hole = sa_manager->hole; 183d38ceaf9SAlex Deucher 184d38ceaf9SAlex Deucher if (hole->next != &sa_manager->olist) { 185d38ceaf9SAlex Deucher return list_entry(hole->next, struct amdgpu_sa_bo, olist)->soffset; 186d38ceaf9SAlex Deucher } 187d38ceaf9SAlex Deucher return sa_manager->size; 188d38ceaf9SAlex Deucher } 189d38ceaf9SAlex Deucher 190d38ceaf9SAlex Deucher static bool amdgpu_sa_bo_try_alloc(struct amdgpu_sa_manager *sa_manager, 191d38ceaf9SAlex Deucher struct amdgpu_sa_bo *sa_bo, 192d38ceaf9SAlex Deucher unsigned size, unsigned align) 193d38ceaf9SAlex Deucher { 194d38ceaf9SAlex Deucher unsigned soffset, eoffset, wasted; 195d38ceaf9SAlex Deucher 196d38ceaf9SAlex Deucher soffset = amdgpu_sa_bo_hole_soffset(sa_manager); 197d38ceaf9SAlex Deucher eoffset = amdgpu_sa_bo_hole_eoffset(sa_manager); 198d38ceaf9SAlex Deucher wasted = (align - (soffset % align)) % align; 199d38ceaf9SAlex Deucher 200d38ceaf9SAlex Deucher if ((eoffset - soffset) >= (size + wasted)) { 201d38ceaf9SAlex Deucher soffset += wasted; 202d38ceaf9SAlex Deucher 203d38ceaf9SAlex Deucher 
sa_bo->manager = sa_manager; 204d38ceaf9SAlex Deucher sa_bo->soffset = soffset; 205d38ceaf9SAlex Deucher sa_bo->eoffset = soffset + size; 206d38ceaf9SAlex Deucher list_add(&sa_bo->olist, sa_manager->hole); 207d38ceaf9SAlex Deucher INIT_LIST_HEAD(&sa_bo->flist); 208d38ceaf9SAlex Deucher sa_manager->hole = &sa_bo->olist; 209d38ceaf9SAlex Deucher return true; 210d38ceaf9SAlex Deucher } 211d38ceaf9SAlex Deucher return false; 212d38ceaf9SAlex Deucher } 213d38ceaf9SAlex Deucher 214d38ceaf9SAlex Deucher /** 215d38ceaf9SAlex Deucher * amdgpu_sa_event - Check if we can stop waiting 216d38ceaf9SAlex Deucher * 217d38ceaf9SAlex Deucher * @sa_manager: pointer to the sa_manager 218d38ceaf9SAlex Deucher * @size: number of bytes we want to allocate 219d38ceaf9SAlex Deucher * @align: alignment we need to match 220d38ceaf9SAlex Deucher * 221d38ceaf9SAlex Deucher * Check if either there is a fence we can wait for or 222d38ceaf9SAlex Deucher * enough free memory to satisfy the allocation directly 223d38ceaf9SAlex Deucher */ 224d38ceaf9SAlex Deucher static bool amdgpu_sa_event(struct amdgpu_sa_manager *sa_manager, 225d38ceaf9SAlex Deucher unsigned size, unsigned align) 226d38ceaf9SAlex Deucher { 227d38ceaf9SAlex Deucher unsigned soffset, eoffset, wasted; 228d38ceaf9SAlex Deucher int i; 229d38ceaf9SAlex Deucher 2306ba60b89SChristian König for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i) 2316ba60b89SChristian König if (!list_empty(&sa_manager->flist[i])) 232d38ceaf9SAlex Deucher return true; 233d38ceaf9SAlex Deucher 234d38ceaf9SAlex Deucher soffset = amdgpu_sa_bo_hole_soffset(sa_manager); 235d38ceaf9SAlex Deucher eoffset = amdgpu_sa_bo_hole_eoffset(sa_manager); 236d38ceaf9SAlex Deucher wasted = (align - (soffset % align)) % align; 237d38ceaf9SAlex Deucher 238d38ceaf9SAlex Deucher if ((eoffset - soffset) >= (size + wasted)) { 239d38ceaf9SAlex Deucher return true; 240d38ceaf9SAlex Deucher } 241d38ceaf9SAlex Deucher 242d38ceaf9SAlex Deucher return false; 243d38ceaf9SAlex Deucher } 
244d38ceaf9SAlex Deucher 245d38ceaf9SAlex Deucher static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager, 2464ce9891eSChunming Zhou struct fence **fences, 247d38ceaf9SAlex Deucher unsigned *tries) 248d38ceaf9SAlex Deucher { 249d38ceaf9SAlex Deucher struct amdgpu_sa_bo *best_bo = NULL; 250d38ceaf9SAlex Deucher unsigned i, soffset, best, tmp; 251d38ceaf9SAlex Deucher 252d38ceaf9SAlex Deucher /* if hole points to the end of the buffer */ 253d38ceaf9SAlex Deucher if (sa_manager->hole->next == &sa_manager->olist) { 254d38ceaf9SAlex Deucher /* try again with its beginning */ 255d38ceaf9SAlex Deucher sa_manager->hole = &sa_manager->olist; 256d38ceaf9SAlex Deucher return true; 257d38ceaf9SAlex Deucher } 258d38ceaf9SAlex Deucher 259d38ceaf9SAlex Deucher soffset = amdgpu_sa_bo_hole_soffset(sa_manager); 260d38ceaf9SAlex Deucher /* to handle wrap around we add sa_manager->size */ 261d38ceaf9SAlex Deucher best = sa_manager->size * 2; 262d38ceaf9SAlex Deucher /* go over all fence list and try to find the closest sa_bo 263d38ceaf9SAlex Deucher * of the current last 264d38ceaf9SAlex Deucher */ 2656ba60b89SChristian König for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i) { 266d38ceaf9SAlex Deucher struct amdgpu_sa_bo *sa_bo; 267d38ceaf9SAlex Deucher 2686ba60b89SChristian König if (list_empty(&sa_manager->flist[i])) 269d38ceaf9SAlex Deucher continue; 270d38ceaf9SAlex Deucher 271d38ceaf9SAlex Deucher sa_bo = list_first_entry(&sa_manager->flist[i], 272d38ceaf9SAlex Deucher struct amdgpu_sa_bo, flist); 273d38ceaf9SAlex Deucher 2744ce9891eSChunming Zhou if (!fence_is_signaled(sa_bo->fence)) { 275d38ceaf9SAlex Deucher fences[i] = sa_bo->fence; 276d38ceaf9SAlex Deucher continue; 277d38ceaf9SAlex Deucher } 278d38ceaf9SAlex Deucher 279d38ceaf9SAlex Deucher /* limit the number of tries each ring gets */ 280d38ceaf9SAlex Deucher if (tries[i] > 2) { 281d38ceaf9SAlex Deucher continue; 282d38ceaf9SAlex Deucher } 283d38ceaf9SAlex Deucher 284d38ceaf9SAlex Deucher tmp = 
sa_bo->soffset; 285d38ceaf9SAlex Deucher if (tmp < soffset) { 286d38ceaf9SAlex Deucher /* wrap around, pretend it's after */ 287d38ceaf9SAlex Deucher tmp += sa_manager->size; 288d38ceaf9SAlex Deucher } 289d38ceaf9SAlex Deucher tmp -= soffset; 290d38ceaf9SAlex Deucher if (tmp < best) { 291d38ceaf9SAlex Deucher /* this sa bo is the closest one */ 292d38ceaf9SAlex Deucher best = tmp; 293d38ceaf9SAlex Deucher best_bo = sa_bo; 294d38ceaf9SAlex Deucher } 295d38ceaf9SAlex Deucher } 296d38ceaf9SAlex Deucher 297d38ceaf9SAlex Deucher if (best_bo) { 2986ba60b89SChristian König uint32_t idx = best_bo->fence->context; 2996ba60b89SChristian König 3006ba60b89SChristian König idx %= AMDGPU_SA_NUM_FENCE_LISTS; 3014ce9891eSChunming Zhou ++tries[idx]; 302d38ceaf9SAlex Deucher sa_manager->hole = best_bo->olist.prev; 303d38ceaf9SAlex Deucher 304d38ceaf9SAlex Deucher /* we knew that this one is signaled, 305d38ceaf9SAlex Deucher so it's save to remote it */ 306d38ceaf9SAlex Deucher amdgpu_sa_bo_remove_locked(best_bo); 307d38ceaf9SAlex Deucher return true; 308d38ceaf9SAlex Deucher } 309d38ceaf9SAlex Deucher return false; 310d38ceaf9SAlex Deucher } 311d38ceaf9SAlex Deucher 312bbf0b345SJunwei Zhang int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager, 313d38ceaf9SAlex Deucher struct amdgpu_sa_bo **sa_bo, 314d38ceaf9SAlex Deucher unsigned size, unsigned align) 315d38ceaf9SAlex Deucher { 3166ba60b89SChristian König struct fence *fences[AMDGPU_SA_NUM_FENCE_LISTS]; 3176ba60b89SChristian König unsigned tries[AMDGPU_SA_NUM_FENCE_LISTS]; 318ee327cafSChristian König unsigned count; 319d38ceaf9SAlex Deucher int i, r; 320a8f5bf0bSmonk.liu signed long t; 321d38ceaf9SAlex Deucher 322fe6b2ad9SChristian König if (WARN_ON_ONCE(align > sa_manager->align)) 323fe6b2ad9SChristian König return -EINVAL; 324fe6b2ad9SChristian König 325fe6b2ad9SChristian König if (WARN_ON_ONCE(size > sa_manager->size)) 326fe6b2ad9SChristian König return -EINVAL; 327d38ceaf9SAlex Deucher 328d38ceaf9SAlex Deucher *sa_bo = 
kmalloc(sizeof(struct amdgpu_sa_bo), GFP_KERNEL); 329d38ceaf9SAlex Deucher if ((*sa_bo) == NULL) { 330d38ceaf9SAlex Deucher return -ENOMEM; 331d38ceaf9SAlex Deucher } 332d38ceaf9SAlex Deucher (*sa_bo)->manager = sa_manager; 333d38ceaf9SAlex Deucher (*sa_bo)->fence = NULL; 334d38ceaf9SAlex Deucher INIT_LIST_HEAD(&(*sa_bo)->olist); 335d38ceaf9SAlex Deucher INIT_LIST_HEAD(&(*sa_bo)->flist); 336d38ceaf9SAlex Deucher 337d38ceaf9SAlex Deucher spin_lock(&sa_manager->wq.lock); 338d38ceaf9SAlex Deucher do { 3396ba60b89SChristian König for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i) { 340d38ceaf9SAlex Deucher fences[i] = NULL; 341d38ceaf9SAlex Deucher tries[i] = 0; 342d38ceaf9SAlex Deucher } 343d38ceaf9SAlex Deucher 344d38ceaf9SAlex Deucher do { 345d38ceaf9SAlex Deucher amdgpu_sa_bo_try_free(sa_manager); 346d38ceaf9SAlex Deucher 347d38ceaf9SAlex Deucher if (amdgpu_sa_bo_try_alloc(sa_manager, *sa_bo, 348d38ceaf9SAlex Deucher size, align)) { 349d38ceaf9SAlex Deucher spin_unlock(&sa_manager->wq.lock); 350d38ceaf9SAlex Deucher return 0; 351d38ceaf9SAlex Deucher } 352d38ceaf9SAlex Deucher 353d38ceaf9SAlex Deucher /* see if we can skip over some allocations */ 354d38ceaf9SAlex Deucher } while (amdgpu_sa_bo_next_hole(sa_manager, fences, tries)); 355d38ceaf9SAlex Deucher 3566ba60b89SChristian König for (i = 0, count = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i) 357ee327cafSChristian König if (fences[i]) 358ee327cafSChristian König fences[count++] = fences[i]; 359ee327cafSChristian König 360ee327cafSChristian König if (count) { 361d38ceaf9SAlex Deucher spin_unlock(&sa_manager->wq.lock); 362ee327cafSChristian König t = fence_wait_any_timeout(fences, count, false, 363ee327cafSChristian König MAX_SCHEDULE_TIMEOUT); 364a8f5bf0bSmonk.liu r = (t > 0) ? 
0 : t; 365d38ceaf9SAlex Deucher spin_lock(&sa_manager->wq.lock); 366ee327cafSChristian König } else { 367d38ceaf9SAlex Deucher /* if we have nothing to wait for block */ 368d38ceaf9SAlex Deucher r = wait_event_interruptible_locked( 369d38ceaf9SAlex Deucher sa_manager->wq, 370d38ceaf9SAlex Deucher amdgpu_sa_event(sa_manager, size, align) 371d38ceaf9SAlex Deucher ); 372d38ceaf9SAlex Deucher } 373d38ceaf9SAlex Deucher 374d38ceaf9SAlex Deucher } while (!r); 375d38ceaf9SAlex Deucher 376d38ceaf9SAlex Deucher spin_unlock(&sa_manager->wq.lock); 377d38ceaf9SAlex Deucher kfree(*sa_bo); 378d38ceaf9SAlex Deucher *sa_bo = NULL; 379d38ceaf9SAlex Deucher return r; 380d38ceaf9SAlex Deucher } 381d38ceaf9SAlex Deucher 382d38ceaf9SAlex Deucher void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo, 3834ce9891eSChunming Zhou struct fence *fence) 384d38ceaf9SAlex Deucher { 385d38ceaf9SAlex Deucher struct amdgpu_sa_manager *sa_manager; 386d38ceaf9SAlex Deucher 387d38ceaf9SAlex Deucher if (sa_bo == NULL || *sa_bo == NULL) { 388d38ceaf9SAlex Deucher return; 389d38ceaf9SAlex Deucher } 390d38ceaf9SAlex Deucher 391d38ceaf9SAlex Deucher sa_manager = (*sa_bo)->manager; 392d38ceaf9SAlex Deucher spin_lock(&sa_manager->wq.lock); 3934ce9891eSChunming Zhou if (fence && !fence_is_signaled(fence)) { 3944ce9891eSChunming Zhou uint32_t idx; 3956ba60b89SChristian König 3964ce9891eSChunming Zhou (*sa_bo)->fence = fence_get(fence); 3976ba60b89SChristian König idx = fence->context % AMDGPU_SA_NUM_FENCE_LISTS; 3984ce9891eSChunming Zhou list_add_tail(&(*sa_bo)->flist, &sa_manager->flist[idx]); 399d38ceaf9SAlex Deucher } else { 400d38ceaf9SAlex Deucher amdgpu_sa_bo_remove_locked(*sa_bo); 401d38ceaf9SAlex Deucher } 402d38ceaf9SAlex Deucher wake_up_all_locked(&sa_manager->wq); 403d38ceaf9SAlex Deucher spin_unlock(&sa_manager->wq.lock); 404d38ceaf9SAlex Deucher *sa_bo = NULL; 405d38ceaf9SAlex Deucher } 406d38ceaf9SAlex Deucher 407d38ceaf9SAlex Deucher #if defined(CONFIG_DEBUG_FS) 
4084f839a24SChristian König 409d38ceaf9SAlex Deucher void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager, 410d38ceaf9SAlex Deucher struct seq_file *m) 411d38ceaf9SAlex Deucher { 412d38ceaf9SAlex Deucher struct amdgpu_sa_bo *i; 413d38ceaf9SAlex Deucher 414d38ceaf9SAlex Deucher spin_lock(&sa_manager->wq.lock); 415d38ceaf9SAlex Deucher list_for_each_entry(i, &sa_manager->olist, olist) { 416d38ceaf9SAlex Deucher uint64_t soffset = i->soffset + sa_manager->gpu_addr; 417d38ceaf9SAlex Deucher uint64_t eoffset = i->eoffset + sa_manager->gpu_addr; 418d38ceaf9SAlex Deucher if (&i->olist == sa_manager->hole) { 419d38ceaf9SAlex Deucher seq_printf(m, ">"); 420d38ceaf9SAlex Deucher } else { 421d38ceaf9SAlex Deucher seq_printf(m, " "); 422d38ceaf9SAlex Deucher } 423d38ceaf9SAlex Deucher seq_printf(m, "[0x%010llx 0x%010llx] size %8lld", 424d38ceaf9SAlex Deucher soffset, eoffset, eoffset - soffset); 4256ba60b89SChristian König 4264f839a24SChristian König if (i->fence) 4276ba60b89SChristian König seq_printf(m, " protected by 0x%08x on context %d", 4286ba60b89SChristian König i->fence->seqno, i->fence->context); 4296ba60b89SChristian König 430d38ceaf9SAlex Deucher seq_printf(m, "\n"); 431d38ceaf9SAlex Deucher } 432d38ceaf9SAlex Deucher spin_unlock(&sa_manager->wq.lock); 433d38ceaf9SAlex Deucher } 434d38ceaf9SAlex Deucher #endif 435