/*
 * Copyright 2011 Red Hat Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
/* Algorithm:
 *
 * We store the last allocated bo in "hole", and we always try to allocate
 * after the last allocated bo. The principle is that in a linear GPU ring
 * progression, what comes after the last allocation is the oldest bo we
 * allocated and thus the first one that should no longer be in use by
 * the GPU.
 *
 * If that's not the case, we skip over the bo after last to the closest
 * completed bo, if one exists. If none exists and we are not asked to
 * block, we report failure to allocate.
 *
 * If we are asked to block, we collect the oldest fence of each ring and
 * wait until any one of those fences completes.
 */
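/*
 * Typical lifecycle of a sub-allocator, as a rough sketch (not part of
 * this file; error handling is elided and the sizes are invented for
 * illustration):
 *
 *	struct amdgpu_sa_manager sa_manager;
 *	struct amdgpu_sa_bo *sa_bo;
 *
 *	amdgpu_sa_bo_manager_init(adev, &sa_manager, 256 * 1024, 256,
 *				  AMDGPU_GEM_DOMAIN_GTT);
 *	amdgpu_sa_bo_manager_start(adev, &sa_manager);
 *
 *	amdgpu_sa_bo_new(&sa_manager, &sa_bo, 4096, 256);
 *	... use the range [sa_bo->soffset, sa_bo->eoffset) ...
 *	amdgpu_sa_bo_free(adev, &sa_bo, fence);
 *
 *	amdgpu_sa_bo_manager_suspend(adev, &sa_manager);
 *	amdgpu_sa_bo_manager_fini(adev, &sa_manager);
 */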
#include <drm/drmP.h>
#include "amdgpu.h"

static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo);
static void amdgpu_sa_bo_try_free(struct amdgpu_sa_manager *sa_manager);

int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev,
			      struct amdgpu_sa_manager *sa_manager,
			      unsigned size, u32 align, u32 domain)
{
	int i, r;

	init_waitqueue_head(&sa_manager->wq);
	sa_manager->bo = NULL;
	sa_manager->size = size;
	sa_manager->domain = domain;
	sa_manager->align = align;
	sa_manager->hole = &sa_manager->olist;
	INIT_LIST_HEAD(&sa_manager->olist);
	for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i)
		INIT_LIST_HEAD(&sa_manager->flist[i]);

	r = amdgpu_bo_create(adev, size, align, true, domain,
			     0, NULL, NULL, &sa_manager->bo);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate bo for manager\n", r);
		return r;
	}

	return r;
}

void amdgpu_sa_bo_manager_fini(struct amdgpu_device *adev,
			       struct amdgpu_sa_manager *sa_manager)
{
	struct amdgpu_sa_bo *sa_bo, *tmp;

	if (!list_empty(&sa_manager->olist)) {
		sa_manager->hole = &sa_manager->olist;
		amdgpu_sa_bo_try_free(sa_manager);
		if (!list_empty(&sa_manager->olist)) {
			dev_err(adev->dev, "sa_manager is not empty, clearing anyway\n");
		}
	}
	list_for_each_entry_safe(sa_bo, tmp, &sa_manager->olist, olist) {
		amdgpu_sa_bo_remove_locked(sa_bo);
	}
	amdgpu_bo_unref(&sa_manager->bo);
	sa_manager->size = 0;
}

int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev,
			       struct amdgpu_sa_manager *sa_manager)
{
	int r;

	if (sa_manager->bo == NULL) {
		dev_err(adev->dev, "no bo for sa manager\n");
		return -EINVAL;
	}

	/* map the buffer */
	r = amdgpu_bo_reserve(sa_manager->bo, false);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve manager bo\n", r);
		return r;
	}
	r = amdgpu_bo_pin(sa_manager->bo, sa_manager->domain, &sa_manager->gpu_addr);
	if (r) {
		amdgpu_bo_unreserve(sa_manager->bo);
		dev_err(adev->dev, "(%d) failed to pin manager bo\n", r);
		return r;
	}
	r = amdgpu_bo_kmap(sa_manager->bo, &sa_manager->cpu_ptr);
	if (!r)
		memset(sa_manager->cpu_ptr, 0, sa_manager->size);
	amdgpu_bo_unreserve(sa_manager->bo);
	return r;
}
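/*
 * Suspend tears down only the CPU mapping and the pin set up by
 * amdgpu_sa_bo_manager_start(); the backing bo and any outstanding
 * sub-allocations stay intact, so the manager can be restarted later.
 */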
int amdgpu_sa_bo_manager_suspend(struct amdgpu_device *adev,
				 struct amdgpu_sa_manager *sa_manager)
{
	int r;

	if (sa_manager->bo == NULL) {
		dev_err(adev->dev, "no bo for sa manager\n");
		return -EINVAL;
	}

	r = amdgpu_bo_reserve(sa_manager->bo, false);
	if (!r) {
		amdgpu_bo_kunmap(sa_manager->bo);
		amdgpu_bo_unpin(sa_manager->bo);
		amdgpu_bo_unreserve(sa_manager->bo);
	}
	return r;
}

static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo)
{
	struct amdgpu_sa_manager *sa_manager = sa_bo->manager;

	if (sa_manager->hole == &sa_bo->olist) {
		sa_manager->hole = sa_bo->olist.prev;
	}
	list_del_init(&sa_bo->olist);
	list_del_init(&sa_bo->flist);
	dma_fence_put(sa_bo->fence);
	kfree(sa_bo);
}

static void amdgpu_sa_bo_try_free(struct amdgpu_sa_manager *sa_manager)
{
	struct amdgpu_sa_bo *sa_bo, *tmp;

	if (sa_manager->hole->next == &sa_manager->olist)
		return;

	sa_bo = list_entry(sa_manager->hole->next, struct amdgpu_sa_bo, olist);
	list_for_each_entry_safe_from(sa_bo, tmp, &sa_manager->olist, olist) {
		if (sa_bo->fence == NULL ||
		    !dma_fence_is_signaled(sa_bo->fence)) {
			return;
		}
		amdgpu_sa_bo_remove_locked(sa_bo);
	}
}

static inline unsigned amdgpu_sa_bo_hole_soffset(struct amdgpu_sa_manager *sa_manager)
{
	struct list_head *hole = sa_manager->hole;

	if (hole != &sa_manager->olist) {
		return list_entry(hole, struct amdgpu_sa_bo, olist)->eoffset;
	}
	return 0;
}

static inline unsigned amdgpu_sa_bo_hole_eoffset(struct amdgpu_sa_manager *sa_manager)
{
	struct list_head *hole = sa_manager->hole;

	if (hole->next != &sa_manager->olist) {
		return list_entry(hole->next, struct amdgpu_sa_bo, olist)->soffset;
	}
	return sa_manager->size;
}
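/*
 * Worked example for the alignment handling below (numbers invented for
 * illustration): with the hole at soffset = 100 and eoffset = 1024, a
 * request for size = 512 with align = 256 wastes
 * (256 - 100 % 256) % 256 = 156 bytes, so it needs 512 + 156 = 668 bytes
 * of the 924 available and the allocation is placed at offset 256.
 */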
static bool amdgpu_sa_bo_try_alloc(struct amdgpu_sa_manager *sa_manager,
				   struct amdgpu_sa_bo *sa_bo,
				   unsigned size, unsigned align)
{
	unsigned soffset, eoffset, wasted;

	soffset = amdgpu_sa_bo_hole_soffset(sa_manager);
	eoffset = amdgpu_sa_bo_hole_eoffset(sa_manager);
	wasted = (align - (soffset % align)) % align;

	if ((eoffset - soffset) >= (size + wasted)) {
		soffset += wasted;

		sa_bo->manager = sa_manager;
		sa_bo->soffset = soffset;
		sa_bo->eoffset = soffset + size;
		list_add(&sa_bo->olist, sa_manager->hole);
		INIT_LIST_HEAD(&sa_bo->flist);
		sa_manager->hole = &sa_bo->olist;
		return true;
	}
	return false;
}

/**
 * amdgpu_sa_event - Check if we can stop waiting
 *
 * @sa_manager: pointer to the sa_manager
 * @size: number of bytes we want to allocate
 * @align: alignment we need to match
 *
 * Check if either there is a fence we can wait for or
 * enough free memory to satisfy the allocation directly.
 */
static bool amdgpu_sa_event(struct amdgpu_sa_manager *sa_manager,
			    unsigned size, unsigned align)
{
	unsigned soffset, eoffset, wasted;
	int i;

	for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i)
		if (!list_empty(&sa_manager->flist[i]))
			return true;

	soffset = amdgpu_sa_bo_hole_soffset(sa_manager);
	eoffset = amdgpu_sa_bo_hole_eoffset(sa_manager);
	wasted = (align - (soffset % align)) % align;

	if ((eoffset - soffset) >= (size + wasted)) {
		return true;
	}

	return false;
}
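/*
 * Try to advance the hole past allocations whose fences have already
 * signaled. For each fence list, an unsignaled head fence is handed back
 * through @fences so the caller can wait on it; @tries limits how often
 * each fence context may push the hole forward before we give up on it.
 */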
static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager,
				   struct dma_fence **fences,
				   unsigned *tries)
{
	struct amdgpu_sa_bo *best_bo = NULL;
	unsigned i, soffset, best, tmp;

	/* if hole points to the end of the buffer */
	if (sa_manager->hole->next == &sa_manager->olist) {
		/* try again with its beginning */
		sa_manager->hole = &sa_manager->olist;
		return true;
	}

	soffset = amdgpu_sa_bo_hole_soffset(sa_manager);
	/* to handle wrap around we add sa_manager->size */
	best = sa_manager->size * 2;
	/* go over all fence lists and try to find the closest sa_bo
	 * of the current last
	 */
	for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i) {
		struct amdgpu_sa_bo *sa_bo;

		if (list_empty(&sa_manager->flist[i]))
			continue;

		sa_bo = list_first_entry(&sa_manager->flist[i],
					 struct amdgpu_sa_bo, flist);

		if (!dma_fence_is_signaled(sa_bo->fence)) {
			fences[i] = sa_bo->fence;
			continue;
		}

		/* limit the number of tries each ring gets */
		if (tries[i] > 2) {
			continue;
		}

		tmp = sa_bo->soffset;
		if (tmp < soffset) {
			/* wrap around, pretend it's after */
			tmp += sa_manager->size;
		}
		tmp -= soffset;
		if (tmp < best) {
			/* this sa bo is the closest one */
			best = tmp;
			best_bo = sa_bo;
		}
	}

	if (best_bo) {
		uint32_t idx = best_bo->fence->context;

		idx %= AMDGPU_SA_NUM_FENCE_LISTS;
		++tries[idx];
		sa_manager->hole = best_bo->olist.prev;

		/* we knew that this one is signaled,
		 * so it's safe to remove it
		 */
		amdgpu_sa_bo_remove_locked(best_bo);
		return true;
	}
	return false;
}
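/*
 * Allocate @size bytes from the manager. Tries the current hole first,
 * then reclaims signaled allocations via amdgpu_sa_bo_next_hole(); if
 * that still fails, it either waits on the collected unsignaled fences
 * or, with nothing to wait for, blocks until amdgpu_sa_bo_free() wakes
 * the queue.
 */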
int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
		     struct amdgpu_sa_bo **sa_bo,
		     unsigned size, unsigned align)
{
	struct dma_fence *fences[AMDGPU_SA_NUM_FENCE_LISTS];
	unsigned tries[AMDGPU_SA_NUM_FENCE_LISTS];
	unsigned count;
	int i, r;
	signed long t;

	if (WARN_ON_ONCE(align > sa_manager->align))
		return -EINVAL;

	if (WARN_ON_ONCE(size > sa_manager->size))
		return -EINVAL;

	*sa_bo = kmalloc(sizeof(struct amdgpu_sa_bo), GFP_KERNEL);
	if ((*sa_bo) == NULL) {
		return -ENOMEM;
	}
	(*sa_bo)->manager = sa_manager;
	(*sa_bo)->fence = NULL;
	INIT_LIST_HEAD(&(*sa_bo)->olist);
	INIT_LIST_HEAD(&(*sa_bo)->flist);

	spin_lock(&sa_manager->wq.lock);
	do {
		for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i) {
			fences[i] = NULL;
			tries[i] = 0;
		}

		do {
			amdgpu_sa_bo_try_free(sa_manager);

			if (amdgpu_sa_bo_try_alloc(sa_manager, *sa_bo,
						   size, align)) {
				spin_unlock(&sa_manager->wq.lock);
				return 0;
			}

			/* see if we can skip over some allocations */
		} while (amdgpu_sa_bo_next_hole(sa_manager, fences, tries));

		for (i = 0, count = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i)
			if (fences[i])
				fences[count++] = dma_fence_get(fences[i]);

		if (count) {
			spin_unlock(&sa_manager->wq.lock);
			t = dma_fence_wait_any_timeout(fences, count, false,
						       MAX_SCHEDULE_TIMEOUT);
			for (i = 0; i < count; ++i)
				dma_fence_put(fences[i]);

			r = (t > 0) ? 0 : t;
			spin_lock(&sa_manager->wq.lock);
		} else {
			/* if we have nothing to wait for, block */
			r = wait_event_interruptible_locked(
				sa_manager->wq,
				amdgpu_sa_event(sa_manager, size, align)
			);
		}

	} while (!r);

	spin_unlock(&sa_manager->wq.lock);
	kfree(*sa_bo);
	*sa_bo = NULL;
	return r;
}

void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo,
		       struct dma_fence *fence)
{
	struct amdgpu_sa_manager *sa_manager;

	if (sa_bo == NULL || *sa_bo == NULL) {
		return;
	}

	sa_manager = (*sa_bo)->manager;
	spin_lock(&sa_manager->wq.lock);
	if (fence && !dma_fence_is_signaled(fence)) {
		uint32_t idx;

		(*sa_bo)->fence = dma_fence_get(fence);
		idx = fence->context % AMDGPU_SA_NUM_FENCE_LISTS;
		list_add_tail(&(*sa_bo)->flist, &sa_manager->flist[idx]);
	} else {
		amdgpu_sa_bo_remove_locked(*sa_bo);
	}
	wake_up_all_locked(&sa_manager->wq);
	spin_unlock(&sa_manager->wq.lock);
	*sa_bo = NULL;
}
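/*
 * debugfs helper: dumps every live sub-allocation as an absolute GPU
 * address range, marks the current hole position with '>', and names the
 * fence that still protects each entry.
 */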
#if defined(CONFIG_DEBUG_FS)

void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
				  struct seq_file *m)
{
	struct amdgpu_sa_bo *i;

	spin_lock(&sa_manager->wq.lock);
	list_for_each_entry(i, &sa_manager->olist, olist) {
		uint64_t soffset = i->soffset + sa_manager->gpu_addr;
		uint64_t eoffset = i->eoffset + sa_manager->gpu_addr;

		if (&i->olist == sa_manager->hole) {
			seq_printf(m, ">");
		} else {
			seq_printf(m, " ");
		}
		seq_printf(m, "[0x%010llx 0x%010llx] size %8lld",
			   soffset, eoffset, eoffset - soffset);

		if (i->fence)
			seq_printf(m, " protected by 0x%08x on context %llu",
				   i->fence->seqno, i->fence->context);

		seq_printf(m, "\n");
	}
	spin_unlock(&sa_manager->wq.lock);
}
#endif