/*
 * Copyright 2009 VMware, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Michel Dänzer
 */
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon_reg.h"
#include "radeon.h"


/* Test BO GTT->VRAM and VRAM->GTT GPU copies across the whole GTT aperture */
void radeon_test_moves(struct radeon_device *rdev)
{
	struct radeon_bo *vram_obj = NULL;
	struct radeon_bo **gtt_obj = NULL;
	struct radeon_fence *fence = NULL;
	uint64_t gtt_addr, vram_addr;
	unsigned i, n, size;
	int r;

	size = 1024 * 1024;

	/* Number of tests =
	 * (Total GTT - IB pool - writeback page - ring buffers) / test size
	 */
	n = rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024 - rdev->cp.ring_size;
	if (rdev->wb.wb_obj)
		n -= RADEON_GPU_PAGE_SIZE;
	if (rdev->ih.ring_obj)
		n -= rdev->ih.ring_size;
	n /= size;

	gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL);
	if (!gtt_obj) {
		DRM_ERROR("Failed to allocate %d pointers\n", n);
		r = 1;
		goto out_cleanup;
	}

	r = radeon_bo_create(rdev, size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
			     &vram_obj);
	if (r) {
		DRM_ERROR("Failed to create VRAM object\n");
		goto out_cleanup;
	}
	r = radeon_bo_reserve(vram_obj, false);
	if (unlikely(r != 0))
		goto out_cleanup;
	r = radeon_bo_pin(vram_obj, RADEON_GEM_DOMAIN_VRAM, &vram_addr);
	if (r) {
		DRM_ERROR("Failed to pin VRAM object\n");
		goto out_cleanup;
	}
	for (i = 0; i < n; i++) {
		void *gtt_map, *vram_map;
		void **gtt_start, **gtt_end;
		void **vram_start, **vram_end;

		r = radeon_bo_create(rdev, size, PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT, gtt_obj + i);
		if (r) {
			DRM_ERROR("Failed to create GTT object %d\n", i);
			goto out_cleanup;
		}

		r = radeon_bo_reserve(gtt_obj[i], false);
		if (unlikely(r != 0))
			goto out_cleanup;
		r = radeon_bo_pin(gtt_obj[i], RADEON_GEM_DOMAIN_GTT, &gtt_addr);
		if (r) {
			DRM_ERROR("Failed to pin GTT object %d\n", i);
			goto out_cleanup;
		}

		r = radeon_bo_kmap(gtt_obj[i], &gtt_map);
		if (r) {
			DRM_ERROR("Failed to map GTT object %d\n", i);
			goto out_cleanup;
		}

		/* Fill the GTT BO with a known pattern: each slot holds its own
		 * CPU address, so any misplaced data is easy to spot later.
		 */
		for (gtt_start = gtt_map, gtt_end = gtt_map + size;
		     gtt_start < gtt_end;
		     gtt_start++)
			*gtt_start = gtt_start;

		radeon_bo_kunmap(gtt_obj[i]);

		r = radeon_fence_create(rdev, &fence);
		if (r) {
			DRM_ERROR("Failed to create GTT->VRAM fence %d\n", i);
			goto out_cleanup;
		}

		r = radeon_copy(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, fence);
		if (r) {
			DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
			goto out_cleanup;
		}

		r = radeon_fence_wait(fence, false);
		if (r) {
			DRM_ERROR("Failed to wait for GTT->VRAM fence %d\n", i);
			goto out_cleanup;
		}

		radeon_fence_unref(&fence);

		r = radeon_bo_kmap(vram_obj, &vram_map);
		if (r) {
			DRM_ERROR("Failed to map VRAM object after copy %d\n", i);
			goto out_cleanup;
		}

		/* Verify the GTT->VRAM copy, then rewrite the VRAM BO with its
		 * own addresses for the return trip.
		 */
		for (gtt_start = gtt_map, gtt_end = gtt_map + size,
		     vram_start = vram_map, vram_end = vram_map + size;
		     vram_start < vram_end;
		     gtt_start++, vram_start++) {
			if (*vram_start != gtt_start) {
				DRM_ERROR("Incorrect GTT->VRAM copy %d: Got 0x%p, "
					  "expected 0x%p (GTT/VRAM offset "
					  "0x%16llx/0x%16llx)\n",
					  i, *vram_start, gtt_start,
					  (unsigned long long)
					  (gtt_addr - rdev->mc.gtt_start +
					   (void*)gtt_start - gtt_map),
					  (unsigned long long)
					  (vram_addr - rdev->mc.vram_start +
					   (void*)gtt_start - gtt_map));
				radeon_bo_kunmap(vram_obj);
				goto out_cleanup;
			}
			*vram_start = vram_start;
		}

		radeon_bo_kunmap(vram_obj);

		r = radeon_fence_create(rdev, &fence);
		if (r) {
			DRM_ERROR("Failed to create VRAM->GTT fence %d\n", i);
			goto out_cleanup;
		}

		r = radeon_copy(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, fence);
		if (r) {
			DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
			goto out_cleanup;
		}

		r = radeon_fence_wait(fence, false);
		if (r) {
			DRM_ERROR("Failed to wait for VRAM->GTT fence %d\n", i);
			goto out_cleanup;
		}

		radeon_fence_unref(&fence);

		r = radeon_bo_kmap(gtt_obj[i], &gtt_map);
		if (r) {
			DRM_ERROR("Failed to map GTT object after copy %d\n", i);
			goto out_cleanup;
		}

		/* Verify the VRAM->GTT copy against the VRAM pattern. */
		for (gtt_start = gtt_map, gtt_end = gtt_map + size,
		     vram_start = vram_map, vram_end = vram_map + size;
		     gtt_start < gtt_end;
		     gtt_start++, vram_start++) {
			if (*gtt_start != vram_start) {
				DRM_ERROR("Incorrect VRAM->GTT copy %d: Got 0x%p, "
					  "expected 0x%p (VRAM/GTT offset "
					  "0x%16llx/0x%16llx)\n",
					  i, *gtt_start, vram_start,
					  (unsigned long long)
					  (vram_addr - rdev->mc.vram_start +
					   (void*)vram_start - vram_map),
					  (unsigned long long)
					  (gtt_addr - rdev->mc.gtt_start +
					   (void*)vram_start - vram_map));
				radeon_bo_kunmap(gtt_obj[i]);
				goto out_cleanup;
			}
		}

		radeon_bo_kunmap(gtt_obj[i]);

		DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%llx\n",
			 gtt_addr - rdev->mc.gtt_start);
	}

out_cleanup:
	if (vram_obj) {
		if (radeon_bo_is_reserved(vram_obj)) {
			radeon_bo_unpin(vram_obj);
			radeon_bo_unreserve(vram_obj);
		}
		radeon_bo_unref(&vram_obj);
	}
	if (gtt_obj) {
		for (i = 0; i < n; i++) {
			if (gtt_obj[i]) {
				if (radeon_bo_is_reserved(gtt_obj[i])) {
					radeon_bo_unpin(gtt_obj[i]);
					radeon_bo_unreserve(gtt_obj[i]);
				}
				radeon_bo_unref(&gtt_obj[i]);
			}
		}
		kfree(gtt_obj);
	}
	if (fence) {
		radeon_fence_unref(&fence);
	}
	if (r) {
		printk(KERN_WARNING "Error while testing BO move.\n");
	}
}
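
/*
 * Usage note (editorial sketch, not part of the original file): this test is
 * normally run once during device initialization when the driver's "test"
 * module parameter is set, roughly as below. The call site and parameter name
 * (radeon_testing, checked in radeon_device.c) are assumptions and may differ
 * between kernel versions.
 *
 *	if (radeon_testing)
 *		radeon_test_moves(rdev);
 */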