1 /*
2  * Copyright 2009 Jerome Glisse.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: Jerome Glisse
23  */
24 
25 #include <drm/amdgpu_drm.h>
26 #include "amdgpu.h"
27 
28 #define AMDGPU_BENCHMARK_ITERATIONS 1024
29 #define AMDGPU_BENCHMARK_COMMON_MODES_N 17
30 
31 static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size,
32 				    uint64_t saddr, uint64_t daddr, int n)
33 {
34 	unsigned long start_jiffies;
35 	unsigned long end_jiffies;
36 	struct dma_fence *fence;
37 	int i, r;
38 
39 	start_jiffies = jiffies;
40 	for (i = 0; i < n; i++) {
41 		struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
42 		r = amdgpu_copy_buffer(ring, saddr, daddr, size, NULL, &fence,
43 				       false, false);
44 		if (r)
45 			goto exit_do_move;
46 		r = dma_fence_wait(fence, false);
47 		dma_fence_put(fence);
48 		if (r)
49 			goto exit_do_move;
50 	}
51 	end_jiffies = jiffies;
52 	r = jiffies_to_msecs(end_jiffies - start_jiffies);
53 
54 exit_do_move:
55 	return r;
56 }
57 
58 
/**
 * amdgpu_benchmark_log_results - print throughput for a benchmark run
 * @n:       number of buffer moves performed
 * @size:    bytes moved per iteration
 * @time:    total elapsed time in milliseconds
 * @sdomain: source memory domain (AMDGPU_GEM_DOMAIN_*)
 * @ddomain: destination memory domain
 * @kind:    label for the copy engine used (e.g. "dma")
 *
 * Computes throughput as (iterations * kB per move) / ms, which comes out
 * in MB/s, and logs it. Guards against a divide-by-zero when the whole run
 * finished within the jiffies resolution (time == 0).
 */
static void amdgpu_benchmark_log_results(int n, unsigned size,
					 unsigned int time,
					 unsigned sdomain, unsigned ddomain,
					 char *kind)
{
	unsigned int throughput;

	/* A very fast run can round down to 0 ms; nothing useful to report. */
	if (time == 0)
		return;

	throughput = (n * (size >> 10)) / time;
	DRM_INFO("amdgpu: %s %u bo moves of %u kB from"
		 " %d to %d in %u ms, throughput: %u Mb/s or %u MB/s\n",
		 kind, n, size >> 10, sdomain, ddomain, time,
		 throughput * 8, throughput);
}
70 
71 static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
72 				  unsigned sdomain, unsigned ddomain)
73 {
74 	struct amdgpu_bo *dobj = NULL;
75 	struct amdgpu_bo *sobj = NULL;
76 	struct amdgpu_bo_param bp;
77 	uint64_t saddr, daddr;
78 	int r, n;
79 	int time;
80 
81 	memset(&bp, 0, sizeof(bp));
82 	bp.size = size;
83 	bp.byte_align = PAGE_SIZE;
84 	bp.domain = sdomain;
85 	bp.flags = 0;
86 	bp.type = ttm_bo_type_kernel;
87 	bp.resv = NULL;
88 	n = AMDGPU_BENCHMARK_ITERATIONS;
89 	r = amdgpu_bo_create(adev, &bp, &sobj);
90 	if (r) {
91 		goto out_cleanup;
92 	}
93 	r = amdgpu_bo_reserve(sobj, false);
94 	if (unlikely(r != 0))
95 		goto out_cleanup;
96 	r = amdgpu_bo_pin(sobj, sdomain);
97 	if (r) {
98 		amdgpu_bo_unreserve(sobj);
99 		goto out_cleanup;
100 	}
101 	r = amdgpu_ttm_alloc_gart(&sobj->tbo);
102 	amdgpu_bo_unreserve(sobj);
103 	if (r) {
104 		goto out_cleanup;
105 	}
106 	saddr = amdgpu_bo_gpu_offset(sobj);
107 	bp.domain = ddomain;
108 	r = amdgpu_bo_create(adev, &bp, &dobj);
109 	if (r) {
110 		goto out_cleanup;
111 	}
112 	r = amdgpu_bo_reserve(dobj, false);
113 	if (unlikely(r != 0))
114 		goto out_cleanup;
115 	r = amdgpu_bo_pin(dobj, ddomain);
116 	if (r) {
117 		amdgpu_bo_unreserve(sobj);
118 		goto out_cleanup;
119 	}
120 	r = amdgpu_ttm_alloc_gart(&dobj->tbo);
121 	amdgpu_bo_unreserve(dobj);
122 	if (r) {
123 		goto out_cleanup;
124 	}
125 	daddr = amdgpu_bo_gpu_offset(dobj);
126 
127 	if (adev->mman.buffer_funcs) {
128 		time = amdgpu_benchmark_do_move(adev, size, saddr, daddr, n);
129 		if (time < 0)
130 			goto out_cleanup;
131 		if (time > 0)
132 			amdgpu_benchmark_log_results(n, size, time,
133 						     sdomain, ddomain, "dma");
134 	}
135 
136 out_cleanup:
137 	/* Check error value now. The value can be overwritten when clean up.*/
138 	if (r) {
139 		DRM_ERROR("Error while benchmarking BO move.\n");
140 	}
141 
142 	if (sobj) {
143 		r = amdgpu_bo_reserve(sobj, true);
144 		if (likely(r == 0)) {
145 			amdgpu_bo_unpin(sobj);
146 			amdgpu_bo_unreserve(sobj);
147 		}
148 		amdgpu_bo_unref(&sobj);
149 	}
150 	if (dobj) {
151 		r = amdgpu_bo_reserve(dobj, true);
152 		if (likely(r == 0)) {
153 			amdgpu_bo_unpin(dobj);
154 			amdgpu_bo_unreserve(dobj);
155 		}
156 		amdgpu_bo_unref(&dobj);
157 	}
158 }
159 
160 void amdgpu_benchmark(struct amdgpu_device *adev, int test_number)
161 {
162 	int i;
163 	static const int common_modes[AMDGPU_BENCHMARK_COMMON_MODES_N] = {
164 		640 * 480 * 4,
165 		720 * 480 * 4,
166 		800 * 600 * 4,
167 		848 * 480 * 4,
168 		1024 * 768 * 4,
169 		1152 * 768 * 4,
170 		1280 * 720 * 4,
171 		1280 * 800 * 4,
172 		1280 * 854 * 4,
173 		1280 * 960 * 4,
174 		1280 * 1024 * 4,
175 		1440 * 900 * 4,
176 		1400 * 1050 * 4,
177 		1680 * 1050 * 4,
178 		1600 * 1200 * 4,
179 		1920 * 1080 * 4,
180 		1920 * 1200 * 4
181 	};
182 
183 	switch (test_number) {
184 	case 1:
185 		/* simple test, VRAM to GTT and GTT to VRAM */
186 		amdgpu_benchmark_move(adev, 1024*1024, AMDGPU_GEM_DOMAIN_GTT,
187 				      AMDGPU_GEM_DOMAIN_VRAM);
188 		amdgpu_benchmark_move(adev, 1024*1024, AMDGPU_GEM_DOMAIN_VRAM,
189 				      AMDGPU_GEM_DOMAIN_GTT);
190 		break;
191 	case 2:
192 		/* simple test, VRAM to VRAM */
193 		amdgpu_benchmark_move(adev, 1024*1024, AMDGPU_GEM_DOMAIN_VRAM,
194 				      AMDGPU_GEM_DOMAIN_VRAM);
195 		break;
196 	case 3:
197 		/* GTT to VRAM, buffer size sweep, powers of 2 */
198 		for (i = 1; i <= 16384; i <<= 1)
199 			amdgpu_benchmark_move(adev, i * AMDGPU_GPU_PAGE_SIZE,
200 					      AMDGPU_GEM_DOMAIN_GTT,
201 					      AMDGPU_GEM_DOMAIN_VRAM);
202 		break;
203 	case 4:
204 		/* VRAM to GTT, buffer size sweep, powers of 2 */
205 		for (i = 1; i <= 16384; i <<= 1)
206 			amdgpu_benchmark_move(adev, i * AMDGPU_GPU_PAGE_SIZE,
207 					      AMDGPU_GEM_DOMAIN_VRAM,
208 					      AMDGPU_GEM_DOMAIN_GTT);
209 		break;
210 	case 5:
211 		/* VRAM to VRAM, buffer size sweep, powers of 2 */
212 		for (i = 1; i <= 16384; i <<= 1)
213 			amdgpu_benchmark_move(adev, i * AMDGPU_GPU_PAGE_SIZE,
214 					      AMDGPU_GEM_DOMAIN_VRAM,
215 					      AMDGPU_GEM_DOMAIN_VRAM);
216 		break;
217 	case 6:
218 		/* GTT to VRAM, buffer size sweep, common modes */
219 		for (i = 0; i < AMDGPU_BENCHMARK_COMMON_MODES_N; i++)
220 			amdgpu_benchmark_move(adev, common_modes[i],
221 					      AMDGPU_GEM_DOMAIN_GTT,
222 					      AMDGPU_GEM_DOMAIN_VRAM);
223 		break;
224 	case 7:
225 		/* VRAM to GTT, buffer size sweep, common modes */
226 		for (i = 0; i < AMDGPU_BENCHMARK_COMMON_MODES_N; i++)
227 			amdgpu_benchmark_move(adev, common_modes[i],
228 					      AMDGPU_GEM_DOMAIN_VRAM,
229 					      AMDGPU_GEM_DOMAIN_GTT);
230 		break;
231 	case 8:
232 		/* VRAM to VRAM, buffer size sweep, common modes */
233 		for (i = 0; i < AMDGPU_BENCHMARK_COMMON_MODES_N; i++)
234 			amdgpu_benchmark_move(adev, common_modes[i],
235 					      AMDGPU_GEM_DOMAIN_VRAM,
236 					      AMDGPU_GEM_DOMAIN_VRAM);
237 		break;
238 
239 	default:
240 		DRM_ERROR("Unknown benchmark\n");
241 	}
242 }
243