1 /*
2  * Copyright 2009 Jerome Glisse.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: Jerome Glisse
23  */
24 #include <drm/drmP.h>
25 #include <drm/amdgpu_drm.h>
26 #include "amdgpu.h"
27 
28 #define AMDGPU_BENCHMARK_ITERATIONS 1024
29 #define AMDGPU_BENCHMARK_COMMON_MODES_N 17
30 
31 static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size,
32 				    uint64_t saddr, uint64_t daddr, int n)
33 {
34 	unsigned long start_jiffies;
35 	unsigned long end_jiffies;
36 	struct dma_fence *fence = NULL;
37 	int i, r;
38 
39 	start_jiffies = jiffies;
40 	for (i = 0; i < n; i++) {
41 		struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
42 		r = amdgpu_copy_buffer(ring, saddr, daddr, size, NULL, &fence,
43 				       false, false);
44 		if (r)
45 			goto exit_do_move;
46 		r = dma_fence_wait(fence, false);
47 		if (r)
48 			goto exit_do_move;
49 		dma_fence_put(fence);
50 	}
51 	end_jiffies = jiffies;
52 	r = jiffies_to_msecs(end_jiffies - start_jiffies);
53 
54 exit_do_move:
55 	if (fence)
56 		dma_fence_put(fence);
57 	return r;
58 }
59 
60 
/**
 * amdgpu_benchmark_log_results - print throughput numbers for one benchmark run
 * @n: number of buffer moves performed
 * @size: size of each move in bytes
 * @time: elapsed time in milliseconds; must be non-zero
 * @sdomain: source memory domain
 * @ddomain: destination memory domain
 * @kind: short label for the copy method (e.g. "dma")
 */
static void amdgpu_benchmark_log_results(int n, unsigned size,
					 unsigned int time,
					 unsigned sdomain, unsigned ddomain,
					 const char *kind)
{
	unsigned int throughput;

	/* Guard the division; callers only pass time > 0 today. */
	if (!time)
		return;

	throughput = (n * (size >> 10)) / time;
	DRM_INFO("amdgpu: %s %u bo moves of %u kB from"
		 " %d to %d in %u ms, throughput: %u Mb/s or %u MB/s\n",
		 kind, n, size >> 10, sdomain, ddomain, time,
		 throughput * 8, throughput);
}
72 
73 static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
74 				  unsigned sdomain, unsigned ddomain)
75 {
76 	struct amdgpu_bo *dobj = NULL;
77 	struct amdgpu_bo *sobj = NULL;
78 	uint64_t saddr, daddr;
79 	int r, n;
80 	int time;
81 
82 	n = AMDGPU_BENCHMARK_ITERATIONS;
83 	r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, sdomain, 0, NULL,
84 			     NULL, 0, &sobj);
85 	if (r) {
86 		goto out_cleanup;
87 	}
88 	r = amdgpu_bo_reserve(sobj, false);
89 	if (unlikely(r != 0))
90 		goto out_cleanup;
91 	r = amdgpu_bo_pin(sobj, sdomain, &saddr);
92 	amdgpu_bo_unreserve(sobj);
93 	if (r) {
94 		goto out_cleanup;
95 	}
96 	r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, ddomain, 0, NULL,
97 			     NULL, 0, &dobj);
98 	if (r) {
99 		goto out_cleanup;
100 	}
101 	r = amdgpu_bo_reserve(dobj, false);
102 	if (unlikely(r != 0))
103 		goto out_cleanup;
104 	r = amdgpu_bo_pin(dobj, ddomain, &daddr);
105 	amdgpu_bo_unreserve(dobj);
106 	if (r) {
107 		goto out_cleanup;
108 	}
109 
110 	if (adev->mman.buffer_funcs) {
111 		time = amdgpu_benchmark_do_move(adev, size, saddr, daddr, n);
112 		if (time < 0)
113 			goto out_cleanup;
114 		if (time > 0)
115 			amdgpu_benchmark_log_results(n, size, time,
116 						     sdomain, ddomain, "dma");
117 	}
118 
119 out_cleanup:
120 	/* Check error value now. The value can be overwritten when clean up.*/
121 	if (r) {
122 		DRM_ERROR("Error while benchmarking BO move.\n");
123 	}
124 
125 	if (sobj) {
126 		r = amdgpu_bo_reserve(sobj, true);
127 		if (likely(r == 0)) {
128 			amdgpu_bo_unpin(sobj);
129 			amdgpu_bo_unreserve(sobj);
130 		}
131 		amdgpu_bo_unref(&sobj);
132 	}
133 	if (dobj) {
134 		r = amdgpu_bo_reserve(dobj, true);
135 		if (likely(r == 0)) {
136 			amdgpu_bo_unpin(dobj);
137 			amdgpu_bo_unreserve(dobj);
138 		}
139 		amdgpu_bo_unref(&dobj);
140 	}
141 }
142 
143 void amdgpu_benchmark(struct amdgpu_device *adev, int test_number)
144 {
145 	int i;
146 	static const int common_modes[AMDGPU_BENCHMARK_COMMON_MODES_N] = {
147 		640 * 480 * 4,
148 		720 * 480 * 4,
149 		800 * 600 * 4,
150 		848 * 480 * 4,
151 		1024 * 768 * 4,
152 		1152 * 768 * 4,
153 		1280 * 720 * 4,
154 		1280 * 800 * 4,
155 		1280 * 854 * 4,
156 		1280 * 960 * 4,
157 		1280 * 1024 * 4,
158 		1440 * 900 * 4,
159 		1400 * 1050 * 4,
160 		1680 * 1050 * 4,
161 		1600 * 1200 * 4,
162 		1920 * 1080 * 4,
163 		1920 * 1200 * 4
164 	};
165 
166 	switch (test_number) {
167 	case 1:
168 		/* simple test, VRAM to GTT and GTT to VRAM */
169 		amdgpu_benchmark_move(adev, 1024*1024, AMDGPU_GEM_DOMAIN_GTT,
170 				      AMDGPU_GEM_DOMAIN_VRAM);
171 		amdgpu_benchmark_move(adev, 1024*1024, AMDGPU_GEM_DOMAIN_VRAM,
172 				      AMDGPU_GEM_DOMAIN_GTT);
173 		break;
174 	case 2:
175 		/* simple test, VRAM to VRAM */
176 		amdgpu_benchmark_move(adev, 1024*1024, AMDGPU_GEM_DOMAIN_VRAM,
177 				      AMDGPU_GEM_DOMAIN_VRAM);
178 		break;
179 	case 3:
180 		/* GTT to VRAM, buffer size sweep, powers of 2 */
181 		for (i = 1; i <= 16384; i <<= 1)
182 			amdgpu_benchmark_move(adev, i * AMDGPU_GPU_PAGE_SIZE,
183 					      AMDGPU_GEM_DOMAIN_GTT,
184 					      AMDGPU_GEM_DOMAIN_VRAM);
185 		break;
186 	case 4:
187 		/* VRAM to GTT, buffer size sweep, powers of 2 */
188 		for (i = 1; i <= 16384; i <<= 1)
189 			amdgpu_benchmark_move(adev, i * AMDGPU_GPU_PAGE_SIZE,
190 					      AMDGPU_GEM_DOMAIN_VRAM,
191 					      AMDGPU_GEM_DOMAIN_GTT);
192 		break;
193 	case 5:
194 		/* VRAM to VRAM, buffer size sweep, powers of 2 */
195 		for (i = 1; i <= 16384; i <<= 1)
196 			amdgpu_benchmark_move(adev, i * AMDGPU_GPU_PAGE_SIZE,
197 					      AMDGPU_GEM_DOMAIN_VRAM,
198 					      AMDGPU_GEM_DOMAIN_VRAM);
199 		break;
200 	case 6:
201 		/* GTT to VRAM, buffer size sweep, common modes */
202 		for (i = 0; i < AMDGPU_BENCHMARK_COMMON_MODES_N; i++)
203 			amdgpu_benchmark_move(adev, common_modes[i],
204 					      AMDGPU_GEM_DOMAIN_GTT,
205 					      AMDGPU_GEM_DOMAIN_VRAM);
206 		break;
207 	case 7:
208 		/* VRAM to GTT, buffer size sweep, common modes */
209 		for (i = 0; i < AMDGPU_BENCHMARK_COMMON_MODES_N; i++)
210 			amdgpu_benchmark_move(adev, common_modes[i],
211 					      AMDGPU_GEM_DOMAIN_VRAM,
212 					      AMDGPU_GEM_DOMAIN_GTT);
213 		break;
214 	case 8:
215 		/* VRAM to VRAM, buffer size sweep, common modes */
216 		for (i = 0; i < AMDGPU_BENCHMARK_COMMON_MODES_N; i++)
217 			amdgpu_benchmark_move(adev, common_modes[i],
218 					      AMDGPU_GEM_DOMAIN_VRAM,
219 					      AMDGPU_GEM_DOMAIN_VRAM);
220 		break;
221 
222 	default:
223 		DRM_ERROR("Unknown benchmark\n");
224 	}
225 }
226