/*
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Jerome Glisse
 */
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"

#define AMDGPU_BENCHMARK_ITERATIONS 1024
#define AMDGPU_BENCHMARK_COMMON_MODES_N 17

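/*
 * Submit @n GPU copies of @size bytes from @saddr to @daddr on the
 * buffer-move ring and wait for each copy to finish.
 *
 * Returns the elapsed time in milliseconds, or a negative error code
 * if submitting or waiting on a copy fails.
 */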
static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size,
				    uint64_t saddr, uint64_t daddr, int n)
{
	unsigned long start_jiffies;
	unsigned long end_jiffies;
	struct fence *fence = NULL;
	int i, r;

	start_jiffies = jiffies;
	for (i = 0; i < n; i++) {
		struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;

		r = amdgpu_copy_buffer(ring, saddr, daddr, size, NULL, &fence);
		if (r)
			goto exit_do_move;
		r = fence_wait(fence, false);
		if (r)
			goto exit_do_move;
		/* Drop the reference and clear the pointer so the error path
		 * below does not put the same fence a second time.
		 */
		fence_put(fence);
		fence = NULL;
	}
	end_jiffies = jiffies;
	r = jiffies_to_msecs(end_jiffies - start_jiffies);

exit_do_move:
	if (fence)
		fence_put(fence);
	return r;
}
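/*
 * Print one benchmark result. @time is in milliseconds and size >> 10
 * is the copy size in KiB, so n * KiB / ms is reported as MB/s (and
 * eight times that as Mb/s).
 */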
static void amdgpu_benchmark_log_results(int n, unsigned size,
					 unsigned int time,
					 unsigned sdomain, unsigned ddomain,
					 char *kind)
{
	unsigned int throughput = (n * (size >> 10)) / time;

	DRM_INFO("amdgpu: %s %u bo moves of %u kB from"
		 " %d to %d in %u ms, throughput: %u Mb/s or %u MB/s\n",
		 kind, n, size >> 10, sdomain, ddomain, time,
		 throughput * 8, throughput);
}

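/*
 * Create and pin a source BO in @sdomain and a destination BO in
 * @ddomain, time AMDGPU_BENCHMARK_ITERATIONS DMA copies of @size bytes
 * between them, log the result and then unpin and free both buffers.
 */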
static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
				  unsigned sdomain, unsigned ddomain)
{
	struct amdgpu_bo *dobj = NULL;
	struct amdgpu_bo *sobj = NULL;
	uint64_t saddr, daddr;
	int r, n;
	int time;

	n = AMDGPU_BENCHMARK_ITERATIONS;
	r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, sdomain, 0, NULL, &sobj);
	if (r)
		goto out_cleanup;
	r = amdgpu_bo_reserve(sobj, false);
	if (unlikely(r != 0))
		goto out_cleanup;
	r = amdgpu_bo_pin(sobj, sdomain, &saddr);
	amdgpu_bo_unreserve(sobj);
	if (r)
		goto out_cleanup;
	r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, ddomain, 0, NULL, &dobj);
	if (r)
		goto out_cleanup;
	r = amdgpu_bo_reserve(dobj, false);
	if (unlikely(r != 0))
		goto out_cleanup;
	r = amdgpu_bo_pin(dobj, ddomain, &daddr);
	amdgpu_bo_unreserve(dobj);
	if (r)
		goto out_cleanup;

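	/* Only run the copy if a buffer-move engine has been set up. */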
	if (adev->mman.buffer_funcs) {
		time = amdgpu_benchmark_do_move(adev, size, saddr, daddr, n);
		if (time < 0) {
			/* Propagate the error so it is reported below. */
			r = time;
			goto out_cleanup;
		}
		if (time > 0)
			amdgpu_benchmark_log_results(n, size, time,
						     sdomain, ddomain, "dma");
	}

out_cleanup:
	/* Check the reserve calls directly so a successful cleanup does not
	 * clobber an error code held in r from the benchmark itself.
	 */
	if (sobj) {
		if (likely(amdgpu_bo_reserve(sobj, false) == 0)) {
			amdgpu_bo_unpin(sobj);
			amdgpu_bo_unreserve(sobj);
		}
		amdgpu_bo_unref(&sobj);
	}
	if (dobj) {
		if (likely(amdgpu_bo_reserve(dobj, false) == 0)) {
			amdgpu_bo_unpin(dobj);
			amdgpu_bo_unreserve(dobj);
		}
		amdgpu_bo_unref(&dobj);
	}

	if (r)
		DRM_ERROR("Error while benchmarking BO move.\n");
}
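/*
 * Run the benchmark selected by @test_number: tests 1-2 copy a fixed
 * 1 MiB buffer, tests 3-5 sweep power-of-two buffer sizes and tests
 * 6-8 sweep framebuffer sizes of common display modes, each in the
 * domain combinations shown in the corresponding case below.
 */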
void amdgpu_benchmark(struct amdgpu_device *adev, int test_number)
{
	int i;
	static const int common_modes[AMDGPU_BENCHMARK_COMMON_MODES_N] = {
		640 * 480 * 4,
		720 * 480 * 4,
		800 * 600 * 4,
		848 * 480 * 4,
		1024 * 768 * 4,
		1152 * 768 * 4,
		1280 * 720 * 4,
		1280 * 800 * 4,
		1280 * 854 * 4,
		1280 * 960 * 4,
		1280 * 1024 * 4,
		1440 * 900 * 4,
		1400 * 1050 * 4,
		1680 * 1050 * 4,
		1600 * 1200 * 4,
		1920 * 1080 * 4,
		1920 * 1200 * 4
	};

	switch (test_number) {
	case 1:
		/* simple test, GTT to VRAM and VRAM to GTT */
		amdgpu_benchmark_move(adev, 1024*1024, AMDGPU_GEM_DOMAIN_GTT,
				      AMDGPU_GEM_DOMAIN_VRAM);
		amdgpu_benchmark_move(adev, 1024*1024, AMDGPU_GEM_DOMAIN_VRAM,
				      AMDGPU_GEM_DOMAIN_GTT);
		break;
	case 2:
		/* simple test, VRAM to VRAM */
		amdgpu_benchmark_move(adev, 1024*1024, AMDGPU_GEM_DOMAIN_VRAM,
				      AMDGPU_GEM_DOMAIN_VRAM);
		break;
	case 3:
		/* GTT to VRAM, buffer size sweep, powers of 2 */
		for (i = 1; i <= 16384; i <<= 1)
			amdgpu_benchmark_move(adev, i * AMDGPU_GPU_PAGE_SIZE,
					      AMDGPU_GEM_DOMAIN_GTT,
					      AMDGPU_GEM_DOMAIN_VRAM);
		break;
	case 4:
		/* VRAM to GTT, buffer size sweep, powers of 2 */
		for (i = 1; i <= 16384; i <<= 1)
			amdgpu_benchmark_move(adev, i * AMDGPU_GPU_PAGE_SIZE,
					      AMDGPU_GEM_DOMAIN_VRAM,
					      AMDGPU_GEM_DOMAIN_GTT);
		break;
	case 5:
		/* VRAM to VRAM, buffer size sweep, powers of 2 */
		for (i = 1; i <= 16384; i <<= 1)
			amdgpu_benchmark_move(adev, i * AMDGPU_GPU_PAGE_SIZE,
					      AMDGPU_GEM_DOMAIN_VRAM,
					      AMDGPU_GEM_DOMAIN_VRAM);
		break;
	case 6:
		/* GTT to VRAM, buffer size sweep, common modes */
		for (i = 0; i < AMDGPU_BENCHMARK_COMMON_MODES_N; i++)
			amdgpu_benchmark_move(adev, common_modes[i],
					      AMDGPU_GEM_DOMAIN_GTT,
					      AMDGPU_GEM_DOMAIN_VRAM);
		break;
	case 7:
		/* VRAM to GTT, buffer size sweep, common modes */
		for (i = 0; i < AMDGPU_BENCHMARK_COMMON_MODES_N; i++)
			amdgpu_benchmark_move(adev, common_modes[i],
					      AMDGPU_GEM_DOMAIN_VRAM,
					      AMDGPU_GEM_DOMAIN_GTT);
		break;
	case 8:
		/* VRAM to VRAM, buffer size sweep, common modes */
		for (i = 0; i < AMDGPU_BENCHMARK_COMMON_MODES_N; i++)
			amdgpu_benchmark_move(adev, common_modes[i],
					      AMDGPU_GEM_DOMAIN_VRAM,
					      AMDGPU_GEM_DOMAIN_VRAM);
		break;
	default:
		DRM_ERROR("Unknown benchmark %d\n", test_number);
		break;
	}
}