/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/bsearch.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
#include "kfd_pm4_headers_vi.h"
#include "cwsr_trap_handler.h"
#include "kfd_iommu.h"
#include "amdgpu_amdkfd.h"

#define MQD_SIZE_ALIGNED 768

/*
 * kfd_locked is used to lock the kfd driver during suspend or reset.
 * Once locked, the kfd driver will stop any further GPU execution;
 * process creation (open) will return -EAGAIN.
 */
static atomic_t kfd_locked = ATOMIC_INIT(0);

#ifdef CONFIG_DRM_AMDGPU_CIK
extern const struct kfd2kgd_calls gfx_v7_kfd2kgd;
#endif
extern const struct kfd2kgd_calls gfx_v8_kfd2kgd;
extern const struct kfd2kgd_calls gfx_v9_kfd2kgd;
extern const struct kfd2kgd_calls arcturus_kfd2kgd;
extern const struct kfd2kgd_calls gfx_v10_kfd2kgd;

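/*
 * Per-ASIC dispatch table: maps each supported chip family to the kfd2kgd
 * interface implemented by the matching gfx IP version.
 */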
static const struct kfd2kgd_calls *kfd2kgd_funcs[] = {
#ifdef KFD_SUPPORT_IOMMU_V2
#ifdef CONFIG_DRM_AMDGPU_CIK
	[CHIP_KAVERI] = &gfx_v7_kfd2kgd,
#endif
	[CHIP_CARRIZO] = &gfx_v8_kfd2kgd,
	[CHIP_RAVEN] = &gfx_v9_kfd2kgd,
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	[CHIP_HAWAII] = &gfx_v7_kfd2kgd,
#endif
	[CHIP_TONGA] = &gfx_v8_kfd2kgd,
	[CHIP_FIJI] = &gfx_v8_kfd2kgd,
	[CHIP_POLARIS10] = &gfx_v8_kfd2kgd,
	[CHIP_POLARIS11] = &gfx_v8_kfd2kgd,
	[CHIP_POLARIS12] = &gfx_v8_kfd2kgd,
	[CHIP_VEGAM] = &gfx_v8_kfd2kgd,
	[CHIP_VEGA10] = &gfx_v9_kfd2kgd,
	[CHIP_VEGA12] = &gfx_v9_kfd2kgd,
	[CHIP_VEGA20] = &gfx_v9_kfd2kgd,
	[CHIP_RENOIR] = &gfx_v9_kfd2kgd,
	[CHIP_ARCTURUS] = &arcturus_kfd2kgd,
	[CHIP_NAVI10] = &gfx_v10_kfd2kgd,
	[CHIP_NAVI12] = &gfx_v10_kfd2kgd,
	[CHIP_NAVI14] = &gfx_v10_kfd2kgd,
};

#ifdef KFD_SUPPORT_IOMMU_V2
static const struct kfd_device_info kaveri_device_info = {
	.asic_family = CHIP_KAVERI,
	.asic_name = "kaveri",
	.max_pasid_bits = 16,
	/* max num of queues for KV. TODO: should be a dynamic value */
	.max_no_of_hqd	= 24,
	.doorbell_size  = 4,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = false,
	.needs_iommu_device = true,
	.needs_pci_atomics = false,
	.num_sdma_engines = 2,
	.num_xgmi_sdma_engines = 0,
	.num_sdma_queues_per_engine = 2,
};

static const struct kfd_device_info carrizo_device_info = {
	.asic_family = CHIP_CARRIZO,
	.asic_name = "carrizo",
	.max_pasid_bits = 16,
	/* max num of queues for CZ. TODO: should be a dynamic value */
	.max_no_of_hqd	= 24,
	.doorbell_size  = 4,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = true,
	.needs_pci_atomics = false,
	.num_sdma_engines = 2,
	.num_xgmi_sdma_engines = 0,
	.num_sdma_queues_per_engine = 2,
};

static const struct kfd_device_info raven_device_info = {
	.asic_family = CHIP_RAVEN,
	.asic_name = "raven",
	.max_pasid_bits = 16,
	.max_no_of_hqd  = 24,
	.doorbell_size  = 8,
	.ih_ring_entry_size = 8 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_v9,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = true,
	.needs_pci_atomics = true,
	.num_sdma_engines = 1,
	.num_xgmi_sdma_engines = 0,
	.num_sdma_queues_per_engine = 2,
};
#endif

static const struct kfd_device_info hawaii_device_info = {
	.asic_family = CHIP_HAWAII,
	.asic_name = "hawaii",
	.max_pasid_bits = 16,
	/* max num of queues for Hawaii. TODO: should be a dynamic value */
	.max_no_of_hqd	= 24,
	.doorbell_size  = 4,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = false,
	.needs_iommu_device = false,
	.needs_pci_atomics = false,
	.num_sdma_engines = 2,
	.num_xgmi_sdma_engines = 0,
	.num_sdma_queues_per_engine = 2,
};

static const struct kfd_device_info tonga_device_info = {
	.asic_family = CHIP_TONGA,
	.asic_name = "tonga",
	.max_pasid_bits = 16,
	.max_no_of_hqd  = 24,
	.doorbell_size  = 4,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = false,
	.needs_iommu_device = false,
	.needs_pci_atomics = true,
	.num_sdma_engines = 2,
	.num_xgmi_sdma_engines = 0,
	.num_sdma_queues_per_engine = 2,
};

static const struct kfd_device_info fiji_device_info = {
	.asic_family = CHIP_FIJI,
	.asic_name = "fiji",
	.max_pasid_bits = 16,
	.max_no_of_hqd  = 24,
	.doorbell_size  = 4,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = true,
	.num_sdma_engines = 2,
	.num_xgmi_sdma_engines = 0,
	.num_sdma_queues_per_engine = 2,
};

static const struct kfd_device_info fiji_vf_device_info = {
	.asic_family = CHIP_FIJI,
	.asic_name = "fiji",
	.max_pasid_bits = 16,
	.max_no_of_hqd  = 24,
	.doorbell_size  = 4,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = false,
	.num_sdma_engines = 2,
	.num_xgmi_sdma_engines = 0,
	.num_sdma_queues_per_engine = 2,
};

static const struct kfd_device_info polaris10_device_info = {
	.asic_family = CHIP_POLARIS10,
	.asic_name = "polaris10",
	.max_pasid_bits = 16,
	.max_no_of_hqd  = 24,
	.doorbell_size  = 4,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = true,
	.num_sdma_engines = 2,
	.num_xgmi_sdma_engines = 0,
	.num_sdma_queues_per_engine = 2,
};

static const struct kfd_device_info polaris10_vf_device_info = {
	.asic_family = CHIP_POLARIS10,
	.asic_name = "polaris10",
	.max_pasid_bits = 16,
	.max_no_of_hqd  = 24,
	.doorbell_size  = 4,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = false,
	.num_sdma_engines = 2,
	.num_xgmi_sdma_engines = 0,
	.num_sdma_queues_per_engine = 2,
};

static const struct kfd_device_info polaris11_device_info = {
	.asic_family = CHIP_POLARIS11,
	.asic_name = "polaris11",
	.max_pasid_bits = 16,
	.max_no_of_hqd  = 24,
	.doorbell_size  = 4,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = true,
	.num_sdma_engines = 2,
	.num_xgmi_sdma_engines = 0,
	.num_sdma_queues_per_engine = 2,
};

static const struct kfd_device_info polaris12_device_info = {
	.asic_family = CHIP_POLARIS12,
	.asic_name = "polaris12",
	.max_pasid_bits = 16,
	.max_no_of_hqd  = 24,
	.doorbell_size  = 4,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = true,
	.num_sdma_engines = 2,
	.num_xgmi_sdma_engines = 0,
	.num_sdma_queues_per_engine = 2,
};

static const struct kfd_device_info vegam_device_info = {
	.asic_family = CHIP_VEGAM,
	.asic_name = "vegam",
	.max_pasid_bits = 16,
	.max_no_of_hqd  = 24,
	.doorbell_size  = 4,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = true,
	.num_sdma_engines = 2,
	.num_xgmi_sdma_engines = 0,
	.num_sdma_queues_per_engine = 2,
};

static const struct kfd_device_info vega10_device_info = {
	.asic_family = CHIP_VEGA10,
	.asic_name = "vega10",
	.max_pasid_bits = 16,
	.max_no_of_hqd  = 24,
	.doorbell_size  = 8,
	.ih_ring_entry_size = 8 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_v9,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = false,
	.num_sdma_engines = 2,
	.num_xgmi_sdma_engines = 0,
	.num_sdma_queues_per_engine = 2,
};

static const struct kfd_device_info vega10_vf_device_info = {
	.asic_family = CHIP_VEGA10,
	.asic_name = "vega10",
	.max_pasid_bits = 16,
	.max_no_of_hqd  = 24,
	.doorbell_size  = 8,
	.ih_ring_entry_size = 8 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_v9,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = false,
	.num_sdma_engines = 2,
	.num_xgmi_sdma_engines = 0,
	.num_sdma_queues_per_engine = 2,
};

static const struct kfd_device_info vega12_device_info = {
	.asic_family = CHIP_VEGA12,
	.asic_name = "vega12",
	.max_pasid_bits = 16,
	.max_no_of_hqd  = 24,
	.doorbell_size  = 8,
	.ih_ring_entry_size = 8 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_v9,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = false,
	.num_sdma_engines = 2,
	.num_xgmi_sdma_engines = 0,
	.num_sdma_queues_per_engine = 2,
};

static const struct kfd_device_info vega20_device_info = {
	.asic_family = CHIP_VEGA20,
	.asic_name = "vega20",
	.max_pasid_bits = 16,
	.max_no_of_hqd	= 24,
	.doorbell_size	= 8,
	.ih_ring_entry_size = 8 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_v9,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = false,
	.num_sdma_engines = 2,
	.num_xgmi_sdma_engines = 0,
	.num_sdma_queues_per_engine = 8,
};

static const struct kfd_device_info arcturus_device_info = {
	.asic_family = CHIP_ARCTURUS,
	.asic_name = "arcturus",
	.max_pasid_bits = 16,
	.max_no_of_hqd	= 24,
	.doorbell_size	= 8,
	.ih_ring_entry_size = 8 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_v9,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = false,
	.num_sdma_engines = 2,
	.num_xgmi_sdma_engines = 6,
	.num_sdma_queues_per_engine = 8,
};

static const struct kfd_device_info renoir_device_info = {
	.asic_family = CHIP_RENOIR,
	.asic_name = "renoir",
	.max_pasid_bits = 16,
	.max_no_of_hqd  = 24,
	.doorbell_size  = 8,
	.ih_ring_entry_size = 8 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_v9,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = false,
	.num_sdma_engines = 1,
	.num_xgmi_sdma_engines = 0,
	.num_sdma_queues_per_engine = 2,
};

static const struct kfd_device_info navi10_device_info = {
	.asic_family = CHIP_NAVI10,
	.asic_name = "navi10",
	.max_pasid_bits = 16,
	.max_no_of_hqd  = 24,
	.doorbell_size  = 8,
	.ih_ring_entry_size = 8 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_v9,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = false,
	.num_sdma_engines = 2,
	.num_xgmi_sdma_engines = 0,
	.num_sdma_queues_per_engine = 8,
};

static const struct kfd_device_info navi12_device_info = {
	.asic_family = CHIP_NAVI12,
	.asic_name = "navi12",
	.max_pasid_bits = 16,
	.max_no_of_hqd  = 24,
	.doorbell_size  = 8,
	.ih_ring_entry_size = 8 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_v9,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = false,
	.num_sdma_engines = 2,
	.num_xgmi_sdma_engines = 0,
	.num_sdma_queues_per_engine = 8,
};

static const struct kfd_device_info navi14_device_info = {
	.asic_family = CHIP_NAVI14,
	.asic_name = "navi14",
	.max_pasid_bits = 16,
	.max_no_of_hqd  = 24,
	.doorbell_size  = 8,
	.ih_ring_entry_size = 8 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_v9,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = false,
	.num_sdma_engines = 2,
	.num_xgmi_sdma_engines = 0,
	.num_sdma_queues_per_engine = 8,
};

/* For each entry, [0] is the regular device and [1] is the virtualization (VF) device. */
static const struct kfd_device_info *kfd_supported_devices[][2] = {
#ifdef KFD_SUPPORT_IOMMU_V2
	[CHIP_KAVERI] = {&kaveri_device_info, NULL},
	[CHIP_CARRIZO] = {&carrizo_device_info, NULL},
	[CHIP_RAVEN] = {&raven_device_info, NULL},
#endif
	[CHIP_HAWAII] = {&hawaii_device_info, NULL},
	[CHIP_TONGA] = {&tonga_device_info, NULL},
	[CHIP_FIJI] = {&fiji_device_info, &fiji_vf_device_info},
	[CHIP_POLARIS10] = {&polaris10_device_info, &polaris10_vf_device_info},
	[CHIP_POLARIS11] = {&polaris11_device_info, NULL},
	[CHIP_POLARIS12] = {&polaris12_device_info, NULL},
	[CHIP_VEGAM] = {&vegam_device_info, NULL},
	[CHIP_VEGA10] = {&vega10_device_info, &vega10_vf_device_info},
	[CHIP_VEGA12] = {&vega12_device_info, NULL},
	[CHIP_VEGA20] = {&vega20_device_info, NULL},
	[CHIP_RENOIR] = {&renoir_device_info, NULL},
	[CHIP_ARCTURUS] = {&arcturus_device_info, &arcturus_device_info},
	[CHIP_NAVI10] = {&navi10_device_info, NULL},
	[CHIP_NAVI12] = {&navi12_device_info, &navi12_device_info},
	[CHIP_NAVI14] = {&navi14_device_info, NULL},
};

static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
				unsigned int chunk_size);
static void kfd_gtt_sa_fini(struct kfd_dev *kfd);

static int kfd_resume(struct kfd_dev *kfd);

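/*
 * Look up the device_info and kfd2kgd function table for the probed ASIC,
 * check any PCIe atomics requirement, and allocate the kfd_dev. Returns
 * NULL if the device cannot be supported.
 */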
struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
	struct pci_dev *pdev, unsigned int asic_type, bool vf)
{
	struct kfd_dev *kfd;
	const struct kfd_device_info *device_info;
	const struct kfd2kgd_calls *f2g;

	if (asic_type >= ARRAY_SIZE(kfd_supported_devices)
		|| asic_type >= ARRAY_SIZE(kfd2kgd_funcs)) {
		dev_err(kfd_device, "asic_type %d out of range\n", asic_type);
		return NULL; /* asic_type out of range */
	}

	device_info = kfd_supported_devices[asic_type][vf];
	f2g = kfd2kgd_funcs[asic_type];

	if (!device_info || !f2g) {
		dev_err(kfd_device, "%s %s not supported in kfd\n",
			amdgpu_asic_name[asic_type], vf ? "VF" : "");
		return NULL;
	}

	kfd = kzalloc(sizeof(*kfd), GFP_KERNEL);
	if (!kfd)
		return NULL;

	/* Allow BIF to recode atomics to PCIe 3.0 AtomicOps.
	 * 32 and 64-bit requests are possible and must be
	 * supported.
	 */
	kfd->pci_atomic_requested = amdgpu_amdkfd_have_atomics_support(kgd);
	if (device_info->needs_pci_atomics &&
	    !kfd->pci_atomic_requested) {
		dev_info(kfd_device,
			 "skipped device %x:%x, PCI rejects atomics\n",
			 pdev->vendor, pdev->device);
		kfree(kfd);
		return NULL;
	}

	kfd->kgd = kgd;
	kfd->device_info = device_info;
	kfd->pdev = pdev;
	kfd->init_complete = false;
	kfd->kfd2kgd = f2g;
	atomic_set(&kfd->compute_profile, 0);

	mutex_init(&kfd->doorbell_mutex);
	memset(&kfd->doorbell_available_index, 0,
		sizeof(kfd->doorbell_available_index));

	atomic_set(&kfd->sram_ecc_flag, 0);

	return kfd;
}

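/*
 * Pick the CWSR (compute wave save/restore) trap handler that matches the
 * GFX generation: gfx8 for pre-Vega ASICs, a dedicated handler for
 * Arcturus, gfx9 for the remaining pre-Navi ASICs, and gfx10 otherwise.
 * Each handler binary must fit within a single page.
 */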
static void kfd_cwsr_init(struct kfd_dev *kfd)
{
	if (cwsr_enable && kfd->device_info->supports_cwsr) {
		if (kfd->device_info->asic_family < CHIP_VEGA10) {
			BUILD_BUG_ON(sizeof(cwsr_trap_gfx8_hex) > PAGE_SIZE);
			kfd->cwsr_isa = cwsr_trap_gfx8_hex;
			kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx8_hex);
		} else if (kfd->device_info->asic_family == CHIP_ARCTURUS) {
			BUILD_BUG_ON(sizeof(cwsr_trap_arcturus_hex) > PAGE_SIZE);
			kfd->cwsr_isa = cwsr_trap_arcturus_hex;
			kfd->cwsr_isa_size = sizeof(cwsr_trap_arcturus_hex);
		} else if (kfd->device_info->asic_family < CHIP_NAVI10) {
			BUILD_BUG_ON(sizeof(cwsr_trap_gfx9_hex) > PAGE_SIZE);
			kfd->cwsr_isa = cwsr_trap_gfx9_hex;
			kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx9_hex);
		} else {
			BUILD_BUG_ON(sizeof(cwsr_trap_gfx10_hex) > PAGE_SIZE);
			kfd->cwsr_isa = cwsr_trap_gfx10_hex;
			kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx10_hex);
		}

		kfd->cwsr_enabled = true;
	}
}

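/*
 * Allocate the global GWS (global wave sync) pool, shared by all KFD
 * processes, when HWS scheduling is in use and either GWS support is
 * forced via the hws_gws_support module parameter or the ASIC/firmware
 * combination (Vega10 through Raven with MEC2 firmware >= 0x1b3) is known
 * to support it.
 */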
static int kfd_gws_init(struct kfd_dev *kfd)
{
	int ret = 0;

	if (kfd->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS)
		return 0;

	if (hws_gws_support
		|| (kfd->device_info->asic_family >= CHIP_VEGA10
			&& kfd->device_info->asic_family <= CHIP_RAVEN
			&& kfd->mec2_fw_version >= 0x1b3))
		ret = amdgpu_amdkfd_alloc_gws(kfd->kgd,
				amdgpu_amdkfd_get_num_gws(kfd->kgd), &kfd->gws);

	return ret;
}

bool kgd2kfd_device_init(struct kfd_dev *kfd,
			 struct drm_device *ddev,
			 const struct kgd2kfd_shared_resources *gpu_resources)
{
	unsigned int size;

	kfd->ddev = ddev;
	kfd->mec_fw_version = amdgpu_amdkfd_get_fw_version(kfd->kgd,
			KGD_ENGINE_MEC1);
	kfd->mec2_fw_version = amdgpu_amdkfd_get_fw_version(kfd->kgd,
			KGD_ENGINE_MEC2);
	kfd->sdma_fw_version = amdgpu_amdkfd_get_fw_version(kfd->kgd,
			KGD_ENGINE_SDMA1);
	kfd->shared_resources = *gpu_resources;

	kfd->vm_info.first_vmid_kfd = ffs(gpu_resources->compute_vmid_bitmap) - 1;
	kfd->vm_info.last_vmid_kfd = fls(gpu_resources->compute_vmid_bitmap) - 1;
	kfd->vm_info.vmid_num_kfd = kfd->vm_info.last_vmid_kfd
			- kfd->vm_info.first_vmid_kfd + 1;

	/* Verify module parameters regarding mapped process number */
	if ((hws_max_conc_proc < 0)
			|| (hws_max_conc_proc > kfd->vm_info.vmid_num_kfd)) {
		dev_err(kfd_device,
			"hws_max_conc_proc %d must be between 0 and %d, use %d instead\n",
			hws_max_conc_proc, kfd->vm_info.vmid_num_kfd,
			kfd->vm_info.vmid_num_kfd);
		kfd->max_proc_per_quantum = kfd->vm_info.vmid_num_kfd;
	} else {
		kfd->max_proc_per_quantum = hws_max_conc_proc;
	}

	/* calculate max size of mqds needed for queues */
	size = max_num_of_queues_per_device *
			kfd->device_info->mqd_size_aligned;

	/*
	 * calculate max size of runlist packet.
	 * There can be only 2 packets at once
	 */
	size += (KFD_MAX_NUM_OF_PROCESSES * sizeof(struct pm4_mes_map_process) +
		max_num_of_queues_per_device * sizeof(struct pm4_mes_map_queues)
		+ sizeof(struct pm4_mes_runlist)) * 2;

	/* Add size of HIQ & DIQ */
	size += KFD_KERNEL_QUEUE_SIZE * 2;

	/* add another 512KB for all other allocations on gart (HPD, fences) */
	size += 512 * 1024;

	if (amdgpu_amdkfd_alloc_gtt_mem(
			kfd->kgd, size, &kfd->gtt_mem,
			&kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr,
			false)) {
		dev_err(kfd_device, "Could not allocate %d bytes\n", size);
		goto alloc_gtt_mem_failure;
	}

	dev_info(kfd_device, "Allocated %d bytes on gart\n", size);

	/* Initialize GTT sa with 512 byte chunk size */
	if (kfd_gtt_sa_init(kfd, size, 512) != 0) {
		dev_err(kfd_device, "Error initializing gtt sub-allocator\n");
		goto kfd_gtt_sa_init_error;
	}

	if (kfd_doorbell_init(kfd)) {
		dev_err(kfd_device,
			"Error initializing doorbell aperture\n");
		goto kfd_doorbell_error;
	}

	if (kfd->kfd2kgd->get_hive_id)
		kfd->hive_id = kfd->kfd2kgd->get_hive_id(kfd->kgd);

	if (kfd->kfd2kgd->get_unique_id)
		kfd->unique_id = kfd->kfd2kgd->get_unique_id(kfd->kgd);

	if (kfd_interrupt_init(kfd)) {
		dev_err(kfd_device, "Error initializing interrupts\n");
		goto kfd_interrupt_error;
	}

	kfd->dqm = device_queue_manager_init(kfd);
	if (!kfd->dqm) {
		dev_err(kfd_device, "Error initializing queue manager\n");
		goto device_queue_manager_error;
	}

	/* If supported on this device, allocate global GWS that is shared
	 * by all KFD processes
	 */
	if (kfd_gws_init(kfd)) {
		dev_err(kfd_device, "Could not allocate %d gws\n",
			amdgpu_amdkfd_get_num_gws(kfd->kgd));
		goto gws_error;
	}

	if (kfd_iommu_device_init(kfd)) {
		dev_err(kfd_device, "Error initializing iommuv2\n");
		goto device_iommu_error;
	}

	kfd_cwsr_init(kfd);

	if (kfd_resume(kfd))
		goto kfd_resume_error;

	kfd->dbgmgr = NULL;

	if (kfd_topology_add_device(kfd)) {
		dev_err(kfd_device, "Error adding device to topology\n");
		goto kfd_topology_add_device_error;
	}

	kfd->init_complete = true;
	dev_info(kfd_device, "added device %x:%x\n", kfd->pdev->vendor,
		 kfd->pdev->device);

	pr_debug("Starting kfd with the following scheduling policy %d\n",
		kfd->dqm->sched_policy);

	goto out;

kfd_topology_add_device_error:
kfd_resume_error:
device_iommu_error:
gws_error:
	device_queue_manager_uninit(kfd->dqm);
device_queue_manager_error:
	kfd_interrupt_exit(kfd);
kfd_interrupt_error:
	kfd_doorbell_fini(kfd);
kfd_doorbell_error:
	kfd_gtt_sa_fini(kfd);
kfd_gtt_sa_init_error:
	amdgpu_amdkfd_free_gtt_mem(kfd->kgd, kfd->gtt_mem);
alloc_gtt_mem_failure:
	if (kfd->gws)
		amdgpu_amdkfd_free_gws(kfd->kgd, kfd->gws);
	dev_err(kfd_device,
		"device %x:%x NOT added due to errors\n",
		kfd->pdev->vendor, kfd->pdev->device);
out:
	return kfd->init_complete;
}

void kgd2kfd_device_exit(struct kfd_dev *kfd)
{
	if (kfd->init_complete) {
		kgd2kfd_suspend(kfd, false);
		device_queue_manager_uninit(kfd->dqm);
		kfd_interrupt_exit(kfd);
		kfd_topology_remove_device(kfd);
		kfd_doorbell_fini(kfd);
		kfd_gtt_sa_fini(kfd);
		amdgpu_amdkfd_free_gtt_mem(kfd->kgd, kfd->gtt_mem);
		if (kfd->gws)
			amdgpu_amdkfd_free_gws(kfd->kgd, kfd->gws);
	}

	kfree(kfd);
}

int kgd2kfd_pre_reset(struct kfd_dev *kfd)
{
	if (!kfd->init_complete)
		return 0;

	kfd->dqm->ops.pre_reset(kfd->dqm);

	kgd2kfd_suspend(kfd, false);

	kfd_signal_reset_event(kfd);
	return 0;
}

/*
 * FIXME: KFD won't be able to resume existing processes for now.
 * We will keep all existing processes in an evicted state and
 * wait for them to be terminated.
 */

int kgd2kfd_post_reset(struct kfd_dev *kfd)
{
	int ret;

	if (!kfd->init_complete)
		return 0;

	ret = kfd_resume(kfd);
	if (ret)
		return ret;
	atomic_dec(&kfd_locked);

	atomic_set(&kfd->sram_ecc_flag, 0);

	return 0;
}

bool kfd_is_locked(void)
{
	return (atomic_read(&kfd_locked) > 0);
}

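/*
 * kgd2kfd_suspend() and kgd2kfd_resume() must stay balanced: the first
 * device suspended increments kfd_locked and evicts all KFD processes,
 * and the last device resumed drops the count back to zero and restores
 * them. Runtime PM suspend/resume skips this locking entirely.
 */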
void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
{
	if (!kfd->init_complete)
		return;

	/* for runtime suspend, skip locking kfd */
	if (!run_pm) {
		/* On suspend of the first KFD device, suspend all KFD processes */
		if (atomic_inc_return(&kfd_locked) == 1)
			kfd_suspend_all_processes();
	}

	kfd->dqm->ops.stop(kfd->dqm);
	kfd_iommu_suspend(kfd);
}

int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
{
	int ret, count;

	if (!kfd->init_complete)
		return 0;

	ret = kfd_resume(kfd);
	if (ret)
		return ret;

	/* for runtime resume, skip unlocking kfd */
	if (!run_pm) {
		count = atomic_dec_return(&kfd_locked);
		WARN_ONCE(count < 0, "KFD suspend / resume ref. error");
		if (count == 0)
			ret = kfd_resume_all_processes();
	}

	return ret;
}

static int kfd_resume(struct kfd_dev *kfd)
{
	int err = 0;

	err = kfd_iommu_resume(kfd);
	if (err) {
		dev_err(kfd_device,
			"Failed to resume IOMMU for device %x:%x\n",
			kfd->pdev->vendor, kfd->pdev->device);
		return err;
	}

	err = kfd->dqm->ops.start(kfd->dqm);
	if (err) {
		dev_err(kfd_device,
			"Error starting queue manager for device %x:%x\n",
			kfd->pdev->vendor, kfd->pdev->device);
		goto dqm_start_error;
	}

	return err;

dqm_start_error:
	kfd_iommu_suspend(kfd);
	return err;
}

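/*
 * Queue interrupt handling work on a CPU in the current NUMA node,
 * starting the search from the CPU after the one this is running on.
 * If no other online CPU in the node is found, the work stays on the
 * current CPU.
 */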
static inline void kfd_queue_work(struct workqueue_struct *wq,
				  struct work_struct *work)
{
	int cpu, new_cpu;

	cpu = new_cpu = smp_processor_id();
	do {
		new_cpu = cpumask_next(new_cpu, cpu_online_mask) % nr_cpu_ids;
		if (cpu_to_node(new_cpu) == numa_node_id())
			break;
	} while (cpu != new_cpu);

	queue_work_on(new_cpu, wq, work);
}

/* This is called directly from the KGD ISR. */
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
{
	uint32_t patched_ihre[KFD_MAX_RING_ENTRY_SIZE];
	bool is_patched = false;
	unsigned long flags;

	if (!kfd->init_complete)
		return;

	if (kfd->device_info->ih_ring_entry_size > sizeof(patched_ihre)) {
		dev_err_once(kfd_device, "Ring entry too small\n");
		return;
	}

	spin_lock_irqsave(&kfd->interrupt_lock, flags);

	if (kfd->interrupts_active
	    && interrupt_is_wanted(kfd, ih_ring_entry,
				   patched_ihre, &is_patched)
	    && enqueue_ih_ring_entry(kfd,
				     is_patched ? patched_ihre : ih_ring_entry))
		kfd_queue_work(kfd->ih_wq, &kfd->interrupt_work);

	spin_unlock_irqrestore(&kfd->interrupt_lock, flags);
}

int kgd2kfd_quiesce_mm(struct mm_struct *mm)
{
	struct kfd_process *p;
	int r;

	/* Because we are called from arbitrary context (workqueue) as opposed
	 * to process context, kfd_process could attempt to exit while we are
	 * running so the lookup function increments the process ref count.
	 */
	p = kfd_lookup_process_by_mm(mm);
	if (!p)
		return -ESRCH;

	r = kfd_process_evict_queues(p);

	kfd_unref_process(p);
	return r;
}

int kgd2kfd_resume_mm(struct mm_struct *mm)
{
	struct kfd_process *p;
	int r;

	/* Because we are called from arbitrary context (workqueue) as opposed
	 * to process context, kfd_process could attempt to exit while we are
	 * running so the lookup function increments the process ref count.
	 */
	p = kfd_lookup_process_by_mm(mm);
	if (!p)
		return -ESRCH;

	r = kfd_process_restore_queues(p);

	kfd_unref_process(p);
	return r;
}

/**
 * kgd2kfd_schedule_evict_and_restore_process - Schedules a work item that
 *   will prepare for safe eviction of KFD BOs that belong to the specified
 *   process.
 *
 * @mm: mm_struct that identifies the specified KFD process
 * @fence: eviction fence attached to KFD process BOs
 *
 */
int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm,
					       struct dma_fence *fence)
{
	struct kfd_process *p;
	unsigned long active_time;
	unsigned long delay_jiffies = msecs_to_jiffies(PROCESS_ACTIVE_TIME_MS);

	if (!fence)
		return -EINVAL;

	if (dma_fence_is_signaled(fence))
		return 0;

	p = kfd_lookup_process_by_mm(mm);
	if (!p)
		return -ENODEV;

	if (fence->seqno == p->last_eviction_seqno)
		goto out;

	p->last_eviction_seqno = fence->seqno;

	/* Avoid KFD process starvation. Wait for at least
	 * PROCESS_ACTIVE_TIME_MS before evicting the process again
	 */
	active_time = get_jiffies_64() - p->last_restore_timestamp;
	if (delay_jiffies > active_time)
		delay_jiffies -= active_time;
	else
		delay_jiffies = 0;

	/* During process initialization eviction_work.dwork is initialized
	 * to kfd_evict_bo_worker
	 */
	schedule_delayed_work(&p->eviction_work, delay_jiffies);
out:
	kfd_unref_process(p);
	return 0;
}

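/*
 * GTT sub-allocator: the GTT buffer is divided into fixed-size chunks
 * tracked by a bitmap; allocations are handed out as runs of contiguous
 * free chunks.
 */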
static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
				unsigned int chunk_size)
{
	unsigned int num_of_longs;

	if (WARN_ON(buf_size < chunk_size))
		return -EINVAL;
	if (WARN_ON(buf_size == 0))
		return -EINVAL;
	if (WARN_ON(chunk_size == 0))
		return -EINVAL;

	kfd->gtt_sa_chunk_size = chunk_size;
	kfd->gtt_sa_num_of_chunks = buf_size / chunk_size;

	num_of_longs = BITS_TO_LONGS(kfd->gtt_sa_num_of_chunks);

	kfd->gtt_sa_bitmap = kcalloc(num_of_longs, sizeof(long), GFP_KERNEL);

	if (!kfd->gtt_sa_bitmap)
		return -ENOMEM;

	pr_debug("gtt_sa_num_of_chunks = %d, gtt_sa_bitmap = %p\n",
			kfd->gtt_sa_num_of_chunks, kfd->gtt_sa_bitmap);

	mutex_init(&kfd->gtt_sa_lock);

	return 0;
}

static void kfd_gtt_sa_fini(struct kfd_dev *kfd)
{
	mutex_destroy(&kfd->gtt_sa_lock);
	kfree(kfd->gtt_sa_bitmap);
}

static inline uint64_t kfd_gtt_sa_calc_gpu_addr(uint64_t start_addr,
						unsigned int bit_num,
						unsigned int chunk_size)
{
	return start_addr + bit_num * chunk_size;
}

static inline uint32_t *kfd_gtt_sa_calc_cpu_addr(void *start_addr,
						unsigned int bit_num,
						unsigned int chunk_size)
{
	return (uint32_t *) ((uintptr_t) start_addr + bit_num * chunk_size);
}

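/*
 * First-fit allocation: scan the bitmap for a run of contiguous free
 * chunks large enough for the requested size, restarting the search past
 * each gap that breaks the run.
 */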
int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size,
			struct kfd_mem_obj **mem_obj)
{
	unsigned int found, start_search, cur_size;

	if (size == 0)
		return -EINVAL;

	if (size > kfd->gtt_sa_num_of_chunks * kfd->gtt_sa_chunk_size)
		return -ENOMEM;

	*mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL);
	if (!(*mem_obj))
		return -ENOMEM;

	pr_debug("Allocated mem_obj = %p for size = %d\n", *mem_obj, size);

	start_search = 0;

	mutex_lock(&kfd->gtt_sa_lock);

kfd_gtt_restart_search:
	/* Find the first chunk that is free */
	found = find_next_zero_bit(kfd->gtt_sa_bitmap,
					kfd->gtt_sa_num_of_chunks,
					start_search);

	pr_debug("Found = %d\n", found);

	/* If there wasn't any free chunk, bail out */
	if (found == kfd->gtt_sa_num_of_chunks)
		goto kfd_gtt_no_free_chunk;

	/* Update fields of mem_obj */
	(*mem_obj)->range_start = found;
	(*mem_obj)->range_end = found;
	(*mem_obj)->gpu_addr = kfd_gtt_sa_calc_gpu_addr(
					kfd->gtt_start_gpu_addr,
					found,
					kfd->gtt_sa_chunk_size);
	(*mem_obj)->cpu_ptr = kfd_gtt_sa_calc_cpu_addr(
					kfd->gtt_start_cpu_ptr,
					found,
					kfd->gtt_sa_chunk_size);

	pr_debug("gpu_addr = %p, cpu_addr = %p\n",
			(uint64_t *) (*mem_obj)->gpu_addr, (*mem_obj)->cpu_ptr);

	/* If we need only one chunk, mark it as allocated and get out */
	if (size <= kfd->gtt_sa_chunk_size) {
		pr_debug("Single bit\n");
		set_bit(found, kfd->gtt_sa_bitmap);
		goto kfd_gtt_out;
	}

	/* Otherwise, try to see if we have enough contiguous chunks */
	cur_size = size - kfd->gtt_sa_chunk_size;
	do {
		(*mem_obj)->range_end =
			find_next_zero_bit(kfd->gtt_sa_bitmap,
					kfd->gtt_sa_num_of_chunks, ++found);
		/*
		 * If the next free chunk is not contiguous, then we need to
		 * restart our search from the last free chunk we found (which
		 * wasn't contiguous with the previous ones).
		 */
		if ((*mem_obj)->range_end != found) {
			start_search = found;
			goto kfd_gtt_restart_search;
		}

		/* If we reached end of buffer, bail out with error */
		if (found == kfd->gtt_sa_num_of_chunks)
			goto kfd_gtt_no_free_chunk;

		/* Check if we don't need another chunk */
		if (cur_size <= kfd->gtt_sa_chunk_size)
			cur_size = 0;
		else
			cur_size -= kfd->gtt_sa_chunk_size;

	} while (cur_size > 0);

	pr_debug("range_start = %d, range_end = %d\n",
		(*mem_obj)->range_start, (*mem_obj)->range_end);

	/* Mark the chunks as allocated */
	for (found = (*mem_obj)->range_start;
		found <= (*mem_obj)->range_end;
		found++)
		set_bit(found, kfd->gtt_sa_bitmap);

kfd_gtt_out:
	mutex_unlock(&kfd->gtt_sa_lock);
	return 0;

kfd_gtt_no_free_chunk:
	pr_debug("Allocation failed with mem_obj = %p\n", *mem_obj);
	mutex_unlock(&kfd->gtt_sa_lock);
	kfree(*mem_obj);
	return -ENOMEM;
}

int kfd_gtt_sa_free(struct kfd_dev *kfd, struct kfd_mem_obj *mem_obj)
{
	unsigned int bit;

	/* Act like kfree when trying to free a NULL object */
	if (!mem_obj)
		return 0;

	pr_debug("Free mem_obj = %p, range_start = %d, range_end = %d\n",
			mem_obj, mem_obj->range_start, mem_obj->range_end);

	mutex_lock(&kfd->gtt_sa_lock);

	/* Mark the chunks as free */
	for (bit = mem_obj->range_start;
		bit <= mem_obj->range_end;
		bit++)
		clear_bit(bit, kfd->gtt_sa_bitmap);

	mutex_unlock(&kfd->gtt_sa_lock);

	kfree(mem_obj);
	return 0;
}

void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd)
{
	if (kfd)
		atomic_inc(&kfd->sram_ecc_flag);
}

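/*
 * Reference-count active compute activity: leave the compute-idle power
 * profile when the count goes from 0 to 1 and re-enter it when the count
 * drops back to 0.
 */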
void kfd_inc_compute_active(struct kfd_dev *kfd)
{
	if (atomic_inc_return(&kfd->compute_profile) == 1)
		amdgpu_amdkfd_set_compute_idle(kfd->kgd, false);
}

void kfd_dec_compute_active(struct kfd_dev *kfd)
{
	int count = atomic_dec_return(&kfd->compute_profile);

	if (count == 0)
		amdgpu_amdkfd_set_compute_idle(kfd->kgd, true);
	WARN_ONCE(count < 0, "Compute profile ref. count error");
}

#if defined(CONFIG_DEBUG_FS)

/* This function will send a packet to the HIQ to hang the HWS,
 * which will trigger a GPU reset and bring the HWS back to a normal state
 */
int kfd_debugfs_hang_hws(struct kfd_dev *dev)
{
	int r = 0;

	if (dev->dqm->sched_policy != KFD_SCHED_POLICY_HWS) {
		pr_err("HWS is not enabled\n");
		return -EINVAL;
	}

	r = pm_debugfs_hang_hws(&dev->dqm->packets);
	if (!r)
		r = dqm_debugfs_execute_queues(dev->dqm);

	return r;
}

#endif