/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

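/**
 * amdgpu_job_timedout - scheduler callback for a job that ran too long
 * @s_job: scheduler job that triggered the timeout
 *
 * Logs the ring name together with the last signaled and last emitted fence
 * sequence numbers, then recovers the GPU: the SR-IOV reset path when
 * running as a virtual function, a full GPU reset otherwise.
 */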
static void amdgpu_job_timedout(struct amd_sched_job *s_job)
{
	struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);

	DRM_ERROR("ring %s timeout, last signaled seq=%u, last emitted seq=%u\n",
		  job->base.sched->name,
		  atomic_read(&job->ring->fence_drv.last_seq),
		  job->ring->fence_drv.sync_seq);

	if (amdgpu_sriov_vf(job->adev))
		amdgpu_sriov_gpu_reset(job->adev, job);
	else
		amdgpu_gpu_reset(job->adev);
}

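/**
 * amdgpu_job_alloc - allocate a job and its IB array in one allocation
 * @adev: amdgpu device the job will run on
 * @num_ibs: number of IBs the job needs, must be non-zero
 * @job: pointer filled with the new job; the IB array is placed directly
 *	 behind the job structure
 * @vm: optional VM this job belongs to
 *
 * Also creates the job's sync objects and snapshots the VRAM lost counter
 * so amdgpu_job_run() can drop the job if VRAM contents were lost in the
 * meantime. Returns 0 on success, -EINVAL for zero IBs or -ENOMEM when
 * the allocation fails.
 */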
int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
		     struct amdgpu_job **job, struct amdgpu_vm *vm)
{
	size_t size = sizeof(struct amdgpu_job);

	if (num_ibs == 0)
		return -EINVAL;

	size += sizeof(struct amdgpu_ib) * num_ibs;

	*job = kzalloc(size, GFP_KERNEL);
	if (!*job)
		return -ENOMEM;

	(*job)->adev = adev;
	(*job)->vm = vm;
	(*job)->ibs = (void *)&(*job)[1];
	(*job)->num_ibs = num_ibs;

	amdgpu_sync_create(&(*job)->sync);
	amdgpu_sync_create(&(*job)->dep_sync);
	amdgpu_sync_create(&(*job)->sched_sync);
	(*job)->vram_lost_counter = atomic_read(&adev->vram_lost_counter);

	return 0;
}

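/**
 * amdgpu_job_alloc_with_ib - allocate a single-IB job including the IB itself
 * @adev: amdgpu device the job will run on
 * @size: size to allocate for the IB
 * @job: pointer filled with the new job
 *
 * Convenience wrapper around amdgpu_job_alloc() for kernel-internal
 * submissions without a VM; the page directory address is pointed at the
 * GART table. Returns 0 on success or a negative error code.
 */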
int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
			     struct amdgpu_job **job)
{
	int r;

	r = amdgpu_job_alloc(adev, 1, job, NULL);
	if (r)
		return r;

	r = amdgpu_ib_get(adev, NULL, size, &(*job)->ibs[0]);
	if (r)
		kfree(*job);
	else
		(*job)->vm_pd_addr = adev->gart.table_addr;

	return r;
}

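/**
 * amdgpu_job_free_resources - free the IBs of a job
 * @job: job to clean up
 *
 * The IBs are freed against the scheduler's finished fence when one exists,
 * otherwise against the hardware fence, so the IB memory is only reused
 * once the job is really done.
 */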
void amdgpu_job_free_resources(struct amdgpu_job *job)
{
	struct dma_fence *f;
	unsigned i;

	/* use sched fence if available */
	f = job->base.s_fence ? &job->base.s_fence->finished : job->fence;

	for (i = 0; i < job->num_ibs; ++i)
		amdgpu_ib_free(job->adev, &job->ibs[i], f);
}

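/**
 * amdgpu_job_free_cb - scheduler callback to free a finished job
 * @s_job: scheduler job to free
 *
 * Drops the ring priority taken in amdgpu_job_submit(), releases the
 * hardware fence and the sync objects and finally frees the job itself.
 */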
static void amdgpu_job_free_cb(struct amd_sched_job *s_job)
{
	struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);

	amdgpu_ring_priority_put(job->ring, amd_sched_get_job_priority(s_job));
	dma_fence_put(job->fence);
	amdgpu_sync_free(&job->sync);
	amdgpu_sync_free(&job->dep_sync);
	amdgpu_sync_free(&job->sched_sync);
	kfree(job);
}

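/**
 * amdgpu_job_free - free a job without going through the scheduler
 * @job: job to free
 *
 * Releases the IBs, the hardware fence and the sync objects and frees the
 * job itself; used on paths where the job is not handed to the scheduler.
 */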
void amdgpu_job_free(struct amdgpu_job *job)
{
	amdgpu_job_free_resources(job);

	dma_fence_put(job->fence);
	amdgpu_sync_free(&job->sync);
	amdgpu_sync_free(&job->dep_sync);
	amdgpu_sync_free(&job->sched_sync);
	kfree(job);
}

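/**
 * amdgpu_job_submit - push a job to the GPU scheduler
 * @job: job to submit
 * @ring: ring the job will run on
 * @entity: scheduler entity the job is pushed to
 * @owner: owner used for dependency handling
 * @f: filled with a reference to the job's finished fence
 *
 * Initializes the scheduler job, takes a reference on the finished fence
 * for the caller, frees the IB resources, bumps the ring priority and
 * hands the job over to the scheduler. Returns 0 on success or a negative
 * error code.
 */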
int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
		      struct amd_sched_entity *entity, void *owner,
		      struct dma_fence **f)
{
	int r;

	job->ring = ring;

	if (!f)
		return -EINVAL;

	r = amd_sched_job_init(&job->base, &ring->sched, entity, owner);
	if (r)
		return r;

	job->owner = owner;
	job->fence_ctx = entity->fence_context;
	*f = dma_fence_get(&job->base.s_fence->finished);
	amdgpu_job_free_resources(job);
	amdgpu_ring_priority_get(job->ring,
				 amd_sched_get_job_priority(&job->base));
	amd_sched_entity_push_job(&job->base);

	return 0;
}

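/**
 * amdgpu_job_dependency - scheduler callback returning the next dependency
 * @sched_job: job to ask the dependency for
 *
 * Returns the next fence the job has to wait for before it can run, or
 * NULL once all dependencies are satisfied: explicit dependencies from
 * dep_sync come first (with the scheduled-fence optimization applied when
 * possible), then implicit dependencies from sync, and finally a VM ID is
 * grabbed once everything else has signaled.
 */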
static struct dma_fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
{
	struct amdgpu_job *job = to_amdgpu_job(sched_job);
	struct amdgpu_vm *vm = job->vm;
	struct dma_fence *fence = amdgpu_sync_get_fence(&job->dep_sync);
	int r;

	if (amd_sched_dependency_optimized(fence, sched_job->s_entity)) {
		r = amdgpu_sync_fence(job->adev, &job->sched_sync, fence);
		if (r)
			DRM_ERROR("Error adding fence to sync (%d)\n", r);
	}
	if (!fence)
		fence = amdgpu_sync_get_fence(&job->sync);
	while (fence == NULL && vm && !job->vm_id) {
		struct amdgpu_ring *ring = job->ring;

		r = amdgpu_vm_grab_id(vm, ring, &job->sync,
				      &job->base.s_fence->finished,
				      job);
		if (r)
			DRM_ERROR("Error getting VM ID (%d)\n", r);

		fence = amdgpu_sync_get_fence(&job->sync);
	}

	return fence;
}

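/**
 * amdgpu_job_run - scheduler callback actually executing the job
 * @sched_job: job to run
 *
 * Writes the job's IBs to the ring and returns the hardware fence for
 * them. When VRAM contents were lost since the job was created, the IBs
 * are skipped and the finished fence carries -ECANCELED instead. During
 * a GPU reset the job is re-run and the stored hardware fence replaced.
 */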
static struct dma_fence *amdgpu_job_run(struct amd_sched_job *sched_job)
{
	struct dma_fence *fence = NULL;
	struct amdgpu_device *adev;
	struct amdgpu_job *job;
	int r;

	if (!sched_job) {
		DRM_ERROR("job is null\n");
		return NULL;
	}
	job = to_amdgpu_job(sched_job);
	adev = job->adev;

	BUG_ON(amdgpu_sync_peek_fence(&job->sync, NULL));

	trace_amdgpu_sched_run_job(job);
	/* skip IB scheduling when the VRAM contents were lost */
	if (job->vram_lost_counter != atomic_read(&adev->vram_lost_counter)) {
		dma_fence_set_error(&job->base.s_fence->finished, -ECANCELED);
		DRM_ERROR("Skip scheduling IBs!\n");
	} else {
		r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs, job,
				       &fence);
		if (r)
			DRM_ERROR("Error scheduling IBs (%d)\n", r);
	}
	/* if the GPU was reset, the hw fence is replaced here */
	dma_fence_put(job->fence);
	job->fence = dma_fence_get(fence);

	amdgpu_job_free_resources(job);
	return fence;
}

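/* Backend callbacks the GPU scheduler invokes for all amdgpu rings */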
const struct amd_sched_backend_ops amdgpu_sched_ops = {
	.dependency = amdgpu_job_dependency,
	.run_job = amdgpu_job_run,
	.timedout_job = amdgpu_job_timedout,
	.free_job = amdgpu_job_free_cb
};