/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

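/*
 * amdgpu_job_free_handler - deferred job free
 *
 * Work item callback; runs from the scheduler's free-job work so that the
 * final reference on the job is dropped in process context.
 */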
static void amdgpu_job_free_handler(struct work_struct *ws)
{
	struct amdgpu_job *job = container_of(ws, struct amdgpu_job,
					      base.work_free_job);

	amd_sched_job_put(&job->base);
}

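/*
 * amdgpu_job_timeout_func - timeout (TDR) handler for a scheduler job
 *
 * Reports which ring timed out, together with the last signaled and last
 * emitted fence sequence numbers, then drops the reference held for the
 * timeout work.
 */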
void amdgpu_job_timeout_func(struct work_struct *work)
{
	struct amdgpu_job *job = container_of(work, struct amdgpu_job,
					      base.work_tdr.work);

	DRM_ERROR("ring %s timeout, last signaled seq=%u, last emitted seq=%u\n",
		  job->base.sched->name,
		  (uint32_t)atomic_read(&job->ring->fence_drv.last_seq),
		  job->ring->fence_drv.sync_seq);

	amd_sched_job_put(&job->base);
}

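/**
 * amdgpu_job_alloc - allocate a job with space for IBs
 * @adev: amdgpu device the job is executed on
 * @num_ibs: number of IBs the job will contain, must be non-zero
 * @job: pointer filled with the new job on success
 *
 * Allocates the job and its IB array in a single allocation, initializes
 * the free-job work item and creates the job's sync object.
 *
 * Returns 0 on success, -EINVAL for num_ibs == 0 or -ENOMEM if the
 * allocation fails.
 */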
int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
		     struct amdgpu_job **job)
{
	size_t size = sizeof(struct amdgpu_job);

	if (num_ibs == 0)
		return -EINVAL;

	size += sizeof(struct amdgpu_ib) * num_ibs;

	*job = kzalloc(size, GFP_KERNEL);
	if (!*job)
		return -ENOMEM;

	(*job)->adev = adev;
	/* the IB array lives directly behind the job structure */
	(*job)->ibs = (void *)&(*job)[1];
	(*job)->num_ibs = num_ibs;
	INIT_WORK(&(*job)->base.work_free_job, amdgpu_job_free_handler);

	amdgpu_sync_create(&(*job)->sync);

	return 0;
}

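/**
 * amdgpu_job_alloc_with_ib - allocate a job with a single IB
 * @adev: amdgpu device the job is executed on
 * @size: size of the IB to allocate
 * @job: pointer filled with the new job on success
 *
 * Convenience wrapper around amdgpu_job_alloc() that also allocates the
 * job's single IB.
 *
 * Returns 0 on success or a negative error code on failure.
 */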
int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
			     struct amdgpu_job **job)
{
	int r;

	r = amdgpu_job_alloc(adev, 1, job);
	if (r)
		return r;

	r = amdgpu_ib_get(adev, NULL, size, &(*job)->ibs[0]);
	if (r)
		kfree(*job);

	return r;
}

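/*
 * Typical use of the allocation helpers above, as an illustrative sketch
 * only (error handling trimmed; the IB size of 256 and the entity/owner
 * values are placeholders the caller already has, e.g. from its context):
 *
 *	struct amdgpu_job *job;
 *	struct fence *f;
 *
 *	r = amdgpu_job_alloc_with_ib(adev, 256, &job);
 *	... fill job->ibs[0] with commands ...
 *	r = amdgpu_job_submit(job, ring, entity, owner, &f);
 *	fence_put(f);
 */

/**
 * amdgpu_job_free - free a job and the resources it holds
 * @job: job to free
 *
 * Frees the IBs, keyed on the scheduler fence when one exists and on the
 * hardware fence otherwise, then releases the user fence BO and the sync
 * object.  The job structure itself is only freed here when it never
 * entered the scheduler; otherwise the scheduler's refcount releases it
 * through amdgpu_job_free_func().
 */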
void amdgpu_job_free(struct amdgpu_job *job)
{
	struct fence *f;
	unsigned i;

	/* use the scheduler fence if available */
	f = job->base.s_fence ? &job->base.s_fence->base : job->fence;

	for (i = 0; i < job->num_ibs; ++i)
		amdgpu_sa_bo_free(job->adev, &job->ibs[i].sa_bo, f);
	fence_put(job->fence);

	amdgpu_bo_unref(&job->uf.bo);
	amdgpu_sync_free(&job->sync);

	/* jobs owned by the scheduler are freed in amdgpu_job_free_func() */
	if (!job->base.use_sched)
		kfree(job);
}

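/*
 * amdgpu_job_free_func - final release of a scheduler-owned job
 *
 * Called when the scheduler job's refcount drops to zero.
 */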
void amdgpu_job_free_func(struct kref *refcount)
{
	struct amdgpu_job *job = container_of(refcount, struct amdgpu_job,
					      base.refcount);

	kfree(job);
}

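/**
 * amdgpu_job_submit - push a job to the GPU scheduler
 * @job: job to submit
 * @ring: ring the job is supposed to run on
 * @entity: scheduler entity the job is queued on
 * @owner: owner the resulting fence is attributed to
 * @f: fence for the job, the caller receives a reference
 *
 * Initializes the scheduler part of the job with the timeout and free
 * callbacks and pushes it onto the entity's job queue.
 *
 * Returns 0 on success or a negative error code on failure.
 */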
int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
		      struct amd_sched_entity *entity, void *owner,
		      struct fence **f)
{
	struct fence *fence;
	int r;

	if (!f)
		return -EINVAL;

	job->ring = ring;
	r = amd_sched_job_init(&job->base, &ring->sched, entity,
			       amdgpu_job_timeout_func,
			       amdgpu_job_free_func,
			       owner, &fence);
	if (r)
		return r;

	job->owner = owner;
	*f = fence_get(fence);
	amd_sched_entity_push_job(&job->base);

	return 0;
}

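/*
 * amdgpu_job_dependency - return the next fence the job has to wait for
 *
 * Scheduler callback.  Pulls the next unsignaled fence from the job's sync
 * object; if none is left but the job still needs a VM ID, one is grabbed,
 * which may add new dependencies, so the sync object is checked again.
 */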
static struct fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
{
	struct amdgpu_job *job = to_amdgpu_job(sched_job);
	struct amdgpu_vm *vm = job->ibs->vm;
	struct fence *fence = amdgpu_sync_get_fence(&job->sync);

	if (fence == NULL && vm && !job->ibs->vm_id) {
		struct amdgpu_ring *ring = job->ring;
		unsigned i, vm_id;
		uint64_t vm_pd_addr;
		int r;

		r = amdgpu_vm_grab_id(vm, ring, &job->sync,
				      &job->base.s_fence->base,
				      &vm_id, &vm_pd_addr);
		if (r) {
			DRM_ERROR("Error getting VM ID (%d)\n", r);
		} else {
			for (i = 0; i < job->num_ibs; ++i) {
				job->ibs[i].vm_id = vm_id;
				job->ibs[i].vm_pd_addr = vm_pd_addr;
			}
		}

		/* grabbing the VM ID may have added new dependencies */
		fence = amdgpu_sync_get_fence(&job->sync);
	}

	return fence;
}

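/*
 * amdgpu_job_run - execute a job on its ring
 *
 * Scheduler callback.  Waits for the remaining dependencies in the job's
 * sync object, schedules the IBs on the ring and hands the resulting
 * hardware fence back to the scheduler.  The job's resources are freed
 * afterwards in both the success and the error case.
 */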
static struct fence *amdgpu_job_run(struct amd_sched_job *sched_job)
{
	struct fence *fence = NULL;
	struct amdgpu_job *job;
	int r;

	if (!sched_job) {
		DRM_ERROR("job is null\n");
		return NULL;
	}
	job = to_amdgpu_job(sched_job);

	r = amdgpu_sync_wait(&job->sync);
	if (r) {
		DRM_ERROR("failed to sync wait (%d)\n", r);
		return NULL;
	}

	trace_amdgpu_sched_run_job(job);
	r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs,
			       job->sync.last_vm_update, &fence);
	if (r)
		DRM_ERROR("Error scheduling IBs (%d)\n", r);

	job->fence = fence;
	amdgpu_job_free(job);
	return fence;
}

const struct amd_sched_backend_ops amdgpu_sched_ops = {
	.dependency = amdgpu_job_dependency,
	.run_job = amdgpu_job_run,
	.begin_job = amd_sched_job_begin,
	.finish_job = amd_sched_job_finish,
};