/**************************************************************************
 *
 * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include "ttm/ttm_placement.h"

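/**
 * struct vmw_shader - Guest-backed shader metadata.
 *
 * @res: The base resource this shader is embedded in.
 * @type: The SVGA3D shader type (vertex, pixel or geometry).
 * @size: Size of the shader byte code in bytes.
 */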
struct vmw_shader {
	struct vmw_resource res;
	SVGA3dShaderType type;
	uint32_t size;
};

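/**
 * struct vmw_user_shader - User-space visible shader object.
 *
 * @base: The TTM base object providing the user-space handle.
 * @shader: The embedded shader resource.
 */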
struct vmw_user_shader {
	struct ttm_base_object base;
	struct vmw_shader shader;
};

static void vmw_user_shader_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_shader_base_to_res(struct ttm_base_object *base);

static int vmw_gb_shader_create(struct vmw_resource *res);
static int vmw_gb_shader_bind(struct vmw_resource *res,
			      struct ttm_validate_buffer *val_buf);
static int vmw_gb_shader_unbind(struct vmw_resource *res,
				bool readback,
				struct ttm_validate_buffer *val_buf);
static int vmw_gb_shader_destroy(struct vmw_resource *res);

static uint64_t vmw_user_shader_size;

static const struct vmw_user_resource_conv user_shader_conv = {
	.object_type = VMW_RES_SHADER,
	.base_obj_to_res = vmw_user_shader_base_to_res,
	.res_free = vmw_user_shader_free
};

const struct vmw_user_resource_conv *user_shader_converter =
	&user_shader_conv;


static const struct vmw_res_func vmw_gb_shader_func = {
	.res_type = vmw_res_shader,
	.needs_backup = true,
	.may_evict = true,
	.type_name = "guest backed shaders",
	.backup_placement = &vmw_mob_placement,
	.create = vmw_gb_shader_create,
	.destroy = vmw_gb_shader_destroy,
	.bind = vmw_gb_shader_bind,
	.unbind = vmw_gb_shader_unbind
};

/**
 * Shader management:
 */

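/**
 * vmw_res_to_shader - Convert a resource pointer to the enclosing
 * struct vmw_shader.
 *
 * @res: Pointer to the embedded struct vmw_resource.
 */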
static inline struct vmw_shader *
vmw_res_to_shader(struct vmw_resource *res)
{
	return container_of(res, struct vmw_shader, res);
}

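/**
 * vmw_hw_shader_destroy - Resource destructor callback. Destroys the
 * hardware shader object, ignoring any error since the resource is
 * going away.
 *
 * @res: Pointer to the shader resource.
 */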
static void vmw_hw_shader_destroy(struct vmw_resource *res)
{
	(void) vmw_gb_shader_destroy(res);
}

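/**
 * vmw_gb_shader_init - Initialize a guest-backed shader resource.
 *
 * @dev_priv: Pointer to the device private structure.
 * @res: The shader resource to initialize.
 * @size: Size of the shader byte code in bytes.
 * @offset: Offset of the byte code within the backing buffer.
 * @type: The SVGA3D shader type.
 * @byte_code: Optional buffer object holding the shader byte code.
 * @res_free: Destructor to call when the resource is freed.
 *
 * On error the resource is freed using @res_free, or kfree() if
 * @res_free is NULL.
 */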
static int vmw_gb_shader_init(struct vmw_private *dev_priv,
			      struct vmw_resource *res,
			      uint32_t size,
			      uint64_t offset,
			      SVGA3dShaderType type,
			      struct vmw_dma_buffer *byte_code,
			      void (*res_free) (struct vmw_resource *res))
{
	struct vmw_shader *shader = vmw_res_to_shader(res);
	int ret;

	ret = vmw_resource_init(dev_priv, res, true,
				res_free, &vmw_gb_shader_func);

	if (unlikely(ret != 0)) {
		if (res_free)
			res_free(res);
		else
			kfree(res);
		return ret;
	}

	res->backup_size = size;
	if (byte_code) {
		res->backup = vmw_dmabuf_reference(byte_code);
		res->backup_offset = offset;
	}
	shader->size = size;
	shader->type = type;

	vmw_resource_activate(res, vmw_hw_shader_destroy);
	return 0;
}

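/**
 * vmw_gb_shader_create - Create the hardware shader object.
 *
 * @res: The shader resource.
 *
 * Allocates a resource id and emits an SVGA_3D_CMD_DEFINE_GB_SHADER
 * command defining the shader on the device. Returns 0 immediately if
 * the shader has already been created.
 */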
static int vmw_gb_shader_create(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_shader *shader = vmw_res_to_shader(res);
	int ret;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineGBShader body;
	} *cmd;

	if (likely(res->id != -1))
		return 0;

	ret = vmw_resource_alloc_id(res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a shader id.\n");
		goto out_no_id;
	}

	if (unlikely(res->id >= VMWGFX_NUM_GB_SHADER)) {
		ret = -EBUSY;
		goto out_no_fifo;
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for shader "
			  "creation.\n");
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	cmd->header.id = SVGA_3D_CMD_DEFINE_GB_SHADER;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.shid = res->id;
	cmd->body.type = shader->type;
	cmd->body.sizeInBytes = shader->size;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	(void) vmw_3d_resource_inc(dev_priv, false);

	return 0;

out_no_fifo:
	vmw_resource_release_id(res);
out_no_id:
	return ret;
}

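/**
 * vmw_gb_shader_bind - Bind the shader to its backing MOB.
 *
 * @res: The shader resource.
 * @val_buf: Validation buffer information for the backing buffer object.
 *
 * Emits an SVGA_3D_CMD_BIND_GB_SHADER command pointing the shader at the
 * MOB that holds its byte code and clears the backup-dirty flag.
 */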
static int vmw_gb_shader_bind(struct vmw_resource *res,
			      struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBShader body;
	} *cmd;
	struct ttm_buffer_object *bo = val_buf->bo;

	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for shader "
			  "binding.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_BIND_GB_SHADER;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.shid = res->id;
	cmd->body.mobid = bo->mem.start;
	cmd->body.offsetInBytes = 0;
	res->backup_dirty = false;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}

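/**
 * vmw_gb_shader_unbind - Unbind the shader from its backing MOB.
 *
 * @res: The shader resource.
 * @readback: Currently unused.
 * @val_buf: Validation buffer information for the backing buffer object.
 *
 * Emits a bind command with an invalid MOB id and fences the backing
 * buffer so it is not reused before the device has finished with it.
 */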
static int vmw_gb_shader_unbind(struct vmw_resource *res,
				bool readback,
				struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBShader body;
	} *cmd;
	struct vmw_fence_obj *fence;

	BUG_ON(res->backup->base.mem.mem_type != VMW_PL_MOB);

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for shader "
			  "unbinding.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_BIND_GB_SHADER;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.shid = res->id;
	cmd->body.mobid = SVGA3D_INVALID_ID;
	cmd->body.offsetInBytes = 0;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	/*
	 * Create a fence object and fence the backup buffer.
	 */

	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
					  &fence, NULL);

	vmw_fence_single_bo(val_buf->bo, fence);

	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	return 0;
}

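/**
 * vmw_gb_shader_destroy - Destroy the hardware shader object.
 *
 * @res: The shader resource.
 *
 * Kills any context bindings referring to the shader, emits an
 * SVGA_3D_CMD_DESTROY_GB_SHADER command and releases the resource id.
 */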
static int vmw_gb_shader_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyGBShader body;
	} *cmd;

	if (likely(res->id == -1))
		return 0;

	mutex_lock(&dev_priv->binding_mutex);
	vmw_context_binding_res_list_kill(&res->binding_head);

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for shader "
			  "destruction.\n");
		mutex_unlock(&dev_priv->binding_mutex);
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_DESTROY_GB_SHADER;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.shid = res->id;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	mutex_unlock(&dev_priv->binding_mutex);
	vmw_resource_release_id(res);
	vmw_3d_resource_dec(dev_priv, false);

	return 0;
}

/**
 * User-space shader management:
 */

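/**
 * vmw_user_shader_base_to_res - Convert a TTM base object to the shader
 * resource embedded in the enclosing struct vmw_user_shader.
 *
 * @base: Pointer to the TTM base object.
 */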
static struct vmw_resource *
vmw_user_shader_base_to_res(struct ttm_base_object *base)
{
	return &(container_of(base, struct vmw_user_shader, base)->
		 shader.res);
}

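/**
 * vmw_user_shader_free - Free a user shader.
 *
 * @res: The shader resource embedded in the user shader.
 *
 * Frees the user shader structure and returns its size to the graphics
 * memory accounting.
 */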
static void vmw_user_shader_free(struct vmw_resource *res)
{
	struct vmw_user_shader *ushader =
		container_of(res, struct vmw_user_shader, shader.res);
	struct vmw_private *dev_priv = res->dev_priv;

	ttm_base_object_kfree(ushader, base);
	ttm_mem_global_free(vmw_mem_glob(dev_priv),
			    vmw_user_shader_size);
}

/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */

static void vmw_user_shader_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_resource *res = vmw_user_shader_base_to_res(base);

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

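/**
 * vmw_shader_destroy_ioctl - Ioctl to destroy a user-space shader handle.
 *
 * @dev: The DRM device.
 * @data: Pointer to the ioctl argument, a struct drm_vmw_shader_arg.
 * @file_priv: The DRM file private of the caller.
 *
 * Drops the caller's reference on the base object; the underlying shader
 * resource is destroyed once all references are gone.
 */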
int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct drm_vmw_shader_arg *arg = (struct drm_vmw_shader_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

	return ttm_ref_object_base_unref(tfile, arg->handle,
					 TTM_REF_USAGE);
}

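/**
 * vmw_shader_define_ioctl - Ioctl to create a user-space shader.
 *
 * @dev: The DRM device.
 * @data: Pointer to the ioctl argument, a struct drm_vmw_shader_create_arg.
 * @file_priv: The DRM file private of the caller.
 *
 * Looks up the optional byte-code buffer, validates the shader type and
 * size, accounts the graphics memory and sets up both the shader resource
 * and the TTM base object exposing it to user-space. Returns 0 on success
 * or a negative error code on failure.
 */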
int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_shader *ushader;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	struct drm_vmw_shader_create_arg *arg =
		(struct drm_vmw_shader_create_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	struct vmw_dma_buffer *buffer = NULL;
	SVGA3dShaderType shader_type;
	int ret;

	if (arg->buffer_handle != SVGA3D_INVALID_ID) {
		ret = vmw_user_dmabuf_lookup(tfile, arg->buffer_handle,
					     &buffer);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Could not find buffer for shader "
				  "creation.\n");
			return ret;
		}

		if ((u64)buffer->base.num_pages * PAGE_SIZE <
		    (u64)arg->size + (u64)arg->offset) {
			DRM_ERROR("Illegal buffer- or shader size.\n");
			ret = -EINVAL;
			goto out_bad_arg;
		}
	}

	switch (arg->shader_type) {
	case drm_vmw_shader_type_vs:
		shader_type = SVGA3D_SHADERTYPE_VS;
		break;
	case drm_vmw_shader_type_ps:
		shader_type = SVGA3D_SHADERTYPE_PS;
		break;
	case drm_vmw_shader_type_gs:
		shader_type = SVGA3D_SHADERTYPE_GS;
		break;
	default:
		DRM_ERROR("Illegal shader type.\n");
		ret = -EINVAL;
		goto out_bad_arg;
	}

	/*
	 * Approximate idr memory usage with 128 bytes. It will be limited
	 * by the maximum number of shaders anyway.
	 */

	if (unlikely(vmw_user_shader_size == 0))
		vmw_user_shader_size = ttm_round_pot(sizeof(*ushader))
			+ 128;

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		goto out_bad_arg;

	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
				   vmw_user_shader_size,
				   false, true);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Out of graphics memory for shader"
				  " creation.\n");
		goto out_unlock;
	}

	ushader = kzalloc(sizeof(*ushader), GFP_KERNEL);
	if (unlikely(ushader == NULL)) {
		ttm_mem_global_free(vmw_mem_glob(dev_priv),
				    vmw_user_shader_size);
		ret = -ENOMEM;
		goto out_unlock;
	}

	res = &ushader->shader.res;
	ushader->base.shareable = false;
	ushader->base.tfile = NULL;

	/*
	 * From here on, the destructor takes over resource freeing.
	 */

	ret = vmw_gb_shader_init(dev_priv, res, arg->size,
				 arg->offset, shader_type, buffer,
				 vmw_user_shader_free);
	if (unlikely(ret != 0))
		goto out_unlock;

	tmp = vmw_resource_reference(res);
	ret = ttm_base_object_init(tfile, &ushader->base, false,
				   VMW_RES_SHADER,
				   &vmw_user_shader_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	arg->shader_handle = ushader->base.hash.key;
out_err:
	vmw_resource_unreference(&res);
out_unlock:
	ttm_read_unlock(&vmaster->lock);
out_bad_arg:
	vmw_dmabuf_unreference(&buffer);

	return ret;
}