/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_drm.h"
#include "ttm/ttm_object.h"
#include "ttm/ttm_placement.h"
#include "drmP.h"

#define VMW_RES_CONTEXT ttm_driver_type0
#define VMW_RES_SURFACE ttm_driver_type1
#define VMW_RES_STREAM ttm_driver_type2

struct vmw_user_context {
	struct ttm_base_object base;
	struct vmw_resource res;
};

struct vmw_user_surface {
	struct ttm_base_object base;
	struct vmw_surface srf;
};

struct vmw_user_dma_buffer {
	struct ttm_base_object base;
	struct vmw_dma_buffer dma;
};

struct vmw_bo_user_rep {
	uint32_t handle;
	uint64_t map_handle;
};

struct vmw_stream {
	struct vmw_resource res;
	uint32_t stream_id;
};

struct vmw_user_stream {
	struct ttm_base_object base;
	struct vmw_stream stream;
};

static inline struct vmw_dma_buffer *
vmw_dma_buffer(struct ttm_buffer_object *bo)
{
	return container_of(bo, struct vmw_dma_buffer, base);
}

static inline struct vmw_user_dma_buffer *
vmw_user_dma_buffer(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

	return container_of(vmw_bo, struct vmw_user_dma_buffer, dma);
}

struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
	kref_get(&res->kref);
	return res;
}

/*
 * Called as a kref release function with dev_priv->resource_lock held
 * by the caller (see the kref_put in vmw_resource_unreference). The lock
 * is dropped around the potentially sleeping destroy callbacks and
 * re-acquired before returning.
 */
static void vmw_resource_release(struct kref *kref)
{
	struct vmw_resource *res =
	    container_of(kref, struct vmw_resource, kref);
	struct vmw_private *dev_priv = res->dev_priv;

	idr_remove(res->idr, res->id);
	write_unlock(&dev_priv->resource_lock);

	if (likely(res->hw_destroy != NULL))
		res->hw_destroy(res);

	if (res->res_free != NULL)
		res->res_free(res);
	else
		kfree(res);

	write_lock(&dev_priv->resource_lock);
}

void vmw_resource_unreference(struct vmw_resource **p_res)
{
	struct vmw_resource *res = *p_res;
	struct vmw_private *dev_priv = res->dev_priv;

	*p_res = NULL;
	write_lock(&dev_priv->resource_lock);
	kref_put(&res->kref, vmw_resource_release);
	write_unlock(&dev_priv->resource_lock);
}

static int vmw_resource_init(struct vmw_private *dev_priv,
			     struct vmw_resource *res,
			     struct idr *idr,
			     enum ttm_object_type obj_type,
			     void (*res_free) (struct vmw_resource *res))
{
	int ret;

	kref_init(&res->kref);
	res->hw_destroy = NULL;
	res->res_free = res_free;
	res->res_type = obj_type;
	res->idr = idr;
	res->avail = false;
	res->dev_priv = dev_priv;

	do {
		if (unlikely(idr_pre_get(idr, GFP_KERNEL) == 0))
			return -ENOMEM;

		write_lock(&dev_priv->resource_lock);
		ret = idr_get_new_above(idr, res, 1, &res->id);
		write_unlock(&dev_priv->resource_lock);

	} while (ret == -EAGAIN);

	return ret;
}

/**
 * vmw_resource_activate
 *
 * @res:        Pointer to the newly created resource
 * @hw_destroy: Destroy function. NULL if none.
 *
 * Activate a resource after the hardware has been made aware of it.
 * Set the destroy function to @hw_destroy. Typically this frees the
 * resource and destroys the hardware resources associated with it.
 * Activate basically means that the function vmw_resource_lookup will
 * find it.
 */

static void vmw_resource_activate(struct vmw_resource *res,
				  void (*hw_destroy) (struct vmw_resource *))
{
	struct vmw_private *dev_priv = res->dev_priv;

	write_lock(&dev_priv->resource_lock);
	res->avail = true;
	res->hw_destroy = hw_destroy;
	write_unlock(&dev_priv->resource_lock);
}
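
/*
 * The canonical lifecycle, as used by vmw_context_init() and
 * vmw_surface_init() below: initialize the software state, emit the
 * FIFO command that creates the hardware object, then activate the
 * resource so lookups can find it. A minimal sketch, with error
 * handling elided and "my_hw_destroy" a hypothetical callback that
 * emits the matching hardware destroy command:
 *
 *	ret = vmw_resource_init(dev_priv, res, idr, obj_type, res_free);
 *	if (unlikely(ret != 0))
 *		return ret;
 *
 *	... reserve FIFO space and emit the hardware define command ...
 *
 *	vmw_resource_activate(res, my_hw_destroy);
 */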

struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
					 struct idr *idr, int id)
{
	struct vmw_resource *res;

	read_lock(&dev_priv->resource_lock);
	res = idr_find(idr, id);
	if (res && res->avail)
		kref_get(&res->kref);
	else
		res = NULL;
	read_unlock(&dev_priv->resource_lock);

	return res;
}
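
/*
 * A successful lookup returns the resource with an extra reference held;
 * the caller must balance it with vmw_resource_unreference(). A minimal
 * usage sketch (illustrative only), following the pattern of the ioctl
 * handlers below:
 *
 *	struct vmw_resource *res;
 *
 *	res = vmw_resource_lookup(dev_priv, &dev_priv->context_idr, cid);
 *	if (unlikely(res == NULL))
 *		return -EINVAL;
 *
 *	... use the resource ...
 *
 *	vmw_resource_unreference(&res);
 */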

/**
 * Context management:
 */

static void vmw_hw_context_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyContext body;
	} *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));

	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "destruction.\n");
		return;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
	cmd->body.cid = cpu_to_le32(res->id);

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
}

static int vmw_context_init(struct vmw_private *dev_priv,
			    struct vmw_resource *res,
			    void (*res_free) (struct vmw_resource *res))
{
	int ret;

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineContext body;
	} *cmd;

	ret = vmw_resource_init(dev_priv, res, &dev_priv->context_idr,
				VMW_RES_CONTEXT, res_free);

	if (unlikely(ret != 0)) {
		if (res_free == NULL)
			kfree(res);
		else
			res_free(res);
		return ret;
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
	cmd->body.cid = cpu_to_le32(res->id);

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	vmw_resource_activate(res, vmw_hw_context_destroy);
	return 0;
}

struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
{
	struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
	int ret;

	if (unlikely(res == NULL))
		return NULL;

	ret = vmw_context_init(dev_priv, res, NULL);
	return (ret == 0) ? res : NULL;
}

/**
 * User-space context management:
 */

static void vmw_user_context_free(struct vmw_resource *res)
{
	struct vmw_user_context *ctx =
	    container_of(res, struct vmw_user_context, res);

	kfree(ctx);
}

/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */

static void vmw_user_context_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_context *ctx =
	    container_of(base, struct vmw_user_context, base);
	struct vmw_resource *res = &ctx->res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_resource *res;
	struct vmw_user_context *ctx;
	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret = 0;

	res = vmw_resource_lookup(dev_priv, &dev_priv->context_idr, arg->cid);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_context_free) {
		ret = -EINVAL;
		goto out;
	}

	ctx = container_of(res, struct vmw_user_context, res);
	if (ctx->base.tfile != tfile && !ctx->base.shareable) {
		ret = -EPERM;
		goto out;
	}

	ttm_ref_object_base_unref(tfile, ctx->base.hash.key, TTM_REF_USAGE);
out:
	vmw_resource_unreference(&res);
	return ret;
}

int vmw_context_define_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_context *ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;

	if (unlikely(ctx == NULL))
		return -ENOMEM;

	res = &ctx->res;
	ctx->base.shareable = false;
	ctx->base.tfile = NULL;

	ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
	if (unlikely(ret != 0))
		return ret;

	tmp = vmw_resource_reference(&ctx->res);
	ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
				   &vmw_user_context_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	arg->cid = res->id;
out_err:
	vmw_resource_unreference(&res);
	return ret;
}

int vmw_context_check(struct vmw_private *dev_priv,
		      struct ttm_object_file *tfile,
		      int id)
{
	struct vmw_resource *res;
	int ret = 0;

	read_lock(&dev_priv->resource_lock);
	res = idr_find(&dev_priv->context_idr, id);
	if (res && res->avail) {
		struct vmw_user_context *ctx =
			container_of(res, struct vmw_user_context, res);
		if (ctx->base.tfile != tfile && !ctx->base.shareable)
			ret = -EPERM;
	} else {
		ret = -EINVAL;
	}
	read_unlock(&dev_priv->resource_lock);

	return ret;
}

/**
 * Surface management.
 */

static void vmw_hw_surface_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroySurface body;
	} *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));

	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "destruction.\n");
		return;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_SURFACE_DESTROY);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
	cmd->body.sid = cpu_to_le32(res->id);

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
}

void vmw_surface_res_free(struct vmw_resource *res)
{
	struct vmw_surface *srf = container_of(res, struct vmw_surface, res);

	kfree(srf->sizes);
	kfree(srf->snooper.image);
	kfree(srf);
}

int vmw_surface_init(struct vmw_private *dev_priv,
		     struct vmw_surface *srf,
		     void (*res_free) (struct vmw_resource *res))
{
	int ret;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineSurface body;
	} *cmd;
	SVGA3dSize *cmd_size;
	struct vmw_resource *res = &srf->res;
	struct drm_vmw_size *src_size;
	size_t submit_size;
	uint32_t cmd_len;
	int i;

	BUG_ON(res_free == NULL);
	ret = vmw_resource_init(dev_priv, res, &dev_priv->surface_idr,
				VMW_RES_SURFACE, res_free);

	if (unlikely(ret != 0)) {
		res_free(res);
		return ret;
	}

	submit_size = sizeof(*cmd) + srf->num_sizes * sizeof(SVGA3dSize);
	cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);

	cmd = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed for create surface.\n");
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_SURFACE_DEFINE);
	cmd->header.size = cpu_to_le32(cmd_len);
	cmd->body.sid = cpu_to_le32(res->id);
	cmd->body.surfaceFlags = cpu_to_le32(srf->flags);
	cmd->body.format = cpu_to_le32(srf->format);
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
		cmd->body.face[i].numMipLevels =
		    cpu_to_le32(srf->mip_levels[i]);
	}

	/* The array of mipmap sizes follows the command body in the FIFO. */
	cmd += 1;
	cmd_size = (SVGA3dSize *) cmd;
	src_size = srf->sizes;

	for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
		cmd_size->width = cpu_to_le32(src_size->width);
		cmd_size->height = cpu_to_le32(src_size->height);
		cmd_size->depth = cpu_to_le32(src_size->depth);
	}

	vmw_fifo_commit(dev_priv, submit_size);
	vmw_resource_activate(res, vmw_hw_surface_destroy);
	return 0;
}

static void vmw_user_surface_free(struct vmw_resource *res)
{
	struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
	struct vmw_user_surface *user_srf =
	    container_of(srf, struct vmw_user_surface, srf);

	kfree(srf->sizes);
	kfree(srf->snooper.image);
	kfree(user_srf);
}

int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
				   struct ttm_object_file *tfile,
				   uint32_t handle, struct vmw_surface **out)
{
	struct vmw_resource *res;
	struct vmw_surface *srf;
	struct vmw_user_surface *user_srf;
	struct ttm_base_object *base;
	int ret = -EINVAL;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL))
		return -EINVAL;

	if (unlikely(base->object_type != VMW_RES_SURFACE))
		goto out_bad_resource;

	user_srf = container_of(base, struct vmw_user_surface, base);
	srf = &user_srf->srf;
	res = &srf->res;

	read_lock(&dev_priv->resource_lock);

	if (!res->avail || res->res_free != &vmw_user_surface_free) {
		read_unlock(&dev_priv->resource_lock);
		goto out_bad_resource;
	}

	kref_get(&res->kref);
	read_unlock(&dev_priv->resource_lock);

	*out = srf;
	ret = 0;

out_bad_resource:
	ttm_base_object_unref(&base);

	return ret;
}
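
/*
 * A lookup made with vmw_user_surface_lookup_handle() holds a reference
 * on the surface's embedded resource; release it through
 * vmw_resource_unreference(). A minimal sketch (illustrative only):
 *
 *	struct vmw_surface *srf;
 *	struct vmw_resource *res;
 *	int ret;
 *
 *	ret = vmw_user_surface_lookup_handle(dev_priv, tfile, handle, &srf);
 *	if (unlikely(ret != 0))
 *		return ret;
 *
 *	... use srf ...
 *
 *	res = &srf->res;
 *	vmw_resource_unreference(&res);
 */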

static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_surface *user_srf =
	    container_of(base, struct vmw_user_surface, base);
	struct vmw_resource *res = &user_srf->srf.res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

	return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE);
}

int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_surface *user_srf =
	    kmalloc(sizeof(*user_srf), GFP_KERNEL);
	struct vmw_surface *srf;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	union drm_vmw_surface_create_arg *arg =
	    (union drm_vmw_surface_create_arg *)data;
	struct drm_vmw_surface_create_req *req = &arg->req;
	struct drm_vmw_surface_arg *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct drm_vmw_size __user *user_sizes;
	int ret;
	int i;

	if (unlikely(user_srf == NULL))
		return -ENOMEM;

	srf = &user_srf->srf;
	res = &srf->res;

	srf->flags = req->flags;
	srf->format = req->format;
	srf->scanout = req->scanout;
	memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
	srf->num_sizes = 0;
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
		srf->num_sizes += srf->mip_levels[i];

	if (srf->num_sizes == 0 ||
	    srf->num_sizes > DRM_VMW_MAX_SURFACE_FACES *
	    DRM_VMW_MAX_MIP_LEVELS) {
		ret = -EINVAL;
		goto out_err0;
	}

	srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL);
	if (unlikely(srf->sizes == NULL)) {
		ret = -ENOMEM;
		goto out_err0;
	}

	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
	    req->size_addr;

	ret = copy_from_user(srf->sizes, user_sizes,
			     srf->num_sizes * sizeof(*srf->sizes));
	if (unlikely(ret != 0)) {
		/* copy_from_user() returns the number of bytes not copied,
		 * not an error code.
		 */
		ret = -EFAULT;
		goto out_err1;
	}

	if (srf->scanout &&
	    srf->num_sizes == 1 &&
	    srf->sizes[0].width == 64 &&
	    srf->sizes[0].height == 64 &&
	    srf->format == SVGA3D_A8R8G8B8) {

		/* Allocate a cleared cursor snooper image. */
		srf->snooper.image = kzalloc(64 * 64 * 4, GFP_KERNEL);
		if (srf->snooper.image == NULL) {
			DRM_ERROR("Failed to allocate cursor image.\n");
			ret = -ENOMEM;
			goto out_err1;
		}
	} else {
		srf->snooper.image = NULL;
	}
	srf->snooper.crtc = NULL;

	user_srf->base.shareable = false;
	user_srf->base.tfile = NULL;

	/*
	 * From this point, the generic resource management functions
	 * destroy the object on failure.
	 */

	ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
	if (unlikely(ret != 0))
		return ret;

	tmp = vmw_resource_reference(&srf->res);
	ret = ttm_base_object_init(tfile, &user_srf->base,
				   req->shareable, VMW_RES_SURFACE,
				   &vmw_user_surface_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		vmw_resource_unreference(&res);
		return ret;
	}

	rep->sid = user_srf->base.hash.key;
	if (rep->sid == SVGA3D_INVALID_ID)
		DRM_ERROR("Created bad Surface ID.\n");

	vmw_resource_unreference(&res);
	return 0;
out_err1:
	kfree(srf->sizes);
out_err0:
	kfree(user_srf);
	return ret;
}

int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	union drm_vmw_surface_reference_arg *arg =
	    (union drm_vmw_surface_reference_arg *)data;
	struct drm_vmw_surface_arg *req = &arg->req;
	struct drm_vmw_surface_create_req *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_surface *srf;
	struct vmw_user_surface *user_srf;
	struct drm_vmw_size __user *user_sizes;
	struct ttm_base_object *base;
	int ret = -EINVAL;

	base = ttm_base_object_lookup(tfile, req->sid);
	if (unlikely(base == NULL)) {
		DRM_ERROR("Could not find surface to reference.\n");
		return -EINVAL;
	}

	if (unlikely(base->object_type != VMW_RES_SURFACE))
		goto out_bad_resource;

	user_srf = container_of(base, struct vmw_user_surface, base);
	srf = &user_srf->srf;

	ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not add a reference to a surface.\n");
		goto out_no_reference;
	}

	rep->flags = srf->flags;
	rep->format = srf->format;
	memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
	    rep->size_addr;

	if (user_sizes)
		ret = copy_to_user(user_sizes, srf->sizes,
				   srf->num_sizes * sizeof(*srf->sizes));
	if (unlikely(ret != 0)) {
		DRM_ERROR("copy_to_user failed %p %u\n",
			  user_sizes, srf->num_sizes);
		/* copy_to_user() returns the number of bytes not copied. */
		ret = -EFAULT;
	}
out_bad_resource:
out_no_reference:
	ttm_base_object_unref(&base);

	return ret;
}

int vmw_surface_check(struct vmw_private *dev_priv,
		      struct ttm_object_file *tfile,
		      uint32_t handle, int *id)
{
	struct ttm_base_object *base;
	struct vmw_user_surface *user_srf;
	int ret = -EPERM;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL))
		return -EINVAL;

	if (unlikely(base->object_type != VMW_RES_SURFACE))
		goto out_bad_surface;

	user_srf = container_of(base, struct vmw_user_surface, base);
	*id = user_srf->srf.res.id;
	ret = 0;

out_bad_surface:
	/*
	 * FIXME: May deadlock here when called from the
	 * command parsing code.
	 */

	ttm_base_object_unref(&base);
	return ret;
}

/**
 * Buffer management.
 */

static size_t vmw_dmabuf_acc_size(struct ttm_bo_global *glob,
				  unsigned long num_pages)
{
	static size_t bo_user_size = ~0;

	size_t page_array_size =
	    (num_pages * sizeof(void *) + PAGE_SIZE - 1) & PAGE_MASK;

	if (unlikely(bo_user_size == ~0)) {
		bo_user_size = glob->ttm_bo_extra_size +
		    ttm_round_pot(sizeof(struct vmw_dma_buffer));
	}

	return bo_user_size + page_array_size;
}

void vmw_dmabuf_gmr_unbind(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
	struct ttm_bo_global *glob = bo->glob;
	struct vmw_private *dev_priv =
		container_of(bo->bdev, struct vmw_private, bdev);

	if (vmw_bo->gmr_bound) {
		vmw_gmr_unbind(dev_priv, vmw_bo->gmr_id);
		spin_lock(&glob->lru_lock);
		ida_remove(&dev_priv->gmr_ida, vmw_bo->gmr_id);
		spin_unlock(&glob->lru_lock);
		vmw_bo->gmr_bound = false;
	}
}

void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
	struct ttm_bo_global *glob = bo->glob;

	vmw_dmabuf_gmr_unbind(bo);
	ttm_mem_global_free(glob->mem_glob, bo->acc_size);
	kfree(vmw_bo);
}

int vmw_dmabuf_init(struct vmw_private *dev_priv,
		    struct vmw_dma_buffer *vmw_bo,
		    size_t size, struct ttm_placement *placement,
		    bool interruptible,
		    void (*bo_free) (struct ttm_buffer_object *bo))
{
	struct ttm_bo_device *bdev = &dev_priv->bdev;
	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
	size_t acc_size;
	int ret;

	BUG_ON(!bo_free);

	acc_size =
	    vmw_dmabuf_acc_size(bdev->glob,
				(size + PAGE_SIZE - 1) >> PAGE_SHIFT);

	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
	if (unlikely(ret != 0)) {
		/* We must free the bo here, since ttm_bo_init() would
		 * do so as well on failure.
		 */
		bo_free(&vmw_bo->base);
		return ret;
	}

	memset(vmw_bo, 0, sizeof(*vmw_bo));

	INIT_LIST_HEAD(&vmw_bo->gmr_lru);
	INIT_LIST_HEAD(&vmw_bo->validate_list);
	vmw_bo->gmr_id = 0;
	vmw_bo->gmr_bound = false;

	ret = ttm_bo_init(bdev, &vmw_bo->base, size,
			  ttm_bo_type_device, placement,
			  0, 0, interruptible,
			  NULL, acc_size, bo_free);
	return ret;
}

static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
{
	struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
	struct ttm_bo_global *glob = bo->glob;

	vmw_dmabuf_gmr_unbind(bo);
	ttm_mem_global_free(glob->mem_glob, bo->acc_size);
	kfree(vmw_user_bo);
}

static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
{
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_base_object *base = *p_base;
	struct ttm_buffer_object *bo;

	*p_base = NULL;

	if (unlikely(base == NULL))
		return;

	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
	bo = &vmw_user_bo->dma.base;
	ttm_bo_unref(&bo);
}

int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	union drm_vmw_alloc_dmabuf_arg *arg =
	    (union drm_vmw_alloc_dmabuf_arg *)data;
	struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
	struct drm_vmw_dmabuf_rep *rep = &arg->rep;
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_buffer_object *tmp;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL);
	if (unlikely(vmw_user_bo == NULL))
		return -ENOMEM;

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0)) {
		kfree(vmw_user_bo);
		return ret;
	}

	ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, req->size,
			      &vmw_vram_sys_placement, true,
			      &vmw_user_dmabuf_destroy);
	if (unlikely(ret != 0))
		goto out_no_dmabuf;

	tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
	ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile,
				   &vmw_user_bo->base,
				   false,
				   ttm_buffer_type,
				   &vmw_user_dmabuf_release, NULL);
	if (unlikely(ret != 0))
		goto out_no_base_object;

	rep->handle = vmw_user_bo->base.hash.key;
	rep->map_handle = vmw_user_bo->dma.base.addr_space_offset;
	rep->cur_gmr_id = vmw_user_bo->base.hash.key;
	rep->cur_gmr_offset = 0;

out_no_base_object:
	ttm_bo_unref(&tmp);
out_no_dmabuf:
	ttm_read_unlock(&vmaster->lock);

	return ret;
}

int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_vmw_unref_dmabuf_arg *arg =
	    (struct drm_vmw_unref_dmabuf_arg *)data;

	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 arg->handle,
					 TTM_REF_USAGE);
}

uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
				  uint32_t cur_validate_node)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

	if (likely(vmw_bo->on_validate_list))
		return vmw_bo->cur_validate_node;

	vmw_bo->cur_validate_node = cur_validate_node;
	vmw_bo->on_validate_list = true;

	return cur_validate_node;
}

void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

	vmw_bo->on_validate_list = false;
}

uint32_t vmw_dmabuf_gmr(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo;

	if (bo->mem.mem_type == TTM_PL_VRAM)
		return SVGA_GMR_FRAMEBUFFER;

	vmw_bo = vmw_dma_buffer(bo);

	return (vmw_bo->gmr_bound) ? vmw_bo->gmr_id : SVGA_GMR_NULL;
}

void vmw_dmabuf_set_gmr(struct ttm_buffer_object *bo, uint32_t id)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

	vmw_bo->gmr_bound = true;
	vmw_bo->gmr_id = id;
}

int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
			   uint32_t handle, struct vmw_dma_buffer **out)
{
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_base_object *base;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL)) {
		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return -ESRCH;
	}

	if (unlikely(base->object_type != ttm_buffer_type)) {
		ttm_base_object_unref(&base);
		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return -EINVAL;
	}

	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
	(void)ttm_bo_reference(&vmw_user_bo->dma.base);
	ttm_base_object_unref(&base);
	*out = &vmw_user_bo->dma;

	return 0;
}
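
/*
 * vmw_user_dmabuf_lookup() returns the buffer with a TTM bo reference
 * held; drop it with ttm_bo_unref() when done. A minimal sketch
 * (illustrative only):
 *
 *	struct vmw_dma_buffer *vmw_bo;
 *	struct ttm_buffer_object *bo;
 *	int ret;
 *
 *	ret = vmw_user_dmabuf_lookup(tfile, handle, &vmw_bo);
 *	if (unlikely(ret != 0))
 *		return ret;
 *
 *	... use vmw_bo ...
 *
 *	bo = &vmw_bo->base;
 *	ttm_bo_unref(&bo);
 */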

/**
 * TODO: Implement a gmr id eviction mechanism. Currently we just fail
 * when we're out of ids, causing GMR space to be allocated
 * out of VRAM.
 */

int vmw_gmr_id_alloc(struct vmw_private *dev_priv, uint32_t *p_id)
{
	struct ttm_bo_global *glob = dev_priv->bdev.glob;
	int id;
	int ret;

	do {
		if (unlikely(ida_pre_get(&dev_priv->gmr_ida, GFP_KERNEL) == 0))
			return -ENOMEM;

		spin_lock(&glob->lru_lock);
		ret = ida_get_new(&dev_priv->gmr_ida, &id);
		spin_unlock(&glob->lru_lock);
	} while (ret == -EAGAIN);

	if (unlikely(ret != 0))
		return ret;

	if (unlikely(id >= dev_priv->max_gmr_ids)) {
		spin_lock(&glob->lru_lock);
		ida_remove(&dev_priv->gmr_ida, id);
		spin_unlock(&glob->lru_lock);
		return -EBUSY;
	}

	*p_id = (uint32_t) id;
	return 0;
}
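
/*
 * The id allocated here pairs with vmw_dmabuf_set_gmr() above and is
 * returned to gmr_ida by vmw_dmabuf_gmr_unbind(). A sketch of the
 * intended binding sequence (illustrative only; the actual GMR
 * descriptor setup lives in the GMR code and is elided here):
 *
 *	uint32_t id;
 *	int ret;
 *
 *	ret = vmw_gmr_id_alloc(dev_priv, &id);
 *	if (unlikely(ret != 0))
 *		return ret;
 *
 *	... point GMR "id" at the buffer's pages ...
 *
 *	vmw_dmabuf_set_gmr(bo, id);
 */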

/*
 * Stream management
 */

static void vmw_stream_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_stream *stream;
	int ret;

	DRM_INFO("%s: unref\n", __func__);
	stream = container_of(res, struct vmw_stream, res);

	ret = vmw_overlay_unref(dev_priv, stream->stream_id);
	WARN_ON(ret != 0);
}

static int vmw_stream_init(struct vmw_private *dev_priv,
			   struct vmw_stream *stream,
			   void (*res_free) (struct vmw_resource *res))
{
	struct vmw_resource *res = &stream->res;
	int ret;

	ret = vmw_resource_init(dev_priv, res, &dev_priv->stream_idr,
				VMW_RES_STREAM, res_free);

	if (unlikely(ret != 0)) {
		if (res_free == NULL)
			kfree(stream);
		else
			res_free(&stream->res);
		return ret;
	}

	ret = vmw_overlay_claim(dev_priv, &stream->stream_id);
	if (ret) {
		vmw_resource_unreference(&res);
		return ret;
	}

	DRM_INFO("%s: claimed\n", __func__);

	vmw_resource_activate(&stream->res, vmw_stream_destroy);
	return 0;
}

/**
 * User-space stream management:
 */

static void vmw_user_stream_free(struct vmw_resource *res)
{
	struct vmw_user_stream *stream =
	    container_of(res, struct vmw_user_stream, stream.res);

	kfree(stream);
}

/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */

static void vmw_user_stream_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_stream *stream =
	    container_of(base, struct vmw_user_stream, base);
	struct vmw_resource *res = &stream->stream.res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_resource *res;
	struct vmw_user_stream *stream;
	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret = 0;

	res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr,
				  arg->stream_id);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_stream_free) {
		ret = -EINVAL;
		goto out;
	}

	stream = container_of(res, struct vmw_user_stream, stream.res);
	if (stream->base.tfile != tfile) {
		ret = -EINVAL;
		goto out;
	}

	ttm_ref_object_base_unref(tfile, stream->base.hash.key, TTM_REF_USAGE);
out:
	vmw_resource_unreference(&res);
	return ret;
}

int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_stream *stream = kmalloc(sizeof(*stream), GFP_KERNEL);
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;

	if (unlikely(stream == NULL))
		return -ENOMEM;

	res = &stream->stream.res;
	stream->base.shareable = false;
	stream->base.tfile = NULL;

	ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free);
	if (unlikely(ret != 0))
		return ret;

	tmp = vmw_resource_reference(res);
	ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM,
				   &vmw_user_stream_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	arg->stream_id = res->id;
out_err:
	vmw_resource_unreference(&res);
	return ret;
}

int vmw_user_stream_lookup(struct vmw_private *dev_priv,
			   struct ttm_object_file *tfile,
			   uint32_t *inout_id, struct vmw_resource **out)
{
	struct vmw_user_stream *stream;
	struct vmw_resource *res;
	int ret;

	res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, *inout_id);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_stream_free) {
		ret = -EINVAL;
		goto err_ref;
	}

	stream = container_of(res, struct vmw_user_stream, stream.res);
	if (stream->base.tfile != tfile) {
		ret = -EPERM;
		goto err_ref;
	}

	*inout_id = stream->stream.stream_id;
	*out = res;
	return 0;
err_ref:
	vmw_resource_unreference(&res);
	return ret;
}
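
/*
 * Note that vmw_user_stream_lookup() converts in place: on entry
 * *inout_id holds the user-space stream handle, on successful return it
 * holds the device stream id, and a resource reference is held for the
 * caller. A minimal sketch (illustrative only):
 *
 *	uint32_t id = user_handle;
 *	struct vmw_resource *res;
 *	int ret;
 *
 *	ret = vmw_user_stream_lookup(dev_priv, tfile, &id, &res);
 *	if (unlikely(ret != 0))
 *		return ret;
 *
 *	... program the overlay using device stream id "id" ...
 *
 *	vmw_resource_unreference(&res);
 */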