/**************************************************************************
 *
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_so.h"
#include "vmwgfx_binding.h"
#include <drm/ttm/ttm_placement.h>
#include "device_include/svga3d_surfacedefs.h"


/**
 * struct vmw_user_surface - User-space visible surface resource
 *
 * @prime:          The TTM prime object providing user-space visibility.
 * @srf:            The surface metadata.
 * @size:           TTM accounting size for the surface.
 * @master:         Master of the creating client. Used for security checks.
 */
struct vmw_user_surface {
	struct ttm_prime_object prime;
	struct vmw_surface srf;
	uint32_t size;
	struct drm_master *master;
};

/**
 * struct vmw_surface_offset - Backing store mip level offset info
 *
 * @face:           Surface face.
 * @mip:            Mip level.
 * @bo_offset:      Offset into backing store of this mip level.
 *
 */
struct vmw_surface_offset {
	uint32_t face;
	uint32_t mip;
	uint32_t bo_offset;
};

static void vmw_user_surface_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_surface_base_to_res(struct ttm_base_object *base);
static int vmw_legacy_srf_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf);
static int vmw_legacy_srf_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf);
static int vmw_legacy_srf_create(struct vmw_resource *res);
static int vmw_legacy_srf_destroy(struct vmw_resource *res);
static int vmw_gb_surface_create(struct vmw_resource *res);
static int vmw_gb_surface_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf);
static int vmw_gb_surface_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf);
static int vmw_gb_surface_destroy(struct vmw_resource *res);


static const struct vmw_user_resource_conv user_surface_conv = {
	.object_type = VMW_RES_SURFACE,
	.base_obj_to_res = vmw_user_surface_base_to_res,
	.res_free = vmw_user_surface_free
};

const struct vmw_user_resource_conv *user_surface_converter =
	&user_surface_conv;


static uint64_t vmw_user_surface_size;

static const struct vmw_res_func vmw_legacy_surface_func = {
	.res_type = vmw_res_surface,
	.needs_backup = false,
	.may_evict = true,
	.type_name = "legacy surfaces",
	.backup_placement = &vmw_srf_placement,
	.create = &vmw_legacy_srf_create,
	.destroy = &vmw_legacy_srf_destroy,
	.bind = &vmw_legacy_srf_bind,
	.unbind = &vmw_legacy_srf_unbind
};

static const struct vmw_res_func vmw_gb_surface_func = {
	.res_type = vmw_res_surface,
	.needs_backup = true,
	.may_evict = true,
	.type_name = "guest backed surfaces",
	.backup_placement = &vmw_mob_placement,
	.create = vmw_gb_surface_create,
	.destroy = vmw_gb_surface_destroy,
	.bind = vmw_gb_surface_bind,
	.unbind = vmw_gb_surface_unbind
};

/**
 * struct vmw_surface_dma - SVGA3D DMA command
 */
struct vmw_surface_dma {
	SVGA3dCmdHeader header;
	SVGA3dCmdSurfaceDMA body;
	SVGA3dCopyBox cb;
	SVGA3dCmdSurfaceDMASuffix suffix;
};

/**
 * struct vmw_surface_define - SVGA3D Surface Define command
 */
struct vmw_surface_define {
	SVGA3dCmdHeader header;
	SVGA3dCmdDefineSurface body;
};

/**
 * struct vmw_surface_destroy - SVGA3D Surface Destroy command
 */
struct vmw_surface_destroy {
	SVGA3dCmdHeader header;
	SVGA3dCmdDestroySurface body;
};
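
/*
 * These wrapper structs mirror the exact layout the device parses from
 * the FIFO: a generic SVGA3dCmdHeader followed by the command body (and,
 * for DMA, a copy box and suffix). A minimal emission sketch, assuming a
 * valid @dev_priv and a surface id @sid (illustrative only; it mirrors
 * what vmw_hw_surface_destroy() below actually does):
 *
 *	struct vmw_surface_destroy *cmd =
 *		vmw_fifo_reserve(dev_priv, sizeof(*cmd));
 *	if (cmd) {
 *		vmw_surface_destroy_encode(sid, cmd);
 *		vmw_fifo_commit(dev_priv, sizeof(*cmd));
 *	}
 */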


/**
 * vmw_surface_dma_size - Compute fifo size for a dma command.
 *
 * @srf: Pointer to a struct vmw_surface
 *
 * Computes the required size for a surface dma command for backup or
 * restoration of the surface represented by @srf.
 */
static inline uint32_t vmw_surface_dma_size(const struct vmw_surface *srf)
{
	return srf->num_sizes * sizeof(struct vmw_surface_dma);
}
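
/*
 * Worked example (illustrative): a cubemap with six faces and two mip
 * levels per face has num_sizes == 12, so a full backup or restore
 * needs 12 * sizeof(struct vmw_surface_dma) bytes of FIFO space -- one
 * header/body/copy-box/suffix tuple per face/mip image.
 */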


/**
 * vmw_surface_define_size - Compute fifo size for a surface define command.
 *
 * @srf: Pointer to a struct vmw_surface
 *
 * Computes the required size for a surface define command for the definition
 * of the surface represented by @srf.
 */
static inline uint32_t vmw_surface_define_size(const struct vmw_surface *srf)
{
	return sizeof(struct vmw_surface_define) + srf->num_sizes *
		sizeof(SVGA3dSize);
}


/**
 * vmw_surface_destroy_size - Compute fifo size for a surface destroy command.
 *
 * Computes the required size for a surface destroy command for the destruction
 * of a hw surface.
 */
static inline uint32_t vmw_surface_destroy_size(void)
{
	return sizeof(struct vmw_surface_destroy);
}

/**
 * vmw_surface_destroy_encode - Encode a surface_destroy command.
 *
 * @id: The surface id
 * @cmd_space: Pointer to memory area in which the commands should be encoded.
 */
static void vmw_surface_destroy_encode(uint32_t id,
				       void *cmd_space)
{
	struct vmw_surface_destroy *cmd = (struct vmw_surface_destroy *)
		cmd_space;

	cmd->header.id = SVGA_3D_CMD_SURFACE_DESTROY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.sid = id;
}

/**
 * vmw_surface_define_encode - Encode a surface_define command.
 *
 * @srf: Pointer to a struct vmw_surface object.
 * @cmd_space: Pointer to memory area in which the commands should be encoded.
 */
static void vmw_surface_define_encode(const struct vmw_surface *srf,
				      void *cmd_space)
{
	struct vmw_surface_define *cmd = (struct vmw_surface_define *)
		cmd_space;
	struct drm_vmw_size *src_size;
	SVGA3dSize *cmd_size;
	uint32_t cmd_len;
	int i;

	cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);

	cmd->header.id = SVGA_3D_CMD_SURFACE_DEFINE;
	cmd->header.size = cmd_len;
	cmd->body.sid = srf->res.id;
	cmd->body.surfaceFlags = srf->flags;
	cmd->body.format = srf->format;
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
		cmd->body.face[i].numMipLevels = srf->mip_levels[i];

	cmd += 1;
	cmd_size = (SVGA3dSize *) cmd;
	src_size = srf->sizes;

	for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
		cmd_size->width = src_size->width;
		cmd_size->height = src_size->height;
		cmd_size->depth = src_size->depth;
	}
}

/**
 * vmw_surface_dma_encode - Encode a surface_dma command.
 *
 * @srf: Pointer to a struct vmw_surface object.
 * @cmd_space: Pointer to memory area in which the commands should be encoded.
 * @ptr: Pointer to an SVGAGuestPtr indicating where the surface contents
 * should be placed or read from.
 * @to_surface: Boolean whether to DMA to the surface or from the surface.
 */
static void vmw_surface_dma_encode(struct vmw_surface *srf,
				   void *cmd_space,
				   const SVGAGuestPtr *ptr,
				   bool to_surface)
{
	uint32_t i;
	struct vmw_surface_dma *cmd = (struct vmw_surface_dma *)cmd_space;
	const struct svga3d_surface_desc *desc =
		svga3dsurface_get_desc(srf->format);

	for (i = 0; i < srf->num_sizes; ++i) {
		SVGA3dCmdHeader *header = &cmd->header;
		SVGA3dCmdSurfaceDMA *body = &cmd->body;
		SVGA3dCopyBox *cb = &cmd->cb;
		SVGA3dCmdSurfaceDMASuffix *suffix = &cmd->suffix;
		const struct vmw_surface_offset *cur_offset = &srf->offsets[i];
		const struct drm_vmw_size *cur_size = &srf->sizes[i];

		header->id = SVGA_3D_CMD_SURFACE_DMA;
		header->size = sizeof(*body) + sizeof(*cb) + sizeof(*suffix);

		body->guest.ptr = *ptr;
		body->guest.ptr.offset += cur_offset->bo_offset;
		body->guest.pitch = svga3dsurface_calculate_pitch(desc,
								  cur_size);
		body->host.sid = srf->res.id;
		body->host.face = cur_offset->face;
		body->host.mipmap = cur_offset->mip;
		body->transfer = ((to_surface) ?  SVGA3D_WRITE_HOST_VRAM :
				  SVGA3D_READ_HOST_VRAM);
		cb->x = 0;
		cb->y = 0;
		cb->z = 0;
		cb->srcx = 0;
		cb->srcy = 0;
		cb->srcz = 0;
		cb->w = cur_size->width;
		cb->h = cur_size->height;
		cb->d = cur_size->depth;

		suffix->suffixSize = sizeof(*suffix);
		suffix->maximumOffset =
			svga3dsurface_get_image_buffer_size(desc, cur_size,
							    body->guest.pitch);
		suffix->flags.discard = 0;
		suffix->flags.unsynchronized = 0;
		suffix->flags.reserved = 0;
		++cmd;
	}
}
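
/*
 * Sizing sketch (illustrative): for a 64x64 SVGA3D_A8R8G8B8 image the
 * surface descriptor yields a 4-byte block size, so body->guest.pitch
 * is 64 * 4 = 256 bytes and suffix->maximumOffset is the full image
 * size, 64 * 256 = 16384 bytes. One such command is emitted per
 * face/mip image.
 */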


/**
 * vmw_hw_surface_destroy - destroy a Device surface
 *
 * @res:        Pointer to a struct vmw_resource embedded in a struct
 *              vmw_surface.
 *
 * Destroys the device surface associated with a struct vmw_surface, if
 * any, and adjusts accounting and resource count accordingly.
 */
static void vmw_hw_surface_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_surface *srf;
	void *cmd;

	if (res->func->destroy == vmw_gb_surface_destroy) {
		(void) vmw_gb_surface_destroy(res);
		return;
	}

	if (res->id != -1) {
		cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size());
		if (unlikely(cmd == NULL)) {
			DRM_ERROR("Failed reserving FIFO space for surface "
				  "destruction.\n");
			return;
		}

		vmw_surface_destroy_encode(res->id, cmd);
		vmw_fifo_commit(dev_priv, vmw_surface_destroy_size());

		/*
		 * TODO: Use used_memory_size_atomic, or a separate lock,
		 * to avoid taking dev_priv::cmdbuf_mutex in the destroy
		 * path.
		 */

		mutex_lock(&dev_priv->cmdbuf_mutex);
		srf = vmw_res_to_srf(res);
		dev_priv->used_memory_size -= res->backup_size;
		mutex_unlock(&dev_priv->cmdbuf_mutex);
	}
	vmw_fifo_resource_dec(dev_priv);
}

/**
 * vmw_legacy_srf_create - Create a device surface as part of the
 * resource validation process.
 *
 * @res: Pointer to a struct vmw_surface.
 *
 * If the surface doesn't have a hw id yet, this function allocates one
 * and encodes a surface define command into the FIFO.
 *
 * Returns -EBUSY if there weren't sufficient device resources to
 * complete the validation. Retry after freeing up resources.
 *
 * May return other errors if the kernel is out of guest resources.
 */
static int vmw_legacy_srf_create(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_surface *srf;
	uint32_t submit_size;
	uint8_t *cmd;
	int ret;

	if (likely(res->id != -1))
		return 0;

	srf = vmw_res_to_srf(res);
	if (unlikely(dev_priv->used_memory_size + res->backup_size >=
		     dev_priv->memory_size))
		return -EBUSY;

	/*
	 * Alloc id for the resource.
	 */

	ret = vmw_resource_alloc_id(res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a surface id.\n");
		goto out_no_id;
	}

	if (unlikely(res->id >= SVGA3D_MAX_SURFACE_IDS)) {
		ret = -EBUSY;
		goto out_no_fifo;
	}

	/*
	 * Encode the surface define command.
	 */

	submit_size = vmw_surface_define_size(srf);
	cmd = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "creation.\n");
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	vmw_surface_define_encode(srf, cmd);
	vmw_fifo_commit(dev_priv, submit_size);
	/*
	 * Surface memory usage accounting.
	 */

	dev_priv->used_memory_size += res->backup_size;
	return 0;

out_no_fifo:
	vmw_resource_release_id(res);
out_no_id:
	return ret;
}

/**
 * vmw_legacy_srf_dma - Copy backup data to or from a legacy surface.
 *
 * @res:            Pointer to a struct vmw_resource embedded in a struct
 *                  vmw_surface.
 * @val_buf:        Pointer to a struct ttm_validate_buffer containing
 *                  information about the backup buffer.
 * @bind:           Boolean whether to DMA to the surface.
 *
 * Transfer backup data to or from a legacy surface as part of the
 * validation process.
 * May return other errors if the kernel is out of guest resources.
 * The backup buffer will be fenced or idle upon successful completion,
 * and if the surface needs persistent backup storage, the backup buffer
 * will also be returned reserved iff @bind is true.
 */
static int vmw_legacy_srf_dma(struct vmw_resource *res,
			      struct ttm_validate_buffer *val_buf,
			      bool bind)
{
	SVGAGuestPtr ptr;
	struct vmw_fence_obj *fence;
	uint32_t submit_size;
	struct vmw_surface *srf = vmw_res_to_srf(res);
	uint8_t *cmd;
	struct vmw_private *dev_priv = res->dev_priv;

	BUG_ON(val_buf->bo == NULL);

	submit_size = vmw_surface_dma_size(srf);
	cmd = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "DMA.\n");
		return -ENOMEM;
	}
	vmw_bo_get_guest_ptr(val_buf->bo, &ptr);
	vmw_surface_dma_encode(srf, cmd, &ptr, bind);

	vmw_fifo_commit(dev_priv, submit_size);

	/*
	 * Create a fence object and fence the backup buffer.
	 */

	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
					  &fence, NULL);

	vmw_fence_single_bo(val_buf->bo, fence);

	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	return 0;
}
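
/*
 * The tail of vmw_legacy_srf_dma() is the driver's standard
 * post-submission pattern (recap, not new behaviour): create a fence
 * covering the just-committed commands, attach it to the backup buffer
 * object so TTM waits for the DMA before moving or freeing the buffer,
 * then drop the local fence reference.
 */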

/**
 * vmw_legacy_srf_bind - Perform a legacy surface bind as part of the
 *                       surface validation process.
 *
 * @res:            Pointer to a struct vmw_resource embedded in a struct
 *                  vmw_surface.
 * @val_buf:        Pointer to a struct ttm_validate_buffer containing
 *                  information about the backup buffer.
 *
 * This function will copy backup data to the surface if the
 * backup buffer is dirty.
 */
static int vmw_legacy_srf_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf)
{
	if (!res->backup_dirty)
		return 0;

	return vmw_legacy_srf_dma(res, val_buf, true);
}


/**
 * vmw_legacy_srf_unbind - Perform a legacy surface unbind as part of the
 *                         surface eviction process.
 *
 * @res:            Pointer to a struct vmw_resource embedded in a struct
 *                  vmw_surface.
 * @readback:       Whether to copy surface data back to the backup buffer
 *                  before unbinding.
 * @val_buf:        Pointer to a struct ttm_validate_buffer containing
 *                  information about the backup buffer.
 *
 * This function will copy backup data from the surface.
 */
static int vmw_legacy_srf_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf)
{
	if (unlikely(readback))
		return vmw_legacy_srf_dma(res, val_buf, false);
	return 0;
}

/**
 * vmw_legacy_srf_destroy - Destroy a device surface as part of a
 *                          resource eviction process.
 *
 * @res:            Pointer to a struct vmw_resource embedded in a struct
 *                  vmw_surface.
 */
static int vmw_legacy_srf_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	uint32_t submit_size;
	uint8_t *cmd;

	BUG_ON(res->id == -1);

	/*
	 * Encode the surface destroy command.
	 */

	submit_size = vmw_surface_destroy_size();
	cmd = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "eviction.\n");
		return -ENOMEM;
	}

	vmw_surface_destroy_encode(res->id, cmd);
	vmw_fifo_commit(dev_priv, submit_size);

	/*
	 * Surface memory usage accounting.
	 */

	dev_priv->used_memory_size -= res->backup_size;

	/*
	 * Release the surface ID.
	 */

	vmw_resource_release_id(res);

	return 0;
}


/**
 * vmw_surface_init - initialize a struct vmw_surface
 *
 * @dev_priv:       Pointer to a device private struct.
 * @srf:            Pointer to the struct vmw_surface to initialize.
 * @res_free:       Pointer to a resource destructor used to free
 *                  the object.
 */
static int vmw_surface_init(struct vmw_private *dev_priv,
			    struct vmw_surface *srf,
			    void (*res_free) (struct vmw_resource *res))
{
	int ret;
	struct vmw_resource *res = &srf->res;

	BUG_ON(res_free == NULL);
	if (!dev_priv->has_mob)
		vmw_fifo_resource_inc(dev_priv);
	ret = vmw_resource_init(dev_priv, res, true, res_free,
				(dev_priv->has_mob) ? &vmw_gb_surface_func :
				&vmw_legacy_surface_func);

	if (unlikely(ret != 0)) {
		if (!dev_priv->has_mob)
			vmw_fifo_resource_dec(dev_priv);
		res_free(res);
		return ret;
	}

	/*
	 * The surface won't be visible to hardware until a
	 * surface validate.
	 */

	INIT_LIST_HEAD(&srf->view_list);
	vmw_resource_activate(res, vmw_hw_surface_destroy);
	return ret;
}
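
/*
 * Call-sequence sketch (illustrative, assuming a kzalloc'ed
 * struct vmw_user_surface *user_srf as in the define ioctls below):
 *
 *	ret = vmw_surface_init(dev_priv, &user_srf->srf,
 *			       vmw_user_surface_free);
 *	if (ret)
 *		return ret;
 *
 * Note that on failure vmw_surface_init() has already invoked
 * @res_free, so the caller must not free the surface again.
 */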

/**
 * vmw_user_surface_base_to_res - TTM base object to resource converter for
 *                                user visible surfaces
 *
 * @base:           Pointer to a TTM base object
 *
 * Returns the struct vmw_resource embedded in a struct vmw_surface
 * for the user-visible object identified by the TTM base object @base.
 */
static struct vmw_resource *
vmw_user_surface_base_to_res(struct ttm_base_object *base)
{
	return &(container_of(base, struct vmw_user_surface,
			      prime.base)->srf.res);
}
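
/*
 * The conversion above walks two embeddings: the ttm_base_object is
 * embedded in a ttm_prime_object (member base), which is embedded in a
 * vmw_user_surface (member prime), which embeds the vmw_surface that
 * holds the resource. Spelled out (equivalent, illustrative):
 *
 *	struct vmw_user_surface *user_srf =
 *		container_of(base, struct vmw_user_surface, prime.base);
 *	struct vmw_resource *res = &user_srf->srf.res;
 */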

/**
 * vmw_user_surface_free - User visible surface resource destructor
 *
 * @res:            A struct vmw_resource embedded in a struct vmw_surface.
 */
static void vmw_user_surface_free(struct vmw_resource *res)
{
	struct vmw_surface *srf = vmw_res_to_srf(res);
	struct vmw_user_surface *user_srf =
	    container_of(srf, struct vmw_user_surface, srf);
	struct vmw_private *dev_priv = srf->res.dev_priv;
	uint32_t size = user_srf->size;

	if (user_srf->master)
		drm_master_put(&user_srf->master);
	kfree(srf->offsets);
	kfree(srf->sizes);
	kfree(srf->snooper.image);
	ttm_prime_object_kfree(user_srf, prime);
	ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
}

/**
 * vmw_user_surface_base_release - User visible surface TTM base object
 *                                 destructor
 *
 * @p_base:         Pointer to a pointer to a TTM base object
 *                  embedded in a struct vmw_user_surface.
 *
 * Drops the base object's reference on its resource, and the
 * pointer pointed to by *p_base is set to NULL.
 */
static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_surface *user_srf =
	    container_of(base, struct vmw_user_surface, prime.base);
	struct vmw_resource *res = &user_srf->srf.res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

/**
 * vmw_surface_destroy_ioctl - Ioctl function implementing
 *                             the user surface destroy functionality.
 *
 * @dev:            Pointer to a struct drm_device.
 * @data:           Pointer to data copied from / to user-space.
 * @file_priv:      Pointer to a drm file private structure.
 */
int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

	return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE);
}

/**
 * vmw_surface_define_ioctl - Ioctl function implementing
 *                            the user surface define functionality.
 *
 * @dev:            Pointer to a struct drm_device.
 * @data:           Pointer to data copied from / to user-space.
 * @file_priv:      Pointer to a drm file private structure.
 */
int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_surface *user_srf;
	struct vmw_surface *srf;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	union drm_vmw_surface_create_arg *arg =
	    (union drm_vmw_surface_create_arg *)data;
	struct drm_vmw_surface_create_req *req = &arg->req;
	struct drm_vmw_surface_arg *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct drm_vmw_size __user *user_sizes;
	int ret;
	int i, j;
	uint32_t cur_bo_offset;
	struct drm_vmw_size *cur_size;
	struct vmw_surface_offset *cur_offset;
	uint32_t num_sizes;
	uint32_t size;
	const struct svga3d_surface_desc *desc;

	if (unlikely(vmw_user_surface_size == 0))
		vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
			128;

	num_sizes = 0;
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
		num_sizes += req->mip_levels[i];

	if (num_sizes > DRM_VMW_MAX_SURFACE_FACES *
	    DRM_VMW_MAX_MIP_LEVELS ||
	    num_sizes == 0)
		return -EINVAL;

	size = vmw_user_surface_size + 128 +
		ttm_round_pot(num_sizes * sizeof(struct drm_vmw_size)) +
		ttm_round_pot(num_sizes * sizeof(struct vmw_surface_offset));

	desc = svga3dsurface_get_desc(req->format);
	if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) {
		DRM_ERROR("Invalid surface format for surface creation.\n");
		DRM_ERROR("Format requested is: %d\n", req->format);
		return -EINVAL;
	}

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
				   size, false, true);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Out of graphics memory for surface"
				  " creation.\n");
		goto out_unlock;
	}

	user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
	if (unlikely(user_srf == NULL)) {
		ret = -ENOMEM;
		goto out_no_user_srf;
	}

	srf = &user_srf->srf;
	res = &srf->res;

	srf->flags = req->flags;
	srf->format = req->format;
	srf->scanout = req->scanout;

	memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
	srf->num_sizes = num_sizes;
	user_srf->size = size;

	srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL);
	if (unlikely(srf->sizes == NULL)) {
		ret = -ENOMEM;
		goto out_no_sizes;
	}
	srf->offsets = kmalloc(srf->num_sizes * sizeof(*srf->offsets),
			       GFP_KERNEL);
	if (unlikely(srf->offsets == NULL)) {
		ret = -ENOMEM;
		goto out_no_offsets;
	}

	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
	    req->size_addr;

	ret = copy_from_user(srf->sizes, user_sizes,
			     srf->num_sizes * sizeof(*srf->sizes));
	if (unlikely(ret != 0)) {
		ret = -EFAULT;
		goto out_no_copy;
	}

	srf->base_size = *srf->sizes;
	srf->autogen_filter = SVGA3D_TEX_FILTER_NONE;
	srf->multisample_count = 0;

	cur_bo_offset = 0;
	cur_offset = srf->offsets;
	cur_size = srf->sizes;

	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
		for (j = 0; j < srf->mip_levels[i]; ++j) {
			uint32_t stride = svga3dsurface_calculate_pitch
				(desc, cur_size);

			cur_offset->face = i;
			cur_offset->mip = j;
			cur_offset->bo_offset = cur_bo_offset;
			cur_bo_offset += svga3dsurface_get_image_buffer_size
				(desc, cur_size, stride);
			++cur_offset;
			++cur_size;
		}
	}
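
	/*
	 * Worked example (illustrative, assuming the usual halved mip
	 * chain): a single-face 256x256 SVGA3D_A8R8G8B8 surface with two
	 * mip levels lays out mip 0 at bo_offset 0 (pitch 1024, 262144
	 * bytes) and mip 1 at bo_offset 262144 (128x128, 65536 bytes),
	 * so res->backup_size below becomes 327680.
	 */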
	res->backup_size = cur_bo_offset;
	if (srf->scanout &&
	    srf->num_sizes == 1 &&
	    srf->sizes[0].width == 64 &&
	    srf->sizes[0].height == 64 &&
	    srf->format == SVGA3D_A8R8G8B8) {

		srf->snooper.image = kmalloc(64 * 64 * 4, GFP_KERNEL);
		/* clear the image */
		if (srf->snooper.image) {
			memset(srf->snooper.image, 0x00, 64 * 64 * 4);
		} else {
			DRM_ERROR("Failed to allocate cursor_image\n");
			ret = -ENOMEM;
			goto out_no_copy;
		}
	} else {
		srf->snooper.image = NULL;
	}
	srf->snooper.crtc = NULL;

	user_srf->prime.base.shareable = false;
	user_srf->prime.base.tfile = NULL;
	if (drm_is_primary_client(file_priv))
		user_srf->master = drm_master_get(file_priv->master);

	/*
	 * From this point, the generic resource management functions
	 * destroy the object on failure.
	 */

	ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
	if (unlikely(ret != 0))
		goto out_unlock;

	/*
	 * A gb-aware client referencing a shared surface will
	 * expect a backup buffer to be present.
	 */
	if (dev_priv->has_mob && req->shareable) {
		uint32_t backup_handle;

		ret = vmw_user_dmabuf_alloc(dev_priv, tfile,
					    res->backup_size,
					    true,
					    &backup_handle,
					    &res->backup);
		if (unlikely(ret != 0)) {
			vmw_resource_unreference(&res);
			goto out_unlock;
		}
	}

	tmp = vmw_resource_reference(&srf->res);
	ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
				    req->shareable, VMW_RES_SURFACE,
				    &vmw_user_surface_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		vmw_resource_unreference(&res);
		goto out_unlock;
	}

	rep->sid = user_srf->prime.base.hash.key;
	vmw_resource_unreference(&res);

	ttm_read_unlock(&dev_priv->reservation_sem);
	return 0;
out_no_copy:
	kfree(srf->offsets);
out_no_offsets:
	kfree(srf->sizes);
out_no_sizes:
	ttm_prime_object_kfree(user_srf, prime);
out_no_user_srf:
	ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
out_unlock:
	ttm_read_unlock(&dev_priv->reservation_sem);
	return ret;
}
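
/**
 * vmw_surface_handle_reference - Add a per-file reference to a surface
 *
 * @dev_priv:       Pointer to a device private struct.
 * @file_priv:      Pointer to a drm file private structure.
 * @u_handle:       User handle or prime fd identifying the surface.
 * @handle_type:    Whether @u_handle is a legacy handle or a prime fd.
 * @base_p:         On success, set to the referenced TTM base object.
 *
 * Helper shared by the reference ioctls below. For prime fds the fd is
 * first converted to a handle; legacy handles are refused for render
 * clients and for primary clients with a different authenticating master.
 */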
static int
vmw_surface_handle_reference(struct vmw_private *dev_priv,
			     struct drm_file *file_priv,
			     uint32_t u_handle,
			     enum drm_vmw_handle_type handle_type,
			     struct ttm_base_object **base_p)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_user_surface *user_srf;
	uint32_t handle;
	struct ttm_base_object *base;
	int ret;

	if (handle_type == DRM_VMW_HANDLE_PRIME) {
		ret = ttm_prime_fd_to_handle(tfile, u_handle, &handle);
		if (unlikely(ret != 0))
			return ret;
	} else {
		if (unlikely(drm_is_render_client(file_priv))) {
			DRM_ERROR("Render client refused legacy "
				  "surface reference.\n");
			return -EACCES;
		}
		handle = u_handle;
	}

	ret = -EINVAL;
	base = ttm_base_object_lookup_for_ref(dev_priv->tdev, handle);
	if (unlikely(base == NULL)) {
		DRM_ERROR("Could not find surface to reference.\n");
		goto out_no_lookup;
	}

	if (unlikely(ttm_base_object_type(base) != VMW_RES_SURFACE)) {
		DRM_ERROR("Referenced object is not a surface.\n");
		goto out_bad_resource;
	}

	if (handle_type != DRM_VMW_HANDLE_PRIME) {
		user_srf = container_of(base, struct vmw_user_surface,
					prime.base);

		/*
		 * Make sure the surface creator has the same
		 * authenticating master.
		 */
		if (drm_is_primary_client(file_priv) &&
		    user_srf->master != file_priv->master) {
			DRM_ERROR("Trying to reference surface outside of"
				  " master domain.\n");
			ret = -EACCES;
			goto out_bad_resource;
		}

		ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Could not add a reference to a surface.\n");
			goto out_bad_resource;
		}
	}

	*base_p = base;
	return 0;

out_bad_resource:
	ttm_base_object_unref(&base);
out_no_lookup:
	if (handle_type == DRM_VMW_HANDLE_PRIME)
		(void) ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);

	return ret;
}

/**
 * vmw_surface_reference_ioctl - Ioctl function implementing
 *                               the user surface reference functionality.
 *
 * @dev:            Pointer to a struct drm_device.
 * @data:           Pointer to data copied from / to user-space.
 * @file_priv:      Pointer to a drm file private structure.
 */
int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	union drm_vmw_surface_reference_arg *arg =
	    (union drm_vmw_surface_reference_arg *)data;
	struct drm_vmw_surface_arg *req = &arg->req;
	struct drm_vmw_surface_create_req *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_surface *srf;
	struct vmw_user_surface *user_srf;
	struct drm_vmw_size __user *user_sizes;
	struct ttm_base_object *base;
	int ret;

	ret = vmw_surface_handle_reference(dev_priv, file_priv, req->sid,
					   req->handle_type, &base);
	if (unlikely(ret != 0))
		return ret;

	user_srf = container_of(base, struct vmw_user_surface, prime.base);
	srf = &user_srf->srf;

	rep->flags = srf->flags;
	rep->format = srf->format;
	memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
	    rep->size_addr;

	if (user_sizes)
		ret = copy_to_user(user_sizes, &srf->base_size,
				   sizeof(srf->base_size));
	if (unlikely(ret != 0)) {
		DRM_ERROR("copy_to_user failed %p %u\n",
			  user_sizes, srf->num_sizes);
		ttm_ref_object_base_unref(tfile, base->hash.key, TTM_REF_USAGE);
		ret = -EFAULT;
	}

	ttm_base_object_unref(&base);

	return ret;
}

/**
 * vmw_gb_surface_create - Create a guest-backed device surface as part
 *                         of the resource validation process.
 *
 * @res: Pointer to a struct vmw_resource embedded in a struct vmw_surface.
 */
static int vmw_gb_surface_create(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_surface *srf = vmw_res_to_srf(res);
	uint32_t cmd_len, cmd_id, submit_len;
	int ret;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineGBSurface body;
	} *cmd;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineGBSurface_v2 body;
	} *cmd2;

	if (likely(res->id != -1))
		return 0;

	vmw_fifo_resource_inc(dev_priv);
	ret = vmw_resource_alloc_id(res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a surface id.\n");
		goto out_no_id;
	}

	if (unlikely(res->id >= VMWGFX_NUM_GB_SURFACE)) {
		ret = -EBUSY;
		goto out_no_fifo;
	}

	if (srf->array_size > 0) {
		/* has_dx was checked at creation time. */
		cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE_V2;
		cmd_len = sizeof(cmd2->body);
		submit_len = sizeof(*cmd2);
	} else {
		cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE;
		cmd_len = sizeof(cmd->body);
		submit_len = sizeof(*cmd);
	}

	cmd = vmw_fifo_reserve(dev_priv, submit_len);
	cmd2 = (typeof(cmd2))cmd;
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "creation.\n");
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	if (srf->array_size > 0) {
		cmd2->header.id = cmd_id;
		cmd2->header.size = cmd_len;
		cmd2->body.sid = srf->res.id;
		cmd2->body.surfaceFlags = srf->flags;
		cmd2->body.format = cpu_to_le32(srf->format);
		cmd2->body.numMipLevels = srf->mip_levels[0];
		cmd2->body.multisampleCount = srf->multisample_count;
		cmd2->body.autogenFilter = srf->autogen_filter;
		cmd2->body.size.width = srf->base_size.width;
		cmd2->body.size.height = srf->base_size.height;
		cmd2->body.size.depth = srf->base_size.depth;
		cmd2->body.arraySize = srf->array_size;
	} else {
		cmd->header.id = cmd_id;
		cmd->header.size = cmd_len;
		cmd->body.sid = srf->res.id;
		cmd->body.surfaceFlags = srf->flags;
		cmd->body.format = cpu_to_le32(srf->format);
		cmd->body.numMipLevels = srf->mip_levels[0];
		cmd->body.multisampleCount = srf->multisample_count;
		cmd->body.autogenFilter = srf->autogen_filter;
		cmd->body.size.width = srf->base_size.width;
		cmd->body.size.height = srf->base_size.height;
		cmd->body.size.depth = srf->base_size.depth;
	}

	vmw_fifo_commit(dev_priv, submit_len);

	return 0;

out_no_fifo:
	vmw_resource_release_id(res);
out_no_id:
	vmw_fifo_resource_dec(dev_priv);
	return ret;
}
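
/*
 * Command-selection note (recap of the logic above): array surfaces
 * (srf->array_size > 0) need the DX-era DEFINE_GB_SURFACE_V2 command,
 * whose body carries the extra arraySize field; everything else uses
 * the smaller v1 command. The two bodies share their leading fields,
 * which is why cmd2 may simply alias the single FIFO reservation.
 */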

static int vmw_gb_surface_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBSurface body;
	} *cmd1;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdUpdateGBSurface body;
	} *cmd2;
	uint32_t submit_size;
	struct ttm_buffer_object *bo = val_buf->bo;

	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

	submit_size = sizeof(*cmd1) + (res->backup_dirty ? sizeof(*cmd2) : 0);

	cmd1 = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd1 == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "binding.\n");
		return -ENOMEM;
	}

	cmd1->header.id = SVGA_3D_CMD_BIND_GB_SURFACE;
	cmd1->header.size = sizeof(cmd1->body);
	cmd1->body.sid = res->id;
	cmd1->body.mobid = bo->mem.start;
	if (res->backup_dirty) {
		cmd2 = (void *) &cmd1[1];
		cmd2->header.id = SVGA_3D_CMD_UPDATE_GB_SURFACE;
		cmd2->header.size = sizeof(cmd2->body);
		cmd2->body.sid = res->id;
		res->backup_dirty = false;
	}
	vmw_fifo_commit(dev_priv, submit_size);

	return 0;
}
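
/*
 * FIFO sizing note (recap): when the backup is dirty, the BIND and
 * UPDATE commands above are reserved and committed as one contiguous
 * submission -- cmd2 starts at &cmd1[1] -- so submit_size is
 * sizeof(*cmd1) + sizeof(*cmd2) in that case and just sizeof(*cmd1)
 * otherwise.
 */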

static int vmw_gb_surface_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct ttm_buffer_object *bo = val_buf->bo;
	struct vmw_fence_obj *fence;

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdReadbackGBSurface body;
	} *cmd1;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdInvalidateGBSurface body;
	} *cmd2;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBSurface body;
	} *cmd3;
	uint32_t submit_size;
	uint8_t *cmd;

	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

	submit_size = sizeof(*cmd3) + (readback ? sizeof(*cmd1) : sizeof(*cmd2));
	cmd = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "unbinding.\n");
		return -ENOMEM;
	}

	if (readback) {
		cmd1 = (void *) cmd;
		cmd1->header.id = SVGA_3D_CMD_READBACK_GB_SURFACE;
		cmd1->header.size = sizeof(cmd1->body);
		cmd1->body.sid = res->id;
		cmd3 = (void *) &cmd1[1];
	} else {
		cmd2 = (void *) cmd;
		cmd2->header.id = SVGA_3D_CMD_INVALIDATE_GB_SURFACE;
		cmd2->header.size = sizeof(cmd2->body);
		cmd2->body.sid = res->id;
		cmd3 = (void *) &cmd2[1];
	}

	cmd3->header.id = SVGA_3D_CMD_BIND_GB_SURFACE;
	cmd3->header.size = sizeof(cmd3->body);
	cmd3->body.sid = res->id;
	cmd3->body.mobid = SVGA3D_INVALID_ID;

	vmw_fifo_commit(dev_priv, submit_size);

	/*
	 * Create a fence object and fence the backup buffer.
	 */

	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
					  &fence, NULL);

	vmw_fence_single_bo(val_buf->bo, fence);

	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	return 0;
}

static int vmw_gb_surface_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_surface *srf = vmw_res_to_srf(res);
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyGBSurface body;
	} *cmd;

	if (likely(res->id == -1))
		return 0;

	mutex_lock(&dev_priv->binding_mutex);
	vmw_view_surface_list_destroy(dev_priv, &srf->view_list);
	vmw_binding_res_list_scrub(&res->binding_head);

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "destruction.\n");
		mutex_unlock(&dev_priv->binding_mutex);
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_DESTROY_GB_SURFACE;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.sid = res->id;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	mutex_unlock(&dev_priv->binding_mutex);
	vmw_resource_release_id(res);
	vmw_fifo_resource_dec(dev_priv);

	return 0;
}


/**
 * vmw_gb_surface_define_ioctl - Ioctl function implementing
 *                               the user surface define functionality.
 *
 * @dev:            Pointer to a struct drm_device.
 * @data:           Pointer to data copied from / to user-space.
 * @file_priv:      Pointer to a drm file private structure.
 */
int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_surface *user_srf;
	struct vmw_surface *srf;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	union drm_vmw_gb_surface_create_arg *arg =
	    (union drm_vmw_gb_surface_create_arg *)data;
	struct drm_vmw_gb_surface_create_req *req = &arg->req;
	struct drm_vmw_gb_surface_create_rep *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;
	uint32_t size;
	uint32_t backup_handle;

	if (unlikely(vmw_user_surface_size == 0))
		vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
			128;

	size = vmw_user_surface_size + 128;

	/* Define a surface based on the parameters. */
	ret = vmw_surface_gb_priv_define(dev,
			size,
			req->svga3d_flags,
			req->format,
			req->drm_surface_flags & drm_vmw_surface_flag_scanout,
			req->mip_levels,
			req->multisample_count,
			req->array_size,
			req->base_size,
			&srf);
	if (unlikely(ret != 0))
		return ret;

	user_srf = container_of(srf, struct vmw_user_surface, srf);
	if (drm_is_primary_client(file_priv))
		user_srf->master = drm_master_get(file_priv->master);

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	res = &user_srf->srf.res;

	if (req->buffer_handle != SVGA3D_INVALID_ID) {
		ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle,
					     &res->backup);
		if (ret == 0 && res->backup->base.num_pages * PAGE_SIZE <
		    res->backup_size) {
			DRM_ERROR("Surface backup buffer is too small.\n");
			vmw_dmabuf_unreference(&res->backup);
			ret = -EINVAL;
			goto out_unlock;
		}
	} else if (req->drm_surface_flags & drm_vmw_surface_flag_create_buffer)
		ret = vmw_user_dmabuf_alloc(dev_priv, tfile,
					    res->backup_size,
					    req->drm_surface_flags &
					    drm_vmw_surface_flag_shareable,
					    &backup_handle,
					    &res->backup);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&res);
		goto out_unlock;
	}

	tmp = vmw_resource_reference(res);
	ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
				    req->drm_surface_flags &
				    drm_vmw_surface_flag_shareable,
				    VMW_RES_SURFACE,
				    &vmw_user_surface_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		vmw_resource_unreference(&res);
		goto out_unlock;
	}

	rep->handle      = user_srf->prime.base.hash.key;
	rep->backup_size = res->backup_size;
	if (res->backup) {
		rep->buffer_map_handle =
			drm_vma_node_offset_addr(&res->backup->base.vma_node);
		rep->buffer_size = res->backup->base.num_pages * PAGE_SIZE;
		rep->buffer_handle = backup_handle;
	} else {
		rep->buffer_map_handle = 0;
		rep->buffer_size = 0;
		rep->buffer_handle = SVGA3D_INVALID_ID;
	}

	vmw_resource_unreference(&res);

out_unlock:
	ttm_read_unlock(&dev_priv->reservation_sem);
	return ret;
}
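
/*
 * Userspace view (illustrative; the ioctl wrapper follows the usual
 * libdrm conventions and is not defined in this file): the define ioctl
 * round-trips a union, request in, reply out:
 *
 *	union drm_vmw_gb_surface_create_arg arg = { 0 };
 *	arg.req.format = SVGA3D_A8R8G8B8;
 *	arg.req.base_size.width = 64;	// etc.
 *	arg.req.buffer_handle = SVGA3D_INVALID_ID;
 *	if (drmCommandWriteRead(fd, DRM_VMW_GB_SURFACE_CREATE,
 *				&arg, sizeof(arg)) == 0)
 *		handle = arg.rep.handle;
 */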

/**
 * vmw_gb_surface_reference_ioctl - Ioctl function implementing
 *                                  the user surface reference functionality.
 *
 * @dev:            Pointer to a struct drm_device.
 * @data:           Pointer to data copied from / to user-space.
 * @file_priv:      Pointer to a drm file private structure.
 */
int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	union drm_vmw_gb_surface_reference_arg *arg =
	    (union drm_vmw_gb_surface_reference_arg *)data;
	struct drm_vmw_surface_arg *req = &arg->req;
	struct drm_vmw_gb_surface_ref_rep *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_surface *srf;
	struct vmw_user_surface *user_srf;
	struct ttm_base_object *base;
	uint32_t backup_handle;
	int ret = -EINVAL;

	ret = vmw_surface_handle_reference(dev_priv, file_priv, req->sid,
					   req->handle_type, &base);
	if (unlikely(ret != 0))
		return ret;

	user_srf = container_of(base, struct vmw_user_surface, prime.base);
	srf = &user_srf->srf;
	if (srf->res.backup == NULL) {
		DRM_ERROR("Shared GB surface is missing a backup buffer.\n");
		ret = -EINVAL;
		goto out_bad_resource;
	}

	mutex_lock(&dev_priv->cmdbuf_mutex); /* Protect res->backup */
	ret = vmw_user_dmabuf_reference(tfile, srf->res.backup,
					&backup_handle);
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not add a reference to a GB surface "
			  "backup buffer.\n");
		(void) ttm_ref_object_base_unref(tfile, base->hash.key,
						 TTM_REF_USAGE);
		goto out_bad_resource;
	}

	rep->creq.svga3d_flags = srf->flags;
	rep->creq.format = srf->format;
	rep->creq.mip_levels = srf->mip_levels[0];
	rep->creq.drm_surface_flags = 0;
	rep->creq.multisample_count = srf->multisample_count;
	rep->creq.autogen_filter = srf->autogen_filter;
	rep->creq.array_size = srf->array_size;
	rep->creq.buffer_handle = backup_handle;
	rep->creq.base_size = srf->base_size;
	rep->crep.handle = user_srf->prime.base.hash.key;
	rep->crep.backup_size = srf->res.backup_size;
	rep->crep.buffer_handle = backup_handle;
	rep->crep.buffer_map_handle =
		drm_vma_node_offset_addr(&srf->res.backup->base.vma_node);
	rep->crep.buffer_size = srf->res.backup->base.num_pages * PAGE_SIZE;

out_bad_resource:
	ttm_base_object_unref(&base);

	return ret;
}

/**
 * vmw_surface_gb_priv_define - Define a private GB surface
 *
 * @dev:  Pointer to a struct drm_device
 * @user_accounting_size:  Used to track user-space memory usage, set
 *                         to 0 for kernel mode only memory
 * @svga3d_flags: SVGA3d surface flags for the device
 * @format: requested surface format
 * @for_scanout: true if intended to be used for a scanout buffer
 * @num_mip_levels:  number of MIP levels
 * @multisample_count: number of samples for a multisampled surface
 * @array_size: Surface array size.
 * @size: width, height, depth of the surface requested
 * @srf_out: allocated surface.  Set to NULL on failure.
 *
 * GB surfaces allocated by this function will not have a user mode handle, and
 * thus will only be visible to vmwgfx.  For optimization reasons the
 * surface may later be given a user mode handle by another function to make
 * it available to user mode drivers.
 */
int vmw_surface_gb_priv_define(struct drm_device *dev,
			       uint32_t user_accounting_size,
			       uint32_t svga3d_flags,
			       SVGA3dSurfaceFormat format,
			       bool for_scanout,
			       uint32_t num_mip_levels,
			       uint32_t multisample_count,
			       uint32_t array_size,
			       struct drm_vmw_size size,
			       struct vmw_surface **srf_out)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_surface *user_srf;
	struct vmw_surface *srf;
	int ret;
	u32 num_layers;

	*srf_out = NULL;

	if (for_scanout) {
		if (!svga3dsurface_is_screen_target_format(format)) {
			DRM_ERROR("Invalid Screen Target surface format.");
			return -EINVAL;
		}
	} else {
		const struct svga3d_surface_desc *desc;

		desc = svga3dsurface_get_desc(format);
		if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) {
			DRM_ERROR("Invalid surface format.\n");
			return -EINVAL;
		}
	}

	/* array_size must be zero for non-DX hosts. */
	if (array_size > 0 && !dev_priv->has_dx) {
		DRM_ERROR("Tried to create DX surface on non-DX host.\n");
		return -EINVAL;
	}

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
				   user_accounting_size, false, true);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Out of graphics memory for surface"
				  " creation.\n");
		goto out_unlock;
	}

	user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
	if (unlikely(user_srf == NULL)) {
		ret = -ENOMEM;
		goto out_no_user_srf;
	}

	*srf_out  = &user_srf->srf;
	user_srf->size = user_accounting_size;
	user_srf->prime.base.shareable = false;
	user_srf->prime.base.tfile     = NULL;

	srf = &user_srf->srf;
	srf->flags             = svga3d_flags;
	srf->format            = format;
	srf->scanout           = for_scanout;
	srf->mip_levels[0]     = num_mip_levels;
	srf->num_sizes         = 1;
	srf->sizes             = NULL;
	srf->offsets           = NULL;
	srf->base_size         = size;
	srf->autogen_filter    = SVGA3D_TEX_FILTER_NONE;
	srf->array_size        = array_size;
	srf->multisample_count = multisample_count;

	if (array_size)
		num_layers = array_size;
	else if (svga3d_flags & SVGA3D_SURFACE_CUBEMAP)
		num_layers = SVGA3D_MAX_SURFACE_FACES;
	else
		num_layers = 1;

	srf->res.backup_size   =
		svga3dsurface_get_serialized_size(srf->format,
						  srf->base_size,
						  srf->mip_levels[0],
						  num_layers);

	if (srf->flags & SVGA3D_SURFACE_BIND_STREAM_OUTPUT)
		srf->res.backup_size += sizeof(SVGA3dDXSOState);

	if (dev_priv->active_display_unit == vmw_du_screen_target &&
	    for_scanout)
		srf->flags |= SVGA3D_SURFACE_SCREENTARGET;

	/*
	 * From this point, the generic resource management functions
	 * destroy the object on failure.
	 */
	ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);

	ttm_read_unlock(&dev_priv->reservation_sem);
	return ret;

out_no_user_srf:
	ttm_mem_global_free(vmw_mem_glob(dev_priv), user_accounting_size);

out_unlock:
	ttm_read_unlock(&dev_priv->reservation_sem);
	return ret;
}
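
/*
 * In-kernel usage sketch (illustrative; the parameter choices are
 * assumptions, not lifted from a real call site): the screen-target
 * display path defines its scanout surfaces roughly like this:
 *
 *	struct vmw_surface *srf;
 *	int ret = vmw_surface_gb_priv_define(dev, 0, 0, SVGA3D_X8R8G8B8,
 *					     true, 1, 0, 0, size, &srf);
 *	if (ret)
 *		return ret;
 *	// ... use srf->res, then drop it with vmw_resource_unreference()
 */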