/**************************************************************************
 *
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_so.h"
#include "vmwgfx_binding.h"
#include <ttm/ttm_placement.h>
#include "device_include/svga3d_surfacedefs.h"


/**
 * struct vmw_user_surface - User-space visible surface resource
 *
 * @prime:          The TTM prime object handling user-space visibility
 *                  and sharing.
 * @srf:            The surface metadata.
 * @size:           TTM accounting size for the surface.
 * @master:         Master of the creating client. Used for security checks.
 */
struct vmw_user_surface {
	struct ttm_prime_object prime;
	struct vmw_surface srf;
	uint32_t size;
	struct drm_master *master;
};

/**
 * struct vmw_surface_offset - Backing store mip level offset info
 *
 * @face:           Surface face.
 * @mip:            Mip level.
 * @bo_offset:      Offset into backing store of this mip level.
 *
 */
struct vmw_surface_offset {
	uint32_t face;
	uint32_t mip;
	uint32_t bo_offset;
};

static void vmw_user_surface_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_surface_base_to_res(struct ttm_base_object *base);
static int vmw_legacy_srf_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf);
static int vmw_legacy_srf_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf);
static int vmw_legacy_srf_create(struct vmw_resource *res);
static int vmw_legacy_srf_destroy(struct vmw_resource *res);
static int vmw_gb_surface_create(struct vmw_resource *res);
static int vmw_gb_surface_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf);
static int vmw_gb_surface_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf);
static int vmw_gb_surface_destroy(struct vmw_resource *res);


static const struct vmw_user_resource_conv user_surface_conv = {
	.object_type = VMW_RES_SURFACE,
	.base_obj_to_res = vmw_user_surface_base_to_res,
	.res_free = vmw_user_surface_free
};

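/*
 * Converter exposed to the rest of the driver for looking up the
 * struct vmw_resource backing a user-space surface handle.
 */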
const struct vmw_user_resource_conv *user_surface_converter =
	&user_surface_conv;


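/*
 * TTM memory accounting size of a struct vmw_user_surface,
 * computed lazily on first use in the surface define ioctls.
 */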
static uint64_t vmw_user_surface_size;

static const struct vmw_res_func vmw_legacy_surface_func = {
	.res_type = vmw_res_surface,
	.needs_backup = false,
	.may_evict = true,
	.type_name = "legacy surfaces",
	.backup_placement = &vmw_srf_placement,
	.create = &vmw_legacy_srf_create,
	.destroy = &vmw_legacy_srf_destroy,
	.bind = &vmw_legacy_srf_bind,
	.unbind = &vmw_legacy_srf_unbind
};

static const struct vmw_res_func vmw_gb_surface_func = {
	.res_type = vmw_res_surface,
	.needs_backup = true,
	.may_evict = true,
	.type_name = "guest backed surfaces",
	.backup_placement = &vmw_mob_placement,
	.create = vmw_gb_surface_create,
	.destroy = vmw_gb_surface_destroy,
	.bind = vmw_gb_surface_bind,
	.unbind = vmw_gb_surface_unbind
};

/**
 * struct vmw_surface_dma - SVGA3D DMA command
 *
 * @header: SVGA3D command header.
 * @body:   Surface DMA command body.
 * @cb:     Copy box describing the region to transfer.
 * @suffix: Surface DMA command suffix.
 */
struct vmw_surface_dma {
	SVGA3dCmdHeader header;
	SVGA3dCmdSurfaceDMA body;
	SVGA3dCopyBox cb;
	SVGA3dCmdSurfaceDMASuffix suffix;
};

/**
 * struct vmw_surface_define - SVGA3D Surface Define command
 *
 * @header: SVGA3D command header.
 * @body:   Surface define command body.
 */
struct vmw_surface_define {
	SVGA3dCmdHeader header;
	SVGA3dCmdDefineSurface body;
};

/**
 * struct vmw_surface_destroy - SVGA3D Surface Destroy command
 *
 * @header: SVGA3D command header.
 * @body:   Surface destroy command body.
 */
struct vmw_surface_destroy {
	SVGA3dCmdHeader header;
	SVGA3dCmdDestroySurface body;
};


/**
 * vmw_surface_dma_size - Compute fifo size for a dma command.
 *
 * @srf: Pointer to a struct vmw_surface
 *
 * Computes the required size for a surface dma command for backup or
 * restoration of the surface represented by @srf.
 */
static inline uint32_t vmw_surface_dma_size(const struct vmw_surface *srf)
{
	return srf->num_sizes * sizeof(struct vmw_surface_dma);
}


/**
 * vmw_surface_define_size - Compute fifo size for a surface define command.
 *
 * @srf: Pointer to a struct vmw_surface
 *
 * Computes the required size for a surface define command for the definition
 * of the surface represented by @srf.
 */
static inline uint32_t vmw_surface_define_size(const struct vmw_surface *srf)
{
	return sizeof(struct vmw_surface_define) + srf->num_sizes *
		sizeof(SVGA3dSize);
}


/**
 * vmw_surface_destroy_size - Compute fifo size for a surface destroy command.
 *
 * Computes the required size for a surface destroy command for the destruction
 * of a hw surface.
 */
static inline uint32_t vmw_surface_destroy_size(void)
{
	return sizeof(struct vmw_surface_destroy);
}

/**
 * vmw_surface_destroy_encode - Encode a surface_destroy command.
 *
 * @id: The surface id
 * @cmd_space: Pointer to memory area in which the commands should be encoded.
 */
static void vmw_surface_destroy_encode(uint32_t id,
				       void *cmd_space)
{
	struct vmw_surface_destroy *cmd = (struct vmw_surface_destroy *)
		cmd_space;

	cmd->header.id = SVGA_3D_CMD_SURFACE_DESTROY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.sid = id;
}

/**
 * vmw_surface_define_encode - Encode a surface_define command.
 *
 * @srf: Pointer to a struct vmw_surface object.
 * @cmd_space: Pointer to memory area in which the commands should be encoded.
 */
static void vmw_surface_define_encode(const struct vmw_surface *srf,
				      void *cmd_space)
{
	struct vmw_surface_define *cmd = (struct vmw_surface_define *)
		cmd_space;
	struct drm_vmw_size *src_size;
	SVGA3dSize *cmd_size;
	uint32_t cmd_len;
	int i;

	cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);

	cmd->header.id = SVGA_3D_CMD_SURFACE_DEFINE;
	cmd->header.size = cmd_len;
	cmd->body.sid = srf->res.id;
	cmd->body.surfaceFlags = srf->flags;
	cmd->body.format = srf->format;
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
		cmd->body.face[i].numMipLevels = srf->mip_levels[i];

	cmd += 1;
	cmd_size = (SVGA3dSize *) cmd;
	src_size = srf->sizes;

	for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
		cmd_size->width = src_size->width;
		cmd_size->height = src_size->height;
		cmd_size->depth = src_size->depth;
	}
}

/**
 * vmw_surface_dma_encode - Encode a surface_dma command.
 *
 * @srf: Pointer to a struct vmw_surface object.
 * @cmd_space: Pointer to memory area in which the commands should be encoded.
 * @ptr: Pointer to an SVGAGuestPtr indicating where the surface contents
 * should be placed or read from.
 * @to_surface: Boolean whether to DMA to the surface or from the surface.
 */
static void vmw_surface_dma_encode(struct vmw_surface *srf,
				   void *cmd_space,
				   const SVGAGuestPtr *ptr,
				   bool to_surface)
{
	uint32_t i;
	struct vmw_surface_dma *cmd = (struct vmw_surface_dma *)cmd_space;
	const struct svga3d_surface_desc *desc =
		svga3dsurface_get_desc(srf->format);

	for (i = 0; i < srf->num_sizes; ++i) {
		SVGA3dCmdHeader *header = &cmd->header;
		SVGA3dCmdSurfaceDMA *body = &cmd->body;
		SVGA3dCopyBox *cb = &cmd->cb;
		SVGA3dCmdSurfaceDMASuffix *suffix = &cmd->suffix;
		const struct vmw_surface_offset *cur_offset = &srf->offsets[i];
		const struct drm_vmw_size *cur_size = &srf->sizes[i];

		header->id = SVGA_3D_CMD_SURFACE_DMA;
		header->size = sizeof(*body) + sizeof(*cb) + sizeof(*suffix);

		body->guest.ptr = *ptr;
		body->guest.ptr.offset += cur_offset->bo_offset;
		body->guest.pitch = svga3dsurface_calculate_pitch(desc,
								  cur_size);
		body->host.sid = srf->res.id;
		body->host.face = cur_offset->face;
		body->host.mipmap = cur_offset->mip;
		body->transfer = ((to_surface) ?  SVGA3D_WRITE_HOST_VRAM :
				  SVGA3D_READ_HOST_VRAM);
		cb->x = 0;
		cb->y = 0;
		cb->z = 0;
		cb->srcx = 0;
		cb->srcy = 0;
		cb->srcz = 0;
		cb->w = cur_size->width;
		cb->h = cur_size->height;
		cb->d = cur_size->depth;

		suffix->suffixSize = sizeof(*suffix);
		suffix->maximumOffset =
			svga3dsurface_get_image_buffer_size(desc, cur_size,
							    body->guest.pitch);
		suffix->flags.discard = 0;
		suffix->flags.unsynchronized = 0;
		suffix->flags.reserved = 0;
		++cmd;
	}
}


/**
 * vmw_hw_surface_destroy - destroy a Device surface
 *
 * @res:        Pointer to a struct vmw_resource embedded in a struct
 *              vmw_surface.
 *
 * Destroys the device surface associated with a struct vmw_surface, if
 * any, and adjusts accounting and resource count accordingly.
 */
static void vmw_hw_surface_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	void *cmd;

	if (res->func->destroy == vmw_gb_surface_destroy) {
		(void) vmw_gb_surface_destroy(res);
		return;
	}

	if (res->id != -1) {
		cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size());
		if (unlikely(cmd == NULL)) {
			DRM_ERROR("Failed reserving FIFO space for surface "
				  "destruction.\n");
			return;
		}

		vmw_surface_destroy_encode(res->id, cmd);
		vmw_fifo_commit(dev_priv, vmw_surface_destroy_size());

		/*
		 * TODO: Make used_memory_size atomic, or use a separate
		 * lock, to avoid taking dev_priv::cmdbuf_mutex in
		 * the destroy path.
		 */

		mutex_lock(&dev_priv->cmdbuf_mutex);
		dev_priv->used_memory_size -= res->backup_size;
		mutex_unlock(&dev_priv->cmdbuf_mutex);
	}
	vmw_fifo_resource_dec(dev_priv);
}

/**
 * vmw_legacy_srf_create - Create a device surface as part of the
 * resource validation process.
 *
 * @res: Pointer to a struct vmw_resource embedded in a struct vmw_surface.
 *
 * If the surface doesn't have a hw id, allocate one and encode a surface
 * define command to create the device surface.
 *
 * Returns -EBUSY if there wasn't sufficient device resources to
 * complete the validation. Retry after freeing up resources.
 *
 * May return other errors if the kernel is out of guest resources.
 */
static int vmw_legacy_srf_create(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_surface *srf;
	uint32_t submit_size;
	uint8_t *cmd;
	int ret;

	if (likely(res->id != -1))
		return 0;

	srf = vmw_res_to_srf(res);
	if (unlikely(dev_priv->used_memory_size + res->backup_size >=
		     dev_priv->memory_size))
		return -EBUSY;

	/*
	 * Alloc id for the resource.
	 */

	ret = vmw_resource_alloc_id(res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a surface id.\n");
		goto out_no_id;
	}

	if (unlikely(res->id >= SVGA3D_MAX_SURFACE_IDS)) {
		ret = -EBUSY;
		goto out_no_fifo;
	}

	/*
	 * Encode surface define commands.
	 */

	submit_size = vmw_surface_define_size(srf);
	cmd = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "creation.\n");
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	vmw_surface_define_encode(srf, cmd);
	vmw_fifo_commit(dev_priv, submit_size);
	/*
	 * Surface memory usage accounting.
	 */

	dev_priv->used_memory_size += res->backup_size;
	return 0;

out_no_fifo:
	vmw_resource_release_id(res);
out_no_id:
	return ret;
}

/**
 * vmw_legacy_srf_dma - Copy backup data to or from a legacy surface.
 *
 * @res:            Pointer to a struct vmw_resource embedded in a struct
 *                  vmw_surface.
 * @val_buf:        Pointer to a struct ttm_validate_buffer containing
 *                  information about the backup buffer.
 * @bind:           Boolean whether to DMA to the surface.
 *
 * Transfer backup data to or from a legacy surface as part of the
 * validation process.
 * May return other errors if the kernel is out of guest resources.
 * The backup buffer will be fenced or idle upon successful completion,
 * and if the surface needs persistent backup storage, the backup buffer
 * will also be returned reserved iff @bind is true.
 */
static int vmw_legacy_srf_dma(struct vmw_resource *res,
			      struct ttm_validate_buffer *val_buf,
			      bool bind)
{
	SVGAGuestPtr ptr;
	struct vmw_fence_obj *fence;
	uint32_t submit_size;
	struct vmw_surface *srf = vmw_res_to_srf(res);
	uint8_t *cmd;
	struct vmw_private *dev_priv = res->dev_priv;

	BUG_ON(val_buf->bo == NULL);

	submit_size = vmw_surface_dma_size(srf);
	cmd = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "DMA.\n");
		return -ENOMEM;
	}
	vmw_bo_get_guest_ptr(val_buf->bo, &ptr);
	vmw_surface_dma_encode(srf, cmd, &ptr, bind);

	vmw_fifo_commit(dev_priv, submit_size);

	/*
	 * Create a fence object and fence the backup buffer.
	 */

	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
					  &fence, NULL);

	vmw_fence_single_bo(val_buf->bo, fence);

	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	return 0;
}

/**
 * vmw_legacy_srf_bind - Perform a legacy surface bind as part of the
 *                       surface validation process.
 *
 * @res:            Pointer to a struct vmw_resource embedded in a struct
 *                  vmw_surface.
 * @val_buf:        Pointer to a struct ttm_validate_buffer containing
 *                  information about the backup buffer.
 *
 * This function will copy backup data to the surface if the
 * backup buffer is dirty.
 */
static int vmw_legacy_srf_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf)
{
	if (!res->backup_dirty)
		return 0;

	return vmw_legacy_srf_dma(res, val_buf, true);
}


/**
 * vmw_legacy_srf_unbind - Perform a legacy surface unbind as part of the
 *                         surface eviction process.
 *
 * @res:            Pointer to a struct vmw_resource embedded in a struct
 *                  vmw_surface.
 * @readback:       Whether to copy the surface contents back to the
 *                  backup buffer before unbinding.
 * @val_buf:        Pointer to a struct ttm_validate_buffer containing
 *                  information about the backup buffer.
 *
 * This function will copy backup data from the surface.
 */
static int vmw_legacy_srf_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf)
{
	if (unlikely(readback))
		return vmw_legacy_srf_dma(res, val_buf, false);
	return 0;
}

/**
 * vmw_legacy_srf_destroy - Destroy a device surface as part of a
 *                          resource eviction process.
 *
 * @res:            Pointer to a struct vmw_resource embedded in a struct
 *                  vmw_surface.
 */
static int vmw_legacy_srf_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	uint32_t submit_size;
	uint8_t *cmd;

	BUG_ON(res->id == -1);

	/*
	 * Encode the surface destroy command.
	 */

	submit_size = vmw_surface_destroy_size();
	cmd = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "eviction.\n");
		return -ENOMEM;
	}

	vmw_surface_destroy_encode(res->id, cmd);
	vmw_fifo_commit(dev_priv, submit_size);

	/*
	 * Surface memory usage accounting.
	 */

	dev_priv->used_memory_size -= res->backup_size;

	/*
	 * Release the surface ID.
	 */

	vmw_resource_release_id(res);

	return 0;
}


/**
 * vmw_surface_init - initialize a struct vmw_surface
 *
 * @dev_priv:       Pointer to a device private struct.
 * @srf:            Pointer to the struct vmw_surface to initialize.
 * @res_free:       Pointer to a resource destructor used to free
 *                  the object.
 */
static int vmw_surface_init(struct vmw_private *dev_priv,
			    struct vmw_surface *srf,
			    void (*res_free) (struct vmw_resource *res))
{
	int ret;
	struct vmw_resource *res = &srf->res;

	BUG_ON(res_free == NULL);
	if (!dev_priv->has_mob)
		vmw_fifo_resource_inc(dev_priv);
	ret = vmw_resource_init(dev_priv, res, true, res_free,
				(dev_priv->has_mob) ? &vmw_gb_surface_func :
				&vmw_legacy_surface_func);

	if (unlikely(ret != 0)) {
		if (!dev_priv->has_mob)
			vmw_fifo_resource_dec(dev_priv);
		res_free(res);
		return ret;
	}

	/*
	 * The surface won't be visible to hardware until a
	 * surface validate.
	 */

	INIT_LIST_HEAD(&srf->view_list);
	vmw_resource_activate(res, vmw_hw_surface_destroy);
	return ret;
}

/**
 * vmw_user_surface_base_to_res - TTM base object to resource converter for
 *                                user visible surfaces
 *
 * @base:           Pointer to a TTM base object
 *
 * Returns the struct vmw_resource embedded in a struct vmw_surface
 * for the user-visible object identified by the TTM base object @base.
 */
static struct vmw_resource *
vmw_user_surface_base_to_res(struct ttm_base_object *base)
{
	return &(container_of(base, struct vmw_user_surface,
			      prime.base)->srf.res);
}

/**
 * vmw_user_surface_free - User visible surface resource destructor
 *
 * @res:            A struct vmw_resource embedded in a struct vmw_surface.
 */
static void vmw_user_surface_free(struct vmw_resource *res)
{
	struct vmw_surface *srf = vmw_res_to_srf(res);
	struct vmw_user_surface *user_srf =
	    container_of(srf, struct vmw_user_surface, srf);
	struct vmw_private *dev_priv = srf->res.dev_priv;
	uint32_t size = user_srf->size;

	if (user_srf->master)
		drm_master_put(&user_srf->master);
	kfree(srf->offsets);
	kfree(srf->sizes);
	kfree(srf->snooper.image);
	ttm_prime_object_kfree(user_srf, prime);
	ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
}

/**
 * vmw_user_surface_base_release - User visible surface TTM base object
 *                                 destructor
 *
 * @p_base:         Pointer to a pointer to a TTM base object
 *                  embedded in a struct vmw_user_surface.
 *
 * Drops the base object's reference on its resource, and the
 * pointer pointed to by *p_base is set to NULL.
 */
static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_surface *user_srf =
	    container_of(base, struct vmw_user_surface, prime.base);
	struct vmw_resource *res = &user_srf->srf.res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

/**
 * vmw_surface_destroy_ioctl - Ioctl function implementing
 *                             the user surface destroy functionality.
 *
 * @dev:            Pointer to a struct drm_device.
 * @data:           Pointer to data copied from / to user-space.
 * @file_priv:      Pointer to a drm file private structure.
 */
int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

	return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE);
}

/**
 * vmw_surface_define_ioctl - Ioctl function implementing
 *                            the user surface define functionality.
 *
 * @dev:            Pointer to a struct drm_device.
 * @data:           Pointer to data copied from / to user-space.
 * @file_priv:      Pointer to a drm file private structure.
 */
int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_surface *user_srf;
	struct vmw_surface *srf;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	union drm_vmw_surface_create_arg *arg =
	    (union drm_vmw_surface_create_arg *)data;
	struct drm_vmw_surface_create_req *req = &arg->req;
	struct drm_vmw_surface_arg *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct drm_vmw_size __user *user_sizes;
	int ret;
	int i, j;
	uint32_t cur_bo_offset;
	struct drm_vmw_size *cur_size;
	struct vmw_surface_offset *cur_offset;
	uint32_t num_sizes;
	uint32_t size;
	const struct svga3d_surface_desc *desc;

	if (unlikely(vmw_user_surface_size == 0))
		vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
			128;

	num_sizes = 0;
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
		num_sizes += req->mip_levels[i];

	if (num_sizes > DRM_VMW_MAX_SURFACE_FACES *
	    DRM_VMW_MAX_MIP_LEVELS)
		return -EINVAL;

	size = vmw_user_surface_size + 128 +
		ttm_round_pot(num_sizes * sizeof(struct drm_vmw_size)) +
		ttm_round_pot(num_sizes * sizeof(struct vmw_surface_offset));


	desc = svga3dsurface_get_desc(req->format);
	if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) {
		DRM_ERROR("Invalid surface format for surface creation.\n");
		DRM_ERROR("Format requested is: %d\n", req->format);
		return -EINVAL;
	}

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
				   size, false, true);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Out of graphics memory for surface"
				  " creation.\n");
		goto out_unlock;
	}

	user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
	if (unlikely(user_srf == NULL)) {
		ret = -ENOMEM;
		goto out_no_user_srf;
	}

	srf = &user_srf->srf;
	res = &srf->res;

	srf->flags = req->flags;
	srf->format = req->format;
	srf->scanout = req->scanout;

	memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
	srf->num_sizes = num_sizes;
	user_srf->size = size;

	srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL);
	if (unlikely(srf->sizes == NULL)) {
		ret = -ENOMEM;
		goto out_no_sizes;
	}
	srf->offsets = kmalloc(srf->num_sizes * sizeof(*srf->offsets),
			       GFP_KERNEL);
	if (unlikely(srf->offsets == NULL)) {
		ret = -ENOMEM;
		goto out_no_offsets;
	}

	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
	    req->size_addr;

	ret = copy_from_user(srf->sizes, user_sizes,
			     srf->num_sizes * sizeof(*srf->sizes));
	if (unlikely(ret != 0)) {
		ret = -EFAULT;
		goto out_no_copy;
	}

	srf->base_size = *srf->sizes;
	srf->autogen_filter = SVGA3D_TEX_FILTER_NONE;
	srf->multisample_count = 0;

	cur_bo_offset = 0;
	cur_offset = srf->offsets;
	cur_size = srf->sizes;

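	/*
	 * Walk all faces and mip levels, computing each level's offset
	 * into the backing store and the total backup buffer size.
	 */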
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
		for (j = 0; j < srf->mip_levels[i]; ++j) {
			uint32_t stride = svga3dsurface_calculate_pitch
				(desc, cur_size);

			cur_offset->face = i;
			cur_offset->mip = j;
			cur_offset->bo_offset = cur_bo_offset;
			cur_bo_offset += svga3dsurface_get_image_buffer_size
				(desc, cur_size, stride);
			++cur_offset;
			++cur_size;
		}
	}
	res->backup_size = cur_bo_offset;
	if (srf->scanout &&
	    srf->num_sizes == 1 &&
	    srf->sizes[0].width == 64 &&
	    srf->sizes[0].height == 64 &&
	    srf->format == SVGA3D_A8R8G8B8) {

		srf->snooper.image = kmalloc(64 * 64 * 4, GFP_KERNEL);
		/* clear the image */
		if (srf->snooper.image) {
			memset(srf->snooper.image, 0x00, 64 * 64 * 4);
		} else {
			DRM_ERROR("Failed to allocate cursor_image\n");
			ret = -ENOMEM;
			goto out_no_copy;
		}
	} else {
		srf->snooper.image = NULL;
	}
	srf->snooper.crtc = NULL;

	user_srf->prime.base.shareable = false;
	user_srf->prime.base.tfile = NULL;
	if (drm_is_primary_client(file_priv))
		user_srf->master = drm_master_get(file_priv->master);

	/*
	 * From this point, the generic resource management functions
	 * destroy the object on failure.
	 */

	ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
	if (unlikely(ret != 0))
		goto out_unlock;

	/*
	 * A gb-aware client referencing a shared surface will
	 * expect a backup buffer to be present.
	 */
	if (dev_priv->has_mob && req->shareable) {
		uint32_t backup_handle;

		ret = vmw_user_dmabuf_alloc(dev_priv, tfile,
					    res->backup_size,
					    true,
					    &backup_handle,
					    &res->backup);
		if (unlikely(ret != 0)) {
			vmw_resource_unreference(&res);
			goto out_unlock;
		}
	}

	tmp = vmw_resource_reference(&srf->res);
	ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
				    req->shareable, VMW_RES_SURFACE,
				    &vmw_user_surface_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		vmw_resource_unreference(&res);
		goto out_unlock;
	}

	rep->sid = user_srf->prime.base.hash.key;
	vmw_resource_unreference(&res);

	ttm_read_unlock(&dev_priv->reservation_sem);
	return 0;
out_no_copy:
	kfree(srf->offsets);
out_no_offsets:
	kfree(srf->sizes);
out_no_sizes:
	ttm_prime_object_kfree(user_srf, prime);
out_no_user_srf:
	ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
out_unlock:
	ttm_read_unlock(&dev_priv->reservation_sem);
	return ret;
}


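/**
 * vmw_surface_handle_reference - Look up and reference a surface handle
 *
 * @dev_priv:       Pointer to a device private struct.
 * @file_priv:      Pointer to a drm file private structure.
 * @u_handle:       The user-space handle or prime fd to look up.
 * @handle_type:    Whether @u_handle is a legacy handle or a prime fd.
 * @base_p:         On success, assigned the referenced TTM base object.
 *
 * For legacy handles, a TTM_REF_USAGE reference is added for the calling
 * client, and access is checked against the authenticating master.
 * Returns 0 on success, negative error code on failure.
 */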
static int
vmw_surface_handle_reference(struct vmw_private *dev_priv,
			     struct drm_file *file_priv,
			     uint32_t u_handle,
			     enum drm_vmw_handle_type handle_type,
			     struct ttm_base_object **base_p)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_user_surface *user_srf;
	uint32_t handle;
	struct ttm_base_object *base;
	int ret;

	if (handle_type == DRM_VMW_HANDLE_PRIME) {
		ret = ttm_prime_fd_to_handle(tfile, u_handle, &handle);
		if (unlikely(ret != 0))
			return ret;
	} else {
		if (unlikely(drm_is_render_client(file_priv))) {
			DRM_ERROR("Render client refused legacy "
				  "surface reference.\n");
			return -EACCES;
		}
		if (ACCESS_ONCE(vmw_fpriv(file_priv)->locked_master)) {
			DRM_ERROR("Locked master refused legacy "
				  "surface reference.\n");
			return -EACCES;
		}

		handle = u_handle;
	}

	ret = -EINVAL;
	base = ttm_base_object_lookup_for_ref(dev_priv->tdev, handle);
	if (unlikely(base == NULL)) {
		DRM_ERROR("Could not find surface to reference.\n");
		goto out_no_lookup;
	}

	if (unlikely(ttm_base_object_type(base) != VMW_RES_SURFACE)) {
		DRM_ERROR("Referenced object is not a surface.\n");
		goto out_bad_resource;
	}

	if (handle_type != DRM_VMW_HANDLE_PRIME) {
		user_srf = container_of(base, struct vmw_user_surface,
					prime.base);

		/*
		 * Make sure the surface creator has the same
		 * authenticating master.
		 */
		if (drm_is_primary_client(file_priv) &&
		    user_srf->master != file_priv->master) {
			DRM_ERROR("Trying to reference surface outside of"
				  " master domain.\n");
			ret = -EACCES;
			goto out_bad_resource;
		}

		ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Could not add a reference to a surface.\n");
			goto out_bad_resource;
		}
	}

	*base_p = base;
	return 0;

out_bad_resource:
	ttm_base_object_unref(&base);
out_no_lookup:
	if (handle_type == DRM_VMW_HANDLE_PRIME)
		(void) ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);

	return ret;
}

/**
 * vmw_surface_reference_ioctl - Ioctl function implementing
 *                               the user surface reference functionality.
 *
 * @dev:            Pointer to a struct drm_device.
 * @data:           Pointer to data copied from / to user-space.
 * @file_priv:      Pointer to a drm file private structure.
 */
int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	union drm_vmw_surface_reference_arg *arg =
	    (union drm_vmw_surface_reference_arg *)data;
	struct drm_vmw_surface_arg *req = &arg->req;
	struct drm_vmw_surface_create_req *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_surface *srf;
	struct vmw_user_surface *user_srf;
	struct drm_vmw_size __user *user_sizes;
	struct ttm_base_object *base;
	int ret;

	ret = vmw_surface_handle_reference(dev_priv, file_priv, req->sid,
					   req->handle_type, &base);
	if (unlikely(ret != 0))
		return ret;

	user_srf = container_of(base, struct vmw_user_surface, prime.base);
	srf = &user_srf->srf;

	rep->flags = srf->flags;
	rep->format = srf->format;
	memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
	    rep->size_addr;

	if (user_sizes)
		ret = copy_to_user(user_sizes, &srf->base_size,
				   sizeof(srf->base_size));
	if (unlikely(ret != 0)) {
		DRM_ERROR("copy_to_user failed %p %u\n",
			  user_sizes, srf->num_sizes);
		ttm_ref_object_base_unref(tfile, base->hash.key, TTM_REF_USAGE);
		ret = -EFAULT;
	}

	ttm_base_object_unref(&base);

	return ret;
}

/**
 * vmw_gb_surface_create - Create a guest backed device surface as
 *                         part of the resource validation process.
 *
 * @res: Pointer to a struct vmw_resource embedded in a struct vmw_surface.
 */
static int vmw_gb_surface_create(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_surface *srf = vmw_res_to_srf(res);
	uint32_t cmd_len, cmd_id, submit_len;
	int ret;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineGBSurface body;
	} *cmd;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineGBSurface_v2 body;
	} *cmd2;

	if (likely(res->id != -1))
		return 0;

	vmw_fifo_resource_inc(dev_priv);
	ret = vmw_resource_alloc_id(res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a surface id.\n");
		goto out_no_id;
	}

	if (unlikely(res->id >= VMWGFX_NUM_GB_SURFACE)) {
		ret = -EBUSY;
		goto out_no_fifo;
	}

	if (srf->array_size > 0) {
		/* has_dx was checked at surface creation time. */
		cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE_V2;
		cmd_len = sizeof(cmd2->body);
		submit_len = sizeof(*cmd2);
	} else {
		cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE;
		cmd_len = sizeof(cmd->body);
		submit_len = sizeof(*cmd);
	}

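	/*
	 * cmd and cmd2 alias the same FIFO reservation; only the layout
	 * matching the array_size check above is actually written below.
	 */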
	cmd = vmw_fifo_reserve(dev_priv, submit_len);
	cmd2 = (typeof(cmd2))cmd;
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "creation.\n");
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	if (srf->array_size > 0) {
		cmd2->header.id = cmd_id;
		cmd2->header.size = cmd_len;
		cmd2->body.sid = srf->res.id;
		cmd2->body.surfaceFlags = srf->flags;
		cmd2->body.format = cpu_to_le32(srf->format);
		cmd2->body.numMipLevels = srf->mip_levels[0];
		cmd2->body.multisampleCount = srf->multisample_count;
		cmd2->body.autogenFilter = srf->autogen_filter;
		cmd2->body.size.width = srf->base_size.width;
		cmd2->body.size.height = srf->base_size.height;
		cmd2->body.size.depth = srf->base_size.depth;
		cmd2->body.arraySize = srf->array_size;
	} else {
		cmd->header.id = cmd_id;
		cmd->header.size = cmd_len;
		cmd->body.sid = srf->res.id;
		cmd->body.surfaceFlags = srf->flags;
		cmd->body.format = cpu_to_le32(srf->format);
		cmd->body.numMipLevels = srf->mip_levels[0];
		cmd->body.multisampleCount = srf->multisample_count;
		cmd->body.autogenFilter = srf->autogen_filter;
		cmd->body.size.width = srf->base_size.width;
		cmd->body.size.height = srf->base_size.height;
		cmd->body.size.depth = srf->base_size.depth;
	}

	vmw_fifo_commit(dev_priv, submit_len);

	return 0;

out_no_fifo:
	vmw_resource_release_id(res);
out_no_id:
	vmw_fifo_resource_dec(dev_priv);
	return ret;
}


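/**
 * vmw_gb_surface_bind - Bind a guest backed surface to its backup MOB
 *                       as part of the resource validation process.
 *
 * @res:            Pointer to a struct vmw_resource embedded in a struct
 *                  vmw_surface.
 * @val_buf:        Pointer to a struct ttm_validate_buffer containing
 *                  information about the backup buffer.
 */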
static int vmw_gb_surface_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBSurface body;
	} *cmd1;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdUpdateGBSurface body;
	} *cmd2;
	uint32_t submit_size;
	struct ttm_buffer_object *bo = val_buf->bo;

	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

	submit_size = sizeof(*cmd1) + (res->backup_dirty ? sizeof(*cmd2) : 0);

	cmd1 = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd1 == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "binding.\n");
		return -ENOMEM;
	}

	cmd1->header.id = SVGA_3D_CMD_BIND_GB_SURFACE;
	cmd1->header.size = sizeof(cmd1->body);
	cmd1->body.sid = res->id;
	cmd1->body.mobid = bo->mem.start;
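	/*
	 * If the backup buffer contains dirty data, follow the bind with
	 * an update command so the device picks up the new contents.
	 */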
	if (res->backup_dirty) {
		cmd2 = (void *) &cmd1[1];
		cmd2->header.id = SVGA_3D_CMD_UPDATE_GB_SURFACE;
		cmd2->header.size = sizeof(cmd2->body);
		cmd2->body.sid = res->id;
		res->backup_dirty = false;
	}
	vmw_fifo_commit(dev_priv, submit_size);

	return 0;
}

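/**
 * vmw_gb_surface_unbind - Unbind a guest backed surface from its backup
 *                         MOB as part of the resource eviction process.
 *
 * @res:            Pointer to a struct vmw_resource embedded in a struct
 *                  vmw_surface.
 * @readback:       Whether to copy the surface contents back to the
 *                  backup buffer before unbinding.
 * @val_buf:        Pointer to a struct ttm_validate_buffer containing
 *                  information about the backup buffer.
 */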
static int vmw_gb_surface_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct ttm_buffer_object *bo = val_buf->bo;
	struct vmw_fence_obj *fence;

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdReadbackGBSurface body;
	} *cmd1;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdInvalidateGBSurface body;
	} *cmd2;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBSurface body;
	} *cmd3;
	uint32_t submit_size;
	uint8_t *cmd;


	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

	submit_size = sizeof(*cmd3) + (readback ? sizeof(*cmd1) : sizeof(*cmd2));
	cmd = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "unbinding.\n");
		return -ENOMEM;
	}

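	/*
	 * Encode either a readback (preserving contents) or an invalidate
	 * (discarding contents), followed by a bind to SVGA3D_INVALID_ID,
	 * which detaches the surface from its MOB.
	 */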
	if (readback) {
		cmd1 = (void *) cmd;
		cmd1->header.id = SVGA_3D_CMD_READBACK_GB_SURFACE;
		cmd1->header.size = sizeof(cmd1->body);
		cmd1->body.sid = res->id;
		cmd3 = (void *) &cmd1[1];
	} else {
		cmd2 = (void *) cmd;
		cmd2->header.id = SVGA_3D_CMD_INVALIDATE_GB_SURFACE;
		cmd2->header.size = sizeof(cmd2->body);
		cmd2->body.sid = res->id;
		cmd3 = (void *) &cmd2[1];
	}

	cmd3->header.id = SVGA_3D_CMD_BIND_GB_SURFACE;
	cmd3->header.size = sizeof(cmd3->body);
	cmd3->body.sid = res->id;
	cmd3->body.mobid = SVGA3D_INVALID_ID;

	vmw_fifo_commit(dev_priv, submit_size);

	/*
	 * Create a fence object and fence the backup buffer.
	 */

	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
					  &fence, NULL);

	vmw_fence_single_bo(val_buf->bo, fence);

	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	return 0;
}

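/**
 * vmw_gb_surface_destroy - Destroy a guest backed device surface and
 *                          release its surface id.
 *
 * @res:            Pointer to a struct vmw_resource embedded in a struct
 *                  vmw_surface.
 *
 * Also scrubs any views and bindings referring to the surface before
 * issuing the destroy command.
 */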
static int vmw_gb_surface_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_surface *srf = vmw_res_to_srf(res);
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyGBSurface body;
	} *cmd;

	if (likely(res->id == -1))
		return 0;

	mutex_lock(&dev_priv->binding_mutex);
	vmw_view_surface_list_destroy(dev_priv, &srf->view_list);
	vmw_binding_res_list_scrub(&res->binding_head);

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "destruction.\n");
		mutex_unlock(&dev_priv->binding_mutex);
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_DESTROY_GB_SURFACE;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.sid = res->id;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	mutex_unlock(&dev_priv->binding_mutex);
	vmw_resource_release_id(res);
	vmw_fifo_resource_dec(dev_priv);

	return 0;
}


/**
 * vmw_gb_surface_define_ioctl - Ioctl function implementing
 *                               the user surface define functionality.
 *
 * @dev:            Pointer to a struct drm_device.
 * @data:           Pointer to data copied from / to user-space.
 * @file_priv:      Pointer to a drm file private structure.
 */
int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_surface *user_srf;
	struct vmw_surface *srf;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	union drm_vmw_gb_surface_create_arg *arg =
	    (union drm_vmw_gb_surface_create_arg *)data;
	struct drm_vmw_gb_surface_create_req *req = &arg->req;
	struct drm_vmw_gb_surface_create_rep *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;
	uint32_t size;
	uint32_t backup_handle;


	if (unlikely(vmw_user_surface_size == 0))
		vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
			128;

	size = vmw_user_surface_size + 128;

	/* Define a surface based on the parameters. */
	ret = vmw_surface_gb_priv_define(dev,
			size,
			req->svga3d_flags,
			req->format,
			req->drm_surface_flags & drm_vmw_surface_flag_scanout,
			req->mip_levels,
			req->multisample_count,
			req->array_size,
			req->base_size,
			&srf);
	if (unlikely(ret != 0))
		return ret;

	user_srf = container_of(srf, struct vmw_user_surface, srf);
	if (drm_is_primary_client(file_priv))
		user_srf->master = drm_master_get(file_priv->master);

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	res = &user_srf->srf.res;


	if (req->buffer_handle != SVGA3D_INVALID_ID) {
		ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle,
					     &res->backup);
		if (ret == 0 && res->backup->base.num_pages * PAGE_SIZE <
		    res->backup_size) {
			DRM_ERROR("Surface backup buffer is too small.\n");
			vmw_dmabuf_unreference(&res->backup);
			ret = -EINVAL;
			goto out_unlock;
		}
	} else if (req->drm_surface_flags & drm_vmw_surface_flag_create_buffer)
		ret = vmw_user_dmabuf_alloc(dev_priv, tfile,
					    res->backup_size,
					    req->drm_surface_flags &
					    drm_vmw_surface_flag_shareable,
					    &backup_handle,
					    &res->backup);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&res);
		goto out_unlock;
	}

	tmp = vmw_resource_reference(res);
	ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
				    req->drm_surface_flags &
				    drm_vmw_surface_flag_shareable,
				    VMW_RES_SURFACE,
				    &vmw_user_surface_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		vmw_resource_unreference(&res);
		goto out_unlock;
	}

	rep->handle      = user_srf->prime.base.hash.key;
	rep->backup_size = res->backup_size;
	if (res->backup) {
		rep->buffer_map_handle =
			drm_vma_node_offset_addr(&res->backup->base.vma_node);
		rep->buffer_size = res->backup->base.num_pages * PAGE_SIZE;
		rep->buffer_handle = backup_handle;
	} else {
		rep->buffer_map_handle = 0;
		rep->buffer_size = 0;
		rep->buffer_handle = SVGA3D_INVALID_ID;
	}

	vmw_resource_unreference(&res);

out_unlock:
	ttm_read_unlock(&dev_priv->reservation_sem);
	return ret;
}

/**
 * vmw_gb_surface_reference_ioctl - Ioctl function implementing
 *                                  the user surface reference functionality.
 *
 * @dev:            Pointer to a struct drm_device.
 * @data:           Pointer to data copied from / to user-space.
 * @file_priv:      Pointer to a drm file private structure.
 */
int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	union drm_vmw_gb_surface_reference_arg *arg =
	    (union drm_vmw_gb_surface_reference_arg *)data;
	struct drm_vmw_surface_arg *req = &arg->req;
	struct drm_vmw_gb_surface_ref_rep *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_surface *srf;
	struct vmw_user_surface *user_srf;
	struct ttm_base_object *base;
	uint32_t backup_handle;
	int ret;

	ret = vmw_surface_handle_reference(dev_priv, file_priv, req->sid,
					   req->handle_type, &base);
	if (unlikely(ret != 0))
		return ret;

	user_srf = container_of(base, struct vmw_user_surface, prime.base);
	srf = &user_srf->srf;
	if (srf->res.backup == NULL) {
		DRM_ERROR("Shared GB surface is missing a backup buffer.\n");
		ret = -EINVAL;
		goto out_bad_resource;
	}

	mutex_lock(&dev_priv->cmdbuf_mutex); /* Protect res->backup */
	ret = vmw_user_dmabuf_reference(tfile, srf->res.backup,
					&backup_handle);
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not add a reference to a GB surface "
			  "backup buffer.\n");
		(void) ttm_ref_object_base_unref(tfile, base->hash.key,
						 TTM_REF_USAGE);
		goto out_bad_resource;
	}

	rep->creq.svga3d_flags = srf->flags;
	rep->creq.format = srf->format;
	rep->creq.mip_levels = srf->mip_levels[0];
	rep->creq.drm_surface_flags = 0;
	rep->creq.multisample_count = srf->multisample_count;
	rep->creq.autogen_filter = srf->autogen_filter;
	rep->creq.array_size = srf->array_size;
	rep->creq.buffer_handle = backup_handle;
	rep->creq.base_size = srf->base_size;
	rep->crep.handle = user_srf->prime.base.hash.key;
	rep->crep.backup_size = srf->res.backup_size;
	rep->crep.buffer_handle = backup_handle;
	rep->crep.buffer_map_handle =
		drm_vma_node_offset_addr(&srf->res.backup->base.vma_node);
	rep->crep.buffer_size = srf->res.backup->base.num_pages * PAGE_SIZE;

out_bad_resource:
	ttm_base_object_unref(&base);

	return ret;
}

/**
 * vmw_surface_gb_priv_define - Define a private GB surface
 *
 * @dev:  Pointer to a struct drm_device
 * @user_accounting_size:  Used to track user-space memory usage, set
 *                         to 0 for kernel mode only memory
 * @svga3d_flags: SVGA3d surface flags for the device
 * @format: requested surface format
 * @for_scanout: true if intended to be used for scanout buffer
 * @num_mip_levels:  number of MIP levels
 * @multisample_count: number of samples for multisample surfaces
 * @array_size: Surface array size.
 * @size: width, height, depth of the surface requested
 * @srf_out: allocated surface. Set to NULL on failure.
 *
 * GB surfaces allocated by this function will not have a user mode handle, and
 * thus will only be visible to vmwgfx.  For optimization reasons the
 * surface may later be given a user mode handle by another function to make
 * it available to user mode drivers.
 */
int vmw_surface_gb_priv_define(struct drm_device *dev,
			       uint32_t user_accounting_size,
			       uint32_t svga3d_flags,
			       SVGA3dSurfaceFormat format,
			       bool for_scanout,
			       uint32_t num_mip_levels,
			       uint32_t multisample_count,
			       uint32_t array_size,
			       struct drm_vmw_size size,
			       struct vmw_surface **srf_out)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_surface *user_srf;
	struct vmw_surface *srf;
	int ret;
	u32 num_layers;

	*srf_out = NULL;

	if (for_scanout) {
		if (!svga3dsurface_is_screen_target_format(format)) {
			DRM_ERROR("Invalid Screen Target surface format.\n");
			return -EINVAL;
		}
	} else {
		const struct svga3d_surface_desc *desc;

		desc = svga3dsurface_get_desc(format);
		if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) {
			DRM_ERROR("Invalid surface format.\n");
			return -EINVAL;
		}
	}

	/* array_size must be zero for non-GL3 hosts. */
	if (array_size > 0 && !dev_priv->has_dx) {
		DRM_ERROR("Tried to create DX surface on non-DX host.\n");
		return -EINVAL;
	}

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
				   user_accounting_size, false, true);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Out of graphics memory for surface"
				  " creation.\n");
		goto out_unlock;
	}

	user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
	if (unlikely(user_srf == NULL)) {
		ret = -ENOMEM;
		goto out_no_user_srf;
	}

	*srf_out  = &user_srf->srf;
	user_srf->size = user_accounting_size;
	user_srf->prime.base.shareable = false;
	user_srf->prime.base.tfile     = NULL;

	srf = &user_srf->srf;
	srf->flags             = svga3d_flags;
	srf->format            = format;
	srf->scanout           = for_scanout;
	srf->mip_levels[0]     = num_mip_levels;
	srf->num_sizes         = 1;
	srf->sizes             = NULL;
	srf->offsets           = NULL;
	srf->base_size         = size;
	srf->autogen_filter    = SVGA3D_TEX_FILTER_NONE;
	srf->array_size        = array_size;
	srf->multisample_count = multisample_count;

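	/*
	 * Determine the number of backing store layers: array surfaces
	 * use array_size, cube maps use one layer per face, and all
	 * other surfaces use a single layer.
	 */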
	if (array_size)
		num_layers = array_size;
	else if (svga3d_flags & SVGA3D_SURFACE_CUBEMAP)
		num_layers = SVGA3D_MAX_SURFACE_FACES;
	else
		num_layers = 1;

	srf->res.backup_size   =
		svga3dsurface_get_serialized_size(srf->format,
						  srf->base_size,
						  srf->mip_levels[0],
						  num_layers);

	if (srf->flags & SVGA3D_SURFACE_BIND_STREAM_OUTPUT)
		srf->res.backup_size += sizeof(SVGA3dDXSOState);

	if (dev_priv->active_display_unit == vmw_du_screen_target &&
	    for_scanout)
		srf->flags |= SVGA3D_SURFACE_SCREENTARGET;

	/*
	 * From this point, the generic resource management functions
	 * destroy the object on failure.
	 */
	ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);

	ttm_read_unlock(&dev_priv->reservation_sem);
	return ret;

out_no_user_srf:
	ttm_mem_global_free(vmw_mem_glob(dev_priv), user_accounting_size);

out_unlock:
	ttm_read_unlock(&dev_priv->reservation_sem);
	return ret;
}