// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_placement.h>

#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_so.h"
#include "vmwgfx_binding.h"
#include "device_include/svga3d_surfacedefs.h"

#define SVGA3D_FLAGS_64(upper32, lower32) \
	((((uint64_t)(upper32)) << 32) | (lower32))
#define SVGA3D_FLAGS_UPPER_32(svga3d_flags) ((svga3d_flags) >> 32)
#define SVGA3D_FLAGS_LOWER_32(svga3d_flags) \
	((svga3d_flags) & ((uint64_t)U32_MAX))
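
/*
 * Note: the three helpers above are intended to round-trip; as an
 * illustrative (not enforced) identity, for any 64-bit flag value v:
 *
 *	SVGA3D_FLAGS_64(SVGA3D_FLAGS_UPPER_32(v),
 *			SVGA3D_FLAGS_LOWER_32(v)) == v
 */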

/**
 * struct vmw_user_surface - User-space visible surface resource
 *
 * @prime:          The TTM prime object handling user-space visibility
 *                  and sharing.
 * @srf:            The surface metadata.
 * @size:           TTM accounting size for the surface.
 * @master:         Master of the creating client. Used for security check.
 * @backup_base:    The TTM base object of the backup buffer, if any.
 */
struct vmw_user_surface {
	struct ttm_prime_object prime;
	struct vmw_surface srf;
	uint32_t size;
	struct drm_master *master;
	struct ttm_base_object *backup_base;
};

/**
 * struct vmw_surface_offset - Backing store mip level offset info
 *
 * @face:           Surface face.
 * @mip:            Mip level.
 * @bo_offset:      Offset into backing store of this mip level.
 */
struct vmw_surface_offset {
	uint32_t face;
	uint32_t mip;
	uint32_t bo_offset;
};

/**
 * struct vmw_surface_dirty - Surface dirty-tracker
 * @cache: Cached layout information of the surface.
 * @size: Accounting size for the struct vmw_surface_dirty.
 * @num_subres: Number of subresources.
 * @boxes: Array of SVGA3dBoxes indicating dirty regions. One per subresource.
 */
struct vmw_surface_dirty {
	struct svga3dsurface_cache cache;
	size_t size;
	u32 num_subres;
	SVGA3dBox boxes[];
};
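
/*
 * A rough sketch of how the dirty tracker is used (assuming the
 * subresource numbering laid out by the svga3dsurface cache helpers,
 * where subres = layer * num_mip_levels + mip):
 *
 *	SVGA3dBox *box = &dirty->boxes[subres];
 *
 * Each box holds the bounding box of the dirty region of one
 * subresource; an empty box means the subresource is clean.
 */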

static void vmw_user_surface_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_surface_base_to_res(struct ttm_base_object *base);
static int vmw_legacy_srf_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf);
static int vmw_legacy_srf_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf);
static int vmw_legacy_srf_create(struct vmw_resource *res);
static int vmw_legacy_srf_destroy(struct vmw_resource *res);
static int vmw_gb_surface_create(struct vmw_resource *res);
static int vmw_gb_surface_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf);
static int vmw_gb_surface_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf);
static int vmw_gb_surface_destroy(struct vmw_resource *res);
static int
vmw_gb_surface_define_internal(struct drm_device *dev,
			       struct drm_vmw_gb_surface_create_ext_req *req,
			       struct drm_vmw_gb_surface_create_rep *rep,
			       struct drm_file *file_priv);
static int
vmw_gb_surface_reference_internal(struct drm_device *dev,
				  struct drm_vmw_surface_arg *req,
				  struct drm_vmw_gb_surface_ref_ext_rep *rep,
				  struct drm_file *file_priv);

static void vmw_surface_dirty_free(struct vmw_resource *res);
static int vmw_surface_dirty_alloc(struct vmw_resource *res);
static int vmw_surface_dirty_sync(struct vmw_resource *res);
static void vmw_surface_dirty_range_add(struct vmw_resource *res, size_t start,
					size_t end);
static int vmw_surface_clean(struct vmw_resource *res);

static const struct vmw_user_resource_conv user_surface_conv = {
	.object_type = VMW_RES_SURFACE,
	.base_obj_to_res = vmw_user_surface_base_to_res,
	.res_free = vmw_user_surface_free
};

const struct vmw_user_resource_conv *user_surface_converter =
	&user_surface_conv;


static uint64_t vmw_user_surface_size;

static const struct vmw_res_func vmw_legacy_surface_func = {
	.res_type = vmw_res_surface,
	.needs_backup = false,
	.may_evict = true,
	.prio = 1,
	.dirty_prio = 1,
	.type_name = "legacy surfaces",
	.backup_placement = &vmw_srf_placement,
	.create = &vmw_legacy_srf_create,
	.destroy = &vmw_legacy_srf_destroy,
	.bind = &vmw_legacy_srf_bind,
	.unbind = &vmw_legacy_srf_unbind
};

static const struct vmw_res_func vmw_gb_surface_func = {
	.res_type = vmw_res_surface,
	.needs_backup = true,
	.may_evict = true,
	.prio = 1,
	.dirty_prio = 2,
	.type_name = "guest backed surfaces",
	.backup_placement = &vmw_mob_placement,
	.create = vmw_gb_surface_create,
	.destroy = vmw_gb_surface_destroy,
	.bind = vmw_gb_surface_bind,
	.unbind = vmw_gb_surface_unbind,
	.dirty_alloc = vmw_surface_dirty_alloc,
	.dirty_free = vmw_surface_dirty_free,
	.dirty_sync = vmw_surface_dirty_sync,
	.dirty_range_add = vmw_surface_dirty_range_add,
	.clean = vmw_surface_clean,
};

/**
 * struct vmw_surface_dma - SVGA3D DMA command
 */
struct vmw_surface_dma {
	SVGA3dCmdHeader header;
	SVGA3dCmdSurfaceDMA body;
	SVGA3dCopyBox cb;
	SVGA3dCmdSurfaceDMASuffix suffix;
};

/**
 * struct vmw_surface_define - SVGA3D Surface Define command
 */
struct vmw_surface_define {
	SVGA3dCmdHeader header;
	SVGA3dCmdDefineSurface body;
};

/**
 * struct vmw_surface_destroy - SVGA3D Surface Destroy command
 */
struct vmw_surface_destroy {
	SVGA3dCmdHeader header;
	SVGA3dCmdDestroySurface body;
};


/**
 * vmw_surface_dma_size - Compute fifo size for a dma command.
 *
 * @srf: Pointer to a struct vmw_surface
 *
 * Computes the required fifo size for the surface dma commands (one per
 * face and mip level image) used for backup or restoration of the
 * surface represented by @srf.
 */
static inline uint32_t vmw_surface_dma_size(const struct vmw_surface *srf)
{
	return srf->num_sizes * sizeof(struct vmw_surface_dma);
}


/**
 * vmw_surface_define_size - Compute fifo size for a surface define command.
 *
 * @srf: Pointer to a struct vmw_surface
 *
 * Computes the required size for a surface define command for the definition
 * of the surface represented by @srf.
 */
static inline uint32_t vmw_surface_define_size(const struct vmw_surface *srf)
{
	return sizeof(struct vmw_surface_define) + srf->num_sizes *
		sizeof(SVGA3dSize);
}


/**
 * vmw_surface_destroy_size - Compute fifo size for a surface destroy command.
 *
 * Computes the required size for a surface destroy command for the destruction
 * of a hw surface.
 */
static inline uint32_t vmw_surface_destroy_size(void)
{
	return sizeof(struct vmw_surface_destroy);
}

/**
 * vmw_surface_destroy_encode - Encode a surface_destroy command.
 *
 * @id: The surface id
 * @cmd_space: Pointer to memory area in which the commands should be encoded.
 */
static void vmw_surface_destroy_encode(uint32_t id,
				       void *cmd_space)
{
	struct vmw_surface_destroy *cmd = (struct vmw_surface_destroy *)
		cmd_space;

	cmd->header.id = SVGA_3D_CMD_SURFACE_DESTROY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.sid = id;
}

/**
 * vmw_surface_define_encode - Encode a surface_define command.
 *
 * @srf: Pointer to a struct vmw_surface object.
 * @cmd_space: Pointer to memory area in which the commands should be encoded.
 */
static void vmw_surface_define_encode(const struct vmw_surface *srf,
				      void *cmd_space)
{
	struct vmw_surface_define *cmd = (struct vmw_surface_define *)
		cmd_space;
	struct drm_vmw_size *src_size;
	SVGA3dSize *cmd_size;
	uint32_t cmd_len;
	int i;

	cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);

	cmd->header.id = SVGA_3D_CMD_SURFACE_DEFINE;
	cmd->header.size = cmd_len;
	cmd->body.sid = srf->res.id;
	/*
	 * Downcast of surfaceFlags, which were upcast when received from
	 * user-space, since the driver internally stores them as 64 bit.
	 * The legacy surface define command only supports 32 bit flags.
	 */
	cmd->body.surfaceFlags = (SVGA3dSurface1Flags)srf->flags;
	cmd->body.format = srf->format;
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
		cmd->body.face[i].numMipLevels = srf->mip_levels[i];

	cmd += 1;
	cmd_size = (SVGA3dSize *) cmd;
	src_size = srf->sizes;

	for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
		cmd_size->width = src_size->width;
		cmd_size->height = src_size->height;
		cmd_size->depth = src_size->depth;
	}
}
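
/*
 * For reference, the command encoded above occupies one contiguous
 * fifo area laid out as
 *
 *	SVGA3dCmdHeader | SVGA3dCmdDefineSurface | SVGA3dSize[num_sizes]
 *
 * which is exactly the amount of space vmw_surface_define_size()
 * reserves.
 */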

/**
 * vmw_surface_dma_encode - Encode a surface_dma command.
 *
 * @srf: Pointer to a struct vmw_surface object.
 * @cmd_space: Pointer to memory area in which the commands should be encoded.
 * @ptr: Pointer to an SVGAGuestPtr indicating where the surface contents
 * should be placed or read from.
 * @to_surface: Boolean whether to DMA to the surface or from the surface.
 */
static void vmw_surface_dma_encode(struct vmw_surface *srf,
				   void *cmd_space,
				   const SVGAGuestPtr *ptr,
				   bool to_surface)
{
	uint32_t i;
	struct vmw_surface_dma *cmd = (struct vmw_surface_dma *)cmd_space;
	const struct svga3d_surface_desc *desc =
		svga3dsurface_get_desc(srf->format);

	for (i = 0; i < srf->num_sizes; ++i) {
		SVGA3dCmdHeader *header = &cmd->header;
		SVGA3dCmdSurfaceDMA *body = &cmd->body;
		SVGA3dCopyBox *cb = &cmd->cb;
		SVGA3dCmdSurfaceDMASuffix *suffix = &cmd->suffix;
		const struct vmw_surface_offset *cur_offset = &srf->offsets[i];
		const struct drm_vmw_size *cur_size = &srf->sizes[i];

		header->id = SVGA_3D_CMD_SURFACE_DMA;
		header->size = sizeof(*body) + sizeof(*cb) + sizeof(*suffix);

		body->guest.ptr = *ptr;
		body->guest.ptr.offset += cur_offset->bo_offset;
		body->guest.pitch = svga3dsurface_calculate_pitch(desc,
								  cur_size);
		body->host.sid = srf->res.id;
		body->host.face = cur_offset->face;
		body->host.mipmap = cur_offset->mip;
		body->transfer = ((to_surface) ?  SVGA3D_WRITE_HOST_VRAM :
				  SVGA3D_READ_HOST_VRAM);
		cb->x = 0;
		cb->y = 0;
		cb->z = 0;
		cb->srcx = 0;
		cb->srcy = 0;
		cb->srcz = 0;
		cb->w = cur_size->width;
		cb->h = cur_size->height;
		cb->d = cur_size->depth;

		suffix->suffixSize = sizeof(*suffix);
		suffix->maximumOffset =
			svga3dsurface_get_image_buffer_size(desc, cur_size,
							    body->guest.pitch);
		suffix->flags.discard = 0;
		suffix->flags.unsynchronized = 0;
		suffix->flags.reserved = 0;
		++cmd;
	}
}


/**
 * vmw_hw_surface_destroy - destroy a Device surface
 *
 * @res:        Pointer to a struct vmw_resource embedded in a struct
 *              vmw_surface.
 *
 * Destroys the device surface associated with a struct vmw_surface, if
 * any, and adjusts accounting and resource count accordingly.
 */
static void vmw_hw_surface_destroy(struct vmw_resource *res)
{

	struct vmw_private *dev_priv = res->dev_priv;
	void *cmd;

	if (res->func->destroy == vmw_gb_surface_destroy) {
		(void) vmw_gb_surface_destroy(res);
		return;
	}

	if (res->id != -1) {

		cmd = VMW_FIFO_RESERVE(dev_priv, vmw_surface_destroy_size());
		if (unlikely(!cmd))
			return;

		vmw_surface_destroy_encode(res->id, cmd);
		vmw_fifo_commit(dev_priv, vmw_surface_destroy_size());

		/*
		 * TODO: use used_memory_size_atomic, or a separate lock,
		 * to avoid taking dev_priv::cmdbuf_mutex in
		 * the destroy path.
		 */

		mutex_lock(&dev_priv->cmdbuf_mutex);
		dev_priv->used_memory_size -= res->backup_size;
		mutex_unlock(&dev_priv->cmdbuf_mutex);
	}
}

/**
 * vmw_legacy_srf_create - Create a device surface as part of the
 * resource validation process.
 *
 * @res: Pointer to a struct vmw_resource embedded in a struct vmw_surface.
 *
 * Creates the device surface if it doesn't yet have a hw id.
 *
 * Returns -EBUSY if there weren't sufficient device resources to
 * complete the validation. Retry after freeing up resources.
 *
 * May return other errors if the kernel is out of guest resources.
 */
static int vmw_legacy_srf_create(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_surface *srf;
	uint32_t submit_size;
	uint8_t *cmd;
	int ret;

	if (likely(res->id != -1))
		return 0;

	srf = vmw_res_to_srf(res);
	if (unlikely(dev_priv->used_memory_size + res->backup_size >=
		     dev_priv->memory_size))
		return -EBUSY;

	/*
	 * Alloc id for the resource.
	 */

	ret = vmw_resource_alloc_id(res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a surface id.\n");
		goto out_no_id;
	}

	if (unlikely(res->id >= SVGA3D_MAX_SURFACE_IDS)) {
		ret = -EBUSY;
		goto out_no_fifo;
	}

	/*
	 * Encode surface define commands.
	 */

	submit_size = vmw_surface_define_size(srf);
	cmd = VMW_FIFO_RESERVE(dev_priv, submit_size);
	if (unlikely(!cmd)) {
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	vmw_surface_define_encode(srf, cmd);
	vmw_fifo_commit(dev_priv, submit_size);
	vmw_fifo_resource_inc(dev_priv);

	/*
	 * Surface memory usage accounting.
	 */

	dev_priv->used_memory_size += res->backup_size;
	return 0;

out_no_fifo:
	vmw_resource_release_id(res);
out_no_id:
	return ret;
}

/**
 * vmw_legacy_srf_dma - Copy backup data to or from a legacy surface.
 *
 * @res:            Pointer to a struct vmw_resource embedded in a struct
 *                  vmw_surface.
 * @val_buf:        Pointer to a struct ttm_validate_buffer containing
 *                  information about the backup buffer.
 * @bind:           Boolean whether to DMA to the surface.
 *
 * Transfer backup data to or from a legacy surface as part of the
 * validation process.
 * May return other errors if the kernel is out of guest resources.
 * The backup buffer will be fenced or idle upon successful completion,
 * and if the surface needs persistent backup storage, the backup buffer
 * will also be returned reserved iff @bind is true.
 */
static int vmw_legacy_srf_dma(struct vmw_resource *res,
			      struct ttm_validate_buffer *val_buf,
			      bool bind)
{
	SVGAGuestPtr ptr;
	struct vmw_fence_obj *fence;
	uint32_t submit_size;
	struct vmw_surface *srf = vmw_res_to_srf(res);
	uint8_t *cmd;
	struct vmw_private *dev_priv = res->dev_priv;

	BUG_ON(!val_buf->bo);
	submit_size = vmw_surface_dma_size(srf);
	cmd = VMW_FIFO_RESERVE(dev_priv, submit_size);
	if (unlikely(!cmd))
		return -ENOMEM;

	vmw_bo_get_guest_ptr(val_buf->bo, &ptr);
	vmw_surface_dma_encode(srf, cmd, &ptr, bind);

	vmw_fifo_commit(dev_priv, submit_size);

	/*
	 * Create a fence object and fence the backup buffer.
	 */

	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
					  &fence, NULL);

	vmw_bo_fence_single(val_buf->bo, fence);

	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	return 0;
}

/**
 * vmw_legacy_srf_bind - Perform a legacy surface bind as part of the
 *                       surface validation process.
 *
 * @res:            Pointer to a struct vmw_resource embedded in a struct
 *                  vmw_surface.
 * @val_buf:        Pointer to a struct ttm_validate_buffer containing
 *                  information about the backup buffer.
 *
 * This function will copy backup data to the surface if the
 * backup buffer is dirty.
 */
static int vmw_legacy_srf_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf)
{
	if (!res->backup_dirty)
		return 0;

	return vmw_legacy_srf_dma(res, val_buf, true);
}


/**
 * vmw_legacy_srf_unbind - Perform a legacy surface unbind as part of the
 *                         surface eviction process.
 *
 * @res:            Pointer to a struct vmw_resource embedded in a struct
 *                  vmw_surface.
 * @readback:       Boolean whether the surface contents should be read
 *                  back to the backup buffer.
 * @val_buf:        Pointer to a struct ttm_validate_buffer containing
 *                  information about the backup buffer.
 *
 * This function will copy backup data from the surface.
 */
static int vmw_legacy_srf_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf)
{
	if (unlikely(readback))
		return vmw_legacy_srf_dma(res, val_buf, false);
	return 0;
}

/**
 * vmw_legacy_srf_destroy - Destroy a device surface as part of a
 *                          resource eviction process.
 *
 * @res:            Pointer to a struct vmw_resource embedded in a struct
 *                  vmw_surface.
 */
static int vmw_legacy_srf_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	uint32_t submit_size;
	uint8_t *cmd;

	BUG_ON(res->id == -1);

	/*
	 * Encode the surface destroy command.
	 */

	submit_size = vmw_surface_destroy_size();
	cmd = VMW_FIFO_RESERVE(dev_priv, submit_size);
	if (unlikely(!cmd))
		return -ENOMEM;

	vmw_surface_destroy_encode(res->id, cmd);
	vmw_fifo_commit(dev_priv, submit_size);

	/*
	 * Surface memory usage accounting.
	 */

	dev_priv->used_memory_size -= res->backup_size;

	/*
	 * Release the surface ID.
	 */

	vmw_resource_release_id(res);
	vmw_fifo_resource_dec(dev_priv);

	return 0;
}


/**
 * vmw_surface_init - initialize a struct vmw_surface
 *
 * @dev_priv:       Pointer to a device private struct.
 * @srf:            Pointer to the struct vmw_surface to initialize.
 * @res_free:       Pointer to a resource destructor used to free
 *                  the object.
 */
static int vmw_surface_init(struct vmw_private *dev_priv,
			    struct vmw_surface *srf,
			    void (*res_free) (struct vmw_resource *res))
{
	int ret;
	struct vmw_resource *res = &srf->res;

	BUG_ON(!res_free);
	ret = vmw_resource_init(dev_priv, res, true, res_free,
				(dev_priv->has_mob) ? &vmw_gb_surface_func :
				&vmw_legacy_surface_func);

	if (unlikely(ret != 0)) {
		res_free(res);
		return ret;
	}

	/*
	 * The surface won't be visible to hardware until a
	 * surface validate.
	 */

	INIT_LIST_HEAD(&srf->view_list);
	res->hw_destroy = vmw_hw_surface_destroy;
	return ret;
}

/**
 * vmw_user_surface_base_to_res - TTM base object to resource converter for
 *                                user visible surfaces
 *
 * @base:           Pointer to a TTM base object
 *
 * Returns the struct vmw_resource embedded in a struct vmw_surface
 * for the user-visible object identified by the TTM base object @base.
 */
static struct vmw_resource *
vmw_user_surface_base_to_res(struct ttm_base_object *base)
{
	return &(container_of(base, struct vmw_user_surface,
			      prime.base)->srf.res);
}

/**
 * vmw_user_surface_free - User visible surface resource destructor
 *
 * @res:            A struct vmw_resource embedded in a struct vmw_surface.
 */
static void vmw_user_surface_free(struct vmw_resource *res)
{
	struct vmw_surface *srf = vmw_res_to_srf(res);
	struct vmw_user_surface *user_srf =
	    container_of(srf, struct vmw_user_surface, srf);
	struct vmw_private *dev_priv = srf->res.dev_priv;
	uint32_t size = user_srf->size;

	WARN_ON_ONCE(res->dirty);
	if (user_srf->master)
		drm_master_put(&user_srf->master);
	kfree(srf->offsets);
	kfree(srf->sizes);
	kfree(srf->snooper.image);
	ttm_prime_object_kfree(user_srf, prime);
	ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
}

/**
 * vmw_user_surface_base_release - User visible surface TTM base object
 *                                 destructor
 *
 * @p_base:         Pointer to a pointer to a TTM base object
 *                  embedded in a struct vmw_user_surface.
 *
 * Drops the base object's reference on its resource, and the
 * pointer pointed to by *p_base is set to NULL.
 */
static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_surface *user_srf =
	    container_of(base, struct vmw_user_surface, prime.base);
	struct vmw_resource *res = &user_srf->srf.res;

	*p_base = NULL;
	if (user_srf->backup_base)
		ttm_base_object_unref(&user_srf->backup_base);
	vmw_resource_unreference(&res);
}
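
/*
 * Lifetime sketch: vmw_surface_define_ioctl() below takes an extra
 * reference on the resource before handing it to ttm_prime_object_init(),
 * and that is the reference vmw_user_surface_base_release() drops. Once
 * the last resource reference is gone, vmw_user_surface_free() above
 * tears down the surface metadata and accounting.
 */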

/**
 * vmw_surface_destroy_ioctl - Ioctl function implementing
 *                             the user surface destroy functionality.
 *
 * @dev:            Pointer to a struct drm_device.
 * @data:           Pointer to data copied from / to user-space.
 * @file_priv:      Pointer to a drm file private structure.
 */
int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

	return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE);
}

/**
 * vmw_surface_define_ioctl - Ioctl function implementing
 *                            the user surface define functionality.
 *
 * @dev:            Pointer to a struct drm_device.
 * @data:           Pointer to data copied from / to user-space.
 * @file_priv:      Pointer to a drm file private structure.
 */
int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_surface *user_srf;
	struct vmw_surface *srf;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	union drm_vmw_surface_create_arg *arg =
	    (union drm_vmw_surface_create_arg *)data;
	struct drm_vmw_surface_create_req *req = &arg->req;
	struct drm_vmw_surface_arg *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false
	};
	int ret;
	int i, j;
	uint32_t cur_bo_offset;
	struct drm_vmw_size *cur_size;
	struct vmw_surface_offset *cur_offset;
	uint32_t num_sizes;
	uint32_t size;
	const struct svga3d_surface_desc *desc;

	if (unlikely(vmw_user_surface_size == 0))
		vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
			VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE;

	num_sizes = 0;
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
		if (req->mip_levels[i] > DRM_VMW_MAX_MIP_LEVELS)
			return -EINVAL;
		num_sizes += req->mip_levels[i];
	}

	if (num_sizes > DRM_VMW_MAX_SURFACE_FACES * DRM_VMW_MAX_MIP_LEVELS ||
	    num_sizes == 0)
		return -EINVAL;

	size = vmw_user_surface_size +
		ttm_round_pot(num_sizes * sizeof(struct drm_vmw_size)) +
		ttm_round_pot(num_sizes * sizeof(struct vmw_surface_offset));

	desc = svga3dsurface_get_desc(req->format);
	if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) {
		VMW_DEBUG_USER("Invalid format %d for surface creation.\n",
			       req->format);
		return -EINVAL;
	}

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
				   size, &ctx);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Out of graphics memory for surface.\n");
		goto out_unlock;
	}

	user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
	if (unlikely(!user_srf)) {
		ret = -ENOMEM;
		goto out_no_user_srf;
	}

	srf = &user_srf->srf;
	res = &srf->res;

	/* Driver internally stores as 64-bit flags */
	srf->flags = (SVGA3dSurfaceAllFlags)req->flags;
	srf->format = req->format;
	srf->scanout = req->scanout;

	memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
	srf->num_sizes = num_sizes;
	user_srf->size = size;
	srf->sizes = memdup_user((struct drm_vmw_size __user *)(unsigned long)
				 req->size_addr,
				 sizeof(*srf->sizes) * srf->num_sizes);
	if (IS_ERR(srf->sizes)) {
		ret = PTR_ERR(srf->sizes);
		goto out_no_sizes;
	}
	srf->offsets = kmalloc_array(srf->num_sizes,
				     sizeof(*srf->offsets),
				     GFP_KERNEL);
	if (unlikely(!srf->offsets)) {
		ret = -ENOMEM;
		goto out_no_offsets;
	}

	srf->base_size = *srf->sizes;
	srf->autogen_filter = SVGA3D_TEX_FILTER_NONE;
	srf->multisample_count = 0;
	srf->multisample_pattern = SVGA3D_MS_PATTERN_NONE;
	srf->quality_level = SVGA3D_MS_QUALITY_NONE;

	cur_bo_offset = 0;
	cur_offset = srf->offsets;
	cur_size = srf->sizes;

	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
		for (j = 0; j < srf->mip_levels[i]; ++j) {
			uint32_t stride = svga3dsurface_calculate_pitch
				(desc, cur_size);

			cur_offset->face = i;
			cur_offset->mip = j;
			cur_offset->bo_offset = cur_bo_offset;
			cur_bo_offset += svga3dsurface_get_image_buffer_size
				(desc, cur_size, stride);
			++cur_offset;
			++cur_size;
		}
	}
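
	/*
	 * Worked example (illustrative only, assuming a linear 4
	 * byte-per-pixel format such as SVGA3D_A8R8G8B8): a one-face,
	 * two-mip 64x64 surface lays out as
	 *
	 *	mip 0: bo_offset = 0,     image size = 64 * 64 * 4 = 16384
	 *	mip 1: bo_offset = 16384, image size = 32 * 32 * 4 = 4096
	 *
	 * leaving cur_bo_offset, and thus the backup size below, at
	 * 20480 bytes.
	 */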
	res->backup_size = cur_bo_offset;
	if (srf->scanout &&
	    srf->num_sizes == 1 &&
	    srf->sizes[0].width == 64 &&
	    srf->sizes[0].height == 64 &&
	    srf->format == SVGA3D_A8R8G8B8) {

		srf->snooper.image = kzalloc(64 * 64 * 4, GFP_KERNEL);
		if (!srf->snooper.image) {
			DRM_ERROR("Failed to allocate cursor_image\n");
			ret = -ENOMEM;
			goto out_no_copy;
		}
	} else {
		srf->snooper.image = NULL;
	}

	user_srf->prime.base.shareable = false;
	user_srf->prime.base.tfile = NULL;
	if (drm_is_primary_client(file_priv))
		user_srf->master = drm_master_get(file_priv->master);

	/*
	 * From this point, the generic resource management functions
	 * destroy the object on failure.
	 */

	ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
	if (unlikely(ret != 0))
		goto out_unlock;

	/*
	 * A gb-aware client referencing a shared surface will
	 * expect a backup buffer to be present.
	 */
	if (dev_priv->has_mob && req->shareable) {
		uint32_t backup_handle;

		ret = vmw_user_bo_alloc(dev_priv, tfile,
					res->backup_size,
					true,
					&backup_handle,
					&res->backup,
					&user_srf->backup_base);
		if (unlikely(ret != 0)) {
			vmw_resource_unreference(&res);
			goto out_unlock;
		}
	}

	tmp = vmw_resource_reference(&srf->res);
	ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
				    req->shareable, VMW_RES_SURFACE,
				    &vmw_user_surface_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		vmw_resource_unreference(&res);
		goto out_unlock;
	}

	rep->sid = user_srf->prime.base.handle;
	vmw_resource_unreference(&res);

	ttm_read_unlock(&dev_priv->reservation_sem);
	return 0;
out_no_copy:
	kfree(srf->offsets);
out_no_offsets:
	kfree(srf->sizes);
out_no_sizes:
	ttm_prime_object_kfree(user_srf, prime);
out_no_user_srf:
	ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
out_unlock:
	ttm_read_unlock(&dev_priv->reservation_sem);
	return ret;
}


static int
vmw_surface_handle_reference(struct vmw_private *dev_priv,
			     struct drm_file *file_priv,
			     uint32_t u_handle,
			     enum drm_vmw_handle_type handle_type,
			     struct ttm_base_object **base_p)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_user_surface *user_srf;
	uint32_t handle;
	struct ttm_base_object *base;
	int ret;
	bool require_exist = false;

	if (handle_type == DRM_VMW_HANDLE_PRIME) {
		ret = ttm_prime_fd_to_handle(tfile, u_handle, &handle);
		if (unlikely(ret != 0))
			return ret;
	} else {
		if (unlikely(drm_is_render_client(file_priv)))
			require_exist = true;

		handle = u_handle;
	}

	ret = -EINVAL;
	base = ttm_base_object_lookup_for_ref(dev_priv->tdev, handle);
	if (unlikely(!base)) {
		VMW_DEBUG_USER("Could not find surface to reference.\n");
		goto out_no_lookup;
	}

	if (unlikely(ttm_base_object_type(base) != VMW_RES_SURFACE)) {
		VMW_DEBUG_USER("Referenced object is not a surface.\n");
		goto out_bad_resource;
	}

	if (handle_type != DRM_VMW_HANDLE_PRIME) {
		user_srf = container_of(base, struct vmw_user_surface,
					prime.base);

		/*
		 * Make sure the surface creator has the same
		 * authenticating master, or is already registered with us.
		 */
		if (drm_is_primary_client(file_priv) &&
		    user_srf->master != file_priv->master)
			require_exist = true;

		ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL,
					 require_exist);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Could not add a reference to a surface.\n");
			goto out_bad_resource;
		}
	}

	*base_p = base;
	return 0;

out_bad_resource:
	ttm_base_object_unref(&base);
out_no_lookup:
	if (handle_type == DRM_VMW_HANDLE_PRIME)
		(void) ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);

	return ret;
}
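
/*
 * Note on the prime error path above: ttm_prime_fd_to_handle() has
 * already added a TTM_REF_USAGE reference for this file, which is what
 * the ttm_ref_object_base_unref() call under out_no_lookup drops again
 * on failure.
 */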

/**
 * vmw_surface_reference_ioctl - Ioctl function implementing
 *                               the user surface reference functionality.
 *
 * @dev:            Pointer to a struct drm_device.
 * @data:           Pointer to data copied from / to user-space.
 * @file_priv:      Pointer to a drm file private structure.
 */
int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	union drm_vmw_surface_reference_arg *arg =
	    (union drm_vmw_surface_reference_arg *)data;
	struct drm_vmw_surface_arg *req = &arg->req;
	struct drm_vmw_surface_create_req *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_surface *srf;
	struct vmw_user_surface *user_srf;
	struct drm_vmw_size __user *user_sizes;
	struct ttm_base_object *base;
	int ret;

	ret = vmw_surface_handle_reference(dev_priv, file_priv, req->sid,
					   req->handle_type, &base);
	if (unlikely(ret != 0))
		return ret;

	user_srf = container_of(base, struct vmw_user_surface, prime.base);
	srf = &user_srf->srf;

	/* Downcast of flags when sending back to user space */
	rep->flags = (uint32_t)srf->flags;
	rep->format = srf->format;
	memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
	    rep->size_addr;

	if (user_sizes)
		ret = copy_to_user(user_sizes, &srf->base_size,
				   sizeof(srf->base_size));
	if (unlikely(ret != 0)) {
		VMW_DEBUG_USER("copy_to_user failed %p %u\n", user_sizes,
			       srf->num_sizes);
		ttm_ref_object_base_unref(tfile, base->handle, TTM_REF_USAGE);
		ret = -EFAULT;
	}

	ttm_base_object_unref(&base);

	return ret;
}

/**
 * vmw_gb_surface_create - Create a guest-backed device surface.
 *
 * @res: Pointer to a struct vmw_resource embedded in a struct vmw_surface.
 *
 * Encodes and submits the appropriate surface define command if the
 * surface doesn't yet have a hw id, as part of the resource validation
 * process.
 */
static int vmw_gb_surface_create(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_surface *srf = vmw_res_to_srf(res);
	uint32_t cmd_len, cmd_id, submit_len;
	int ret;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineGBSurface body;
	} *cmd;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineGBSurface_v2 body;
	} *cmd2;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineGBSurface_v3 body;
	} *cmd3;

	if (likely(res->id != -1))
		return 0;

	vmw_fifo_resource_inc(dev_priv);
	ret = vmw_resource_alloc_id(res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a surface id.\n");
		goto out_no_id;
	}

	if (unlikely(res->id >= VMWGFX_NUM_GB_SURFACE)) {
		ret = -EBUSY;
		goto out_no_fifo;
	}

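	/*
	 * Pick the define command variant: v3 when SM4.1 is available and
	 * the surface is an array (it additionally carries the multisample
	 * pattern and quality level), v2 for array surfaces on DX hosts,
	 * and the original define command otherwise.
	 */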
	if (dev_priv->has_sm4_1 && srf->array_size > 0) {
		cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE_V3;
		cmd_len = sizeof(cmd3->body);
		submit_len = sizeof(*cmd3);
	} else if (srf->array_size > 0) {
		/* has_dx checked on creation time. */
		cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE_V2;
		cmd_len = sizeof(cmd2->body);
		submit_len = sizeof(*cmd2);
	} else {
		cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE;
		cmd_len = sizeof(cmd->body);
		submit_len = sizeof(*cmd);
	}

	cmd = VMW_FIFO_RESERVE(dev_priv, submit_len);
	cmd2 = (typeof(cmd2))cmd;
	cmd3 = (typeof(cmd3))cmd;
	if (unlikely(!cmd)) {
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	if (dev_priv->has_sm4_1 && srf->array_size > 0) {
		cmd3->header.id = cmd_id;
		cmd3->header.size = cmd_len;
		cmd3->body.sid = srf->res.id;
		cmd3->body.surfaceFlags = srf->flags;
		cmd3->body.format = srf->format;
		cmd3->body.numMipLevels = srf->mip_levels[0];
		cmd3->body.multisampleCount = srf->multisample_count;
		cmd3->body.multisamplePattern = srf->multisample_pattern;
		cmd3->body.qualityLevel = srf->quality_level;
		cmd3->body.autogenFilter = srf->autogen_filter;
		cmd3->body.size.width = srf->base_size.width;
		cmd3->body.size.height = srf->base_size.height;
		cmd3->body.size.depth = srf->base_size.depth;
		cmd3->body.arraySize = srf->array_size;
	} else if (srf->array_size > 0) {
		cmd2->header.id = cmd_id;
		cmd2->header.size = cmd_len;
		cmd2->body.sid = srf->res.id;
		cmd2->body.surfaceFlags = srf->flags;
		cmd2->body.format = srf->format;
		cmd2->body.numMipLevels = srf->mip_levels[0];
		cmd2->body.multisampleCount = srf->multisample_count;
		cmd2->body.autogenFilter = srf->autogen_filter;
		cmd2->body.size.width = srf->base_size.width;
		cmd2->body.size.height = srf->base_size.height;
		cmd2->body.size.depth = srf->base_size.depth;
		cmd2->body.arraySize = srf->array_size;
	} else {
		cmd->header.id = cmd_id;
		cmd->header.size = cmd_len;
		cmd->body.sid = srf->res.id;
		cmd->body.surfaceFlags = srf->flags;
		cmd->body.format = srf->format;
		cmd->body.numMipLevels = srf->mip_levels[0];
		cmd->body.multisampleCount = srf->multisample_count;
		cmd->body.autogenFilter = srf->autogen_filter;
		cmd->body.size.width = srf->base_size.width;
		cmd->body.size.height = srf->base_size.height;
		cmd->body.size.depth = srf->base_size.depth;
	}

	vmw_fifo_commit(dev_priv, submit_len);

	return 0;

out_no_fifo:
	vmw_resource_release_id(res);
out_no_id:
	vmw_fifo_resource_dec(dev_priv);
	return ret;
}


static int vmw_gb_surface_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBSurface body;
	} *cmd1;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdUpdateGBSurface body;
	} *cmd2;
	uint32_t submit_size;
	struct ttm_buffer_object *bo = val_buf->bo;

	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

	submit_size = sizeof(*cmd1) + (res->backup_dirty ? sizeof(*cmd2) : 0);

	cmd1 = VMW_FIFO_RESERVE(dev_priv, submit_size);
	if (unlikely(!cmd1))
		return -ENOMEM;

	cmd1->header.id = SVGA_3D_CMD_BIND_GB_SURFACE;
	cmd1->header.size = sizeof(cmd1->body);
	cmd1->body.sid = res->id;
	cmd1->body.mobid = bo->mem.start;
	if (res->backup_dirty) {
		cmd2 = (void *) &cmd1[1];
		cmd2->header.id = SVGA_3D_CMD_UPDATE_GB_SURFACE;
		cmd2->header.size = sizeof(cmd2->body);
		cmd2->body.sid = res->id;
	}
	vmw_fifo_commit(dev_priv, submit_size);

	if (res->backup->dirty && res->backup_dirty) {
		/* We've just made a full upload. Clear dirty regions. */
		vmw_bo_dirty_clear_res(res);
	}

	res->backup_dirty = false;

	return 0;
}

static int vmw_gb_surface_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct ttm_buffer_object *bo = val_buf->bo;
	struct vmw_fence_obj *fence;

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdReadbackGBSurface body;
	} *cmd1;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdInvalidateGBSurface body;
	} *cmd2;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBSurface body;
	} *cmd3;
	uint32_t submit_size;
	uint8_t *cmd;


	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

	submit_size = sizeof(*cmd3) + (readback ? sizeof(*cmd1) : sizeof(*cmd2));
	cmd = VMW_FIFO_RESERVE(dev_priv, submit_size);
	if (unlikely(!cmd))
		return -ENOMEM;

	if (readback) {
		cmd1 = (void *) cmd;
		cmd1->header.id = SVGA_3D_CMD_READBACK_GB_SURFACE;
		cmd1->header.size = sizeof(cmd1->body);
		cmd1->body.sid = res->id;
		cmd3 = (void *) &cmd1[1];
	} else {
		cmd2 = (void *) cmd;
		cmd2->header.id = SVGA_3D_CMD_INVALIDATE_GB_SURFACE;
		cmd2->header.size = sizeof(cmd2->body);
		cmd2->body.sid = res->id;
		cmd3 = (void *) &cmd2[1];
	}

	cmd3->header.id = SVGA_3D_CMD_BIND_GB_SURFACE;
	cmd3->header.size = sizeof(cmd3->body);
	cmd3->body.sid = res->id;
	cmd3->body.mobid = SVGA3D_INVALID_ID;

	vmw_fifo_commit(dev_priv, submit_size);

	/*
	 * Create a fence object and fence the backup buffer.
	 */

	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
					  &fence, NULL);

	vmw_bo_fence_single(val_buf->bo, fence);

	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	return 0;
}

static int vmw_gb_surface_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_surface *srf = vmw_res_to_srf(res);
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyGBSurface body;
	} *cmd;

	if (likely(res->id == -1))
		return 0;

	mutex_lock(&dev_priv->binding_mutex);
	vmw_view_surface_list_destroy(dev_priv, &srf->view_list);
	vmw_binding_res_list_scrub(&res->binding_head);

	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(!cmd)) {
		mutex_unlock(&dev_priv->binding_mutex);
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_DESTROY_GB_SURFACE;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.sid = res->id;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	mutex_unlock(&dev_priv->binding_mutex);
	vmw_resource_release_id(res);
	vmw_fifo_resource_dec(dev_priv);

	return 0;
}


/**
 * vmw_gb_surface_define_ioctl - Ioctl function implementing
 * the user surface define functionality.
 *
 * @dev: Pointer to a struct drm_device.
 * @data: Pointer to data copied from / to user-space.
 * @file_priv: Pointer to a drm file private structure.
 */
int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	union drm_vmw_gb_surface_create_arg *arg =
	    (union drm_vmw_gb_surface_create_arg *)data;
	struct drm_vmw_gb_surface_create_rep *rep = &arg->rep;
	struct drm_vmw_gb_surface_create_ext_req req_ext;

	req_ext.base = arg->req;
	req_ext.version = drm_vmw_gb_surface_v1;
	req_ext.svga3d_flags_upper_32_bits = 0;
	req_ext.multisample_pattern = SVGA3D_MS_PATTERN_NONE;
	req_ext.quality_level = SVGA3D_MS_QUALITY_NONE;
	req_ext.must_be_zero = 0;

	return vmw_gb_surface_define_internal(dev, &req_ext, rep, file_priv);
}

/**
 * vmw_gb_surface_reference_ioctl - Ioctl function implementing
 * the user surface reference functionality.
 *
 * @dev: Pointer to a struct drm_device.
 * @data: Pointer to data copied from / to user-space.
 * @file_priv: Pointer to a drm file private structure.
 */
int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file_priv)
{
	union drm_vmw_gb_surface_reference_arg *arg =
	    (union drm_vmw_gb_surface_reference_arg *)data;
	struct drm_vmw_surface_arg *req = &arg->req;
	struct drm_vmw_gb_surface_ref_rep *rep = &arg->rep;
	struct drm_vmw_gb_surface_ref_ext_rep rep_ext;
	int ret;

	ret = vmw_gb_surface_reference_internal(dev, req, &rep_ext, file_priv);

	if (unlikely(ret != 0))
		return ret;

	rep->creq = rep_ext.creq.base;
	rep->crep = rep_ext.crep;

	return ret;
}

/**
 * vmw_surface_gb_priv_define - Define a private GB surface
 *
 * @dev:  Pointer to a struct drm_device
 * @user_accounting_size:  Used to track user-space memory usage, set
 *                         to 0 for kernel mode only memory
 * @svga3d_flags: SVGA3d surface flags for the device
 * @format: requested surface format
 * @for_scanout: true if intended to be used for scanout buffer
 * @num_mip_levels:  number of MIP levels
 * @multisample_count: number of samples used for MSAA
 * @array_size: Surface array size.
 * @size: width, height, depth of the surface requested
 * @multisample_pattern: Multisampling pattern when msaa is supported
 * @quality_level: Precision settings
 * @srf_out: allocated user_srf. Set to NULL on failure.
 *
 * GB surfaces allocated by this function will not have a user mode handle, and
 * thus will only be visible to vmwgfx.  For optimization reasons the
 * surface may later be given a user mode handle by another function to make
 * it available to user mode drivers.
 */
int vmw_surface_gb_priv_define(struct drm_device *dev,
			       uint32_t user_accounting_size,
			       SVGA3dSurfaceAllFlags svga3d_flags,
			       SVGA3dSurfaceFormat format,
			       bool for_scanout,
			       uint32_t num_mip_levels,
			       uint32_t multisample_count,
			       uint32_t array_size,
			       struct drm_vmw_size size,
			       SVGA3dMSPattern multisample_pattern,
			       SVGA3dMSQualityLevel quality_level,
			       struct vmw_surface **srf_out)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_surface *user_srf;
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false
	};
	struct vmw_surface *srf;
	int ret;
	u32 num_layers = 1;
	u32 sample_count = 1;

	*srf_out = NULL;

	if (for_scanout) {
		if (!svga3dsurface_is_screen_target_format(format)) {
			VMW_DEBUG_USER("Invalid Screen Target surface format.\n");
			return -EINVAL;
		}

		if (size.width > dev_priv->texture_max_width ||
		    size.height > dev_priv->texture_max_height) {
			VMW_DEBUG_USER("%ux%u, exceeds max surface size %ux%u\n",
				       size.width, size.height,
				       dev_priv->texture_max_width,
				       dev_priv->texture_max_height);
			return -EINVAL;
		}
	} else {
		const struct svga3d_surface_desc *desc;

		desc = svga3dsurface_get_desc(format);
		if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) {
			VMW_DEBUG_USER("Invalid surface format.\n");
			return -EINVAL;
		}
	}

	/* array_size must be zero for non-GL3 hosts. */
	if (array_size > 0 && !dev_priv->has_dx) {
		VMW_DEBUG_USER("Tried to create DX surface on non-DX host.\n");
		return -EINVAL;
	}

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
				   user_accounting_size, &ctx);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Out of graphics memory for surface"
				  " creation.\n");
		goto out_unlock;
	}

	user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
	if (unlikely(!user_srf)) {
		ret = -ENOMEM;
		goto out_no_user_srf;
	}

	*srf_out  = &user_srf->srf;
	user_srf->size = user_accounting_size;
	user_srf->prime.base.shareable = false;
	user_srf->prime.base.tfile     = NULL;

	srf = &user_srf->srf;
	srf->flags             = svga3d_flags;
	srf->format            = format;
	srf->scanout           = for_scanout;
	srf->mip_levels[0]     = num_mip_levels;
	srf->num_sizes         = 1;
	srf->sizes             = NULL;
	srf->offsets           = NULL;
	srf->base_size         = size;
	srf->autogen_filter    = SVGA3D_TEX_FILTER_NONE;
	srf->array_size        = array_size;
	srf->multisample_count = multisample_count;
	srf->multisample_pattern = multisample_pattern;
	srf->quality_level = quality_level;

	if (array_size)
		num_layers = array_size;
	else if (svga3d_flags & SVGA3D_SURFACE_CUBEMAP)
		num_layers = SVGA3D_MAX_SURFACE_FACES;

	if (srf->flags & SVGA3D_SURFACE_MULTISAMPLE)
		sample_count = srf->multisample_count;

	srf->res.backup_size   =
		svga3dsurface_get_serialized_size_extended(srf->format,
							   srf->base_size,
							   srf->mip_levels[0],
							   num_layers,
							   sample_count);

	if (srf->flags & SVGA3D_SURFACE_BIND_STREAM_OUTPUT)
		srf->res.backup_size += sizeof(SVGA3dDXSOState);
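
	/*
	 * Illustrative example (assuming
	 * svga3dsurface_get_serialized_size_extended() simply sums the
	 * per-image byte sizes for a linear 4 byte-per-pixel format):
	 * a 1920x1080 SVGA3D_A8R8G8B8 surface with one mip level, one
	 * layer and one sample serializes to 1920 * 1080 * 4 = 8294400
	 * bytes, with sizeof(SVGA3dDXSOState) added on top only for
	 * stream-output surfaces.
	 */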

	/*
	 * Don't set the SVGA3D_SURFACE_SCREENTARGET flag for a scanout
	 * surface with a size greater than the STDU max width/height.
	 * This is really a workaround to support creation of a big
	 * framebuffer requested by some user-space applications for the
	 * whole topology. That big framebuffer won't really be used for
	 * binding with a screen target, as during prepare_fb a separate
	 * surface is created, so it's safe to ignore the
	 * SVGA3D_SURFACE_SCREENTARGET flag.
	 */
	if (dev_priv->active_display_unit == vmw_du_screen_target &&
	    for_scanout && size.width <= dev_priv->stdu_max_width &&
	    size.height <= dev_priv->stdu_max_height)
		srf->flags |= SVGA3D_SURFACE_SCREENTARGET;

	/*
	 * From this point, the generic resource management functions
	 * destroy the object on failure.
	 */
	ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);

	ttm_read_unlock(&dev_priv->reservation_sem);
	return ret;

out_no_user_srf:
	ttm_mem_global_free(vmw_mem_glob(dev_priv), user_accounting_size);

out_unlock:
	ttm_read_unlock(&dev_priv->reservation_sem);
	return ret;
}

/**
 * vmw_gb_surface_define_ext_ioctl - Ioctl function implementing
 * the user surface define functionality.
 *
 * @dev: Pointer to a struct drm_device.
 * @data: Pointer to data copied from / to user-space.
 * @file_priv: Pointer to a drm file private structure.
 */
int vmw_gb_surface_define_ext_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	union drm_vmw_gb_surface_create_ext_arg *arg =
	    (union drm_vmw_gb_surface_create_ext_arg *)data;
	struct drm_vmw_gb_surface_create_ext_req *req = &arg->req;
	struct drm_vmw_gb_surface_create_rep *rep = &arg->rep;

	return vmw_gb_surface_define_internal(dev, req, rep, file_priv);
}

/**
 * vmw_gb_surface_reference_ext_ioctl - Ioctl function implementing
 * the user surface reference functionality.
 *
 * @dev: Pointer to a struct drm_device.
 * @data: Pointer to data copied from / to user-space.
 * @file_priv: Pointer to a drm file private structure.
 */
int vmw_gb_surface_reference_ext_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file_priv)
{
	union drm_vmw_gb_surface_reference_ext_arg *arg =
	    (union drm_vmw_gb_surface_reference_ext_arg *)data;
	struct drm_vmw_surface_arg *req = &arg->req;
	struct drm_vmw_gb_surface_ref_ext_rep *rep = &arg->rep;

	return vmw_gb_surface_reference_internal(dev, req, rep, file_priv);
}

/**
 * vmw_gb_surface_define_internal - Helper function implementing
 * the user surface define functionality.
 *
 * @dev: Pointer to a struct drm_device.
 * @req: Request argument from user-space.
 * @rep: Response argument to user-space.
 * @file_priv: Pointer to a drm file private structure.
 */
static int
vmw_gb_surface_define_internal(struct drm_device *dev,
			       struct drm_vmw_gb_surface_create_ext_req *req,
			       struct drm_vmw_gb_surface_create_rep *rep,
			       struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_surface *user_srf;
	struct vmw_surface *srf;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;
	uint32_t size;
	uint32_t backup_handle = 0;
	SVGA3dSurfaceAllFlags svga3d_flags_64 =
		SVGA3D_FLAGS_64(req->svga3d_flags_upper_32_bits,
				req->base.svga3d_flags);

	if (!dev_priv->has_sm4_1) {
		/*
		 * If SM4_1 is not supported then we cannot send 64-bit
		 * flags to the device.
		 */
		if (req->svga3d_flags_upper_32_bits != 0)
			return -EINVAL;

		if (req->base.multisample_count != 0)
			return -EINVAL;

		if (req->multisample_pattern != SVGA3D_MS_PATTERN_NONE)
			return -EINVAL;

		if (req->quality_level != SVGA3D_MS_QUALITY_NONE)
			return -EINVAL;
	}

	if ((svga3d_flags_64 & SVGA3D_SURFACE_MULTISAMPLE) &&
	    req->base.multisample_count == 0)
		return -EINVAL;

	if (req->base.mip_levels > DRM_VMW_MAX_MIP_LEVELS)
		return -EINVAL;

	if (unlikely(vmw_user_surface_size == 0))
		vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
			VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE;

	size = vmw_user_surface_size;

	/* Define a surface based on the parameters. */
	ret = vmw_surface_gb_priv_define(dev,
					 size,
					 svga3d_flags_64,
					 req->base.format,
					 req->base.drm_surface_flags &
					 drm_vmw_surface_flag_scanout,
					 req->base.mip_levels,
					 req->base.multisample_count,
					 req->base.array_size,
					 req->base.base_size,
					 req->multisample_pattern,
					 req->quality_level,
					 &srf);
	if (unlikely(ret != 0))
		return ret;

	user_srf = container_of(srf, struct vmw_user_surface, srf);
	if (drm_is_primary_client(file_priv))
		user_srf->master = drm_master_get(file_priv->master);

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	res = &user_srf->srf.res;

	if (req->base.buffer_handle != SVGA3D_INVALID_ID) {
		ret = vmw_user_bo_lookup(tfile, req->base.buffer_handle,
					 &res->backup,
					 &user_srf->backup_base);
		if (ret == 0) {
			if (res->backup->base.num_pages * PAGE_SIZE <
			    res->backup_size) {
				VMW_DEBUG_USER("Surface backup buffer too small.\n");
				vmw_bo_unreference(&res->backup);
				ret = -EINVAL;
				goto out_unlock;
			} else {
				backup_handle = req->base.buffer_handle;
			}
		}
	} else if (req->base.drm_surface_flags &
		   (drm_vmw_surface_flag_create_buffer |
		    drm_vmw_surface_flag_coherent))
		ret = vmw_user_bo_alloc(dev_priv, tfile,
					res->backup_size,
					req->base.drm_surface_flags &
					drm_vmw_surface_flag_shareable,
					&backup_handle,
					&res->backup,
					&user_srf->backup_base);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&res);
		goto out_unlock;
	}

	if (req->base.drm_surface_flags & drm_vmw_surface_flag_coherent) {
		struct vmw_buffer_object *backup = res->backup;

		ttm_bo_reserve(&backup->base, false, false, NULL);
		if (!res->func->dirty_alloc)
			ret = -EINVAL;
		if (!ret)
			ret = vmw_bo_dirty_add(backup);
		if (!ret) {
			res->coherent = true;
			ret = res->func->dirty_alloc(res);
		}
		ttm_bo_unreserve(&backup->base);
		if (ret) {
			vmw_resource_unreference(&res);
			goto out_unlock;
		}

	}

	tmp = vmw_resource_reference(res);
	ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
				    req->base.drm_surface_flags &
				    drm_vmw_surface_flag_shareable,
				    VMW_RES_SURFACE,
				    &vmw_user_surface_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		vmw_resource_unreference(&res);
		goto out_unlock;
	}

	rep->handle      = user_srf->prime.base.handle;
	rep->backup_size = res->backup_size;
	if (res->backup) {
		rep->buffer_map_handle =
			drm_vma_node_offset_addr(&res->backup->base.vma_node);
		rep->buffer_size = res->backup->base.num_pages * PAGE_SIZE;
		rep->buffer_handle = backup_handle;
	} else {
		rep->buffer_map_handle = 0;
		rep->buffer_size = 0;
		rep->buffer_handle = SVGA3D_INVALID_ID;
	}

	vmw_resource_unreference(&res);

out_unlock:
	ttm_read_unlock(&dev_priv->reservation_sem);
	return ret;
}
1739 
1740 /**
 * vmw_gb_surface_reference_internal - Internal worker implementing the
 * user surface reference ioctls.
1743  *
1744  * @dev: Pointer to a struct drm_device.
1745  * @req: Pointer to user-space request surface arg.
1746  * @rep: Pointer to response to user-space.
1747  * @file_priv: Pointer to a drm file private structure.
1748  */
1749 static int
1750 vmw_gb_surface_reference_internal(struct drm_device *dev,
1751 				  struct drm_vmw_surface_arg *req,
1752 				  struct drm_vmw_gb_surface_ref_ext_rep *rep,
1753 				  struct drm_file *file_priv)
1754 {
1755 	struct vmw_private *dev_priv = vmw_priv(dev);
1756 	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
1757 	struct vmw_surface *srf;
1758 	struct vmw_user_surface *user_srf;
1759 	struct ttm_base_object *base;
1760 	uint32_t backup_handle;
	int ret;
1762 
1763 	ret = vmw_surface_handle_reference(dev_priv, file_priv, req->sid,
1764 					   req->handle_type, &base);
1765 	if (unlikely(ret != 0))
1766 		return ret;
1767 
1768 	user_srf = container_of(base, struct vmw_user_surface, prime.base);
1769 	srf = &user_srf->srf;
	if (!srf->res.backup) {
		DRM_ERROR("Shared GB surface is missing a backup buffer.\n");
		ret = -EINVAL;
		goto out_bad_resource;
	}
1774 
1775 	mutex_lock(&dev_priv->cmdbuf_mutex); /* Protect res->backup */
1776 	ret = vmw_user_bo_reference(tfile, srf->res.backup, &backup_handle);
1777 	mutex_unlock(&dev_priv->cmdbuf_mutex);
1778 
1779 	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not add a reference to a GB surface backup buffer.\n");
1782 		(void) ttm_ref_object_base_unref(tfile, base->handle,
1783 						 TTM_REF_USAGE);
1784 		goto out_bad_resource;
1785 	}
1786 
1787 	rep->creq.base.svga3d_flags = SVGA3D_FLAGS_LOWER_32(srf->flags);
1788 	rep->creq.base.format = srf->format;
1789 	rep->creq.base.mip_levels = srf->mip_levels[0];
1790 	rep->creq.base.drm_surface_flags = 0;
1791 	rep->creq.base.multisample_count = srf->multisample_count;
1792 	rep->creq.base.autogen_filter = srf->autogen_filter;
1793 	rep->creq.base.array_size = srf->array_size;
1794 	rep->creq.base.buffer_handle = backup_handle;
1795 	rep->creq.base.base_size = srf->base_size;
1796 	rep->crep.handle = user_srf->prime.base.handle;
1797 	rep->crep.backup_size = srf->res.backup_size;
1798 	rep->crep.buffer_handle = backup_handle;
1799 	rep->crep.buffer_map_handle =
1800 		drm_vma_node_offset_addr(&srf->res.backup->base.vma_node);
1801 	rep->crep.buffer_size = srf->res.backup->base.num_pages * PAGE_SIZE;
1802 
1803 	rep->creq.version = drm_vmw_gb_surface_v1;
1804 	rep->creq.svga3d_flags_upper_32_bits =
1805 		SVGA3D_FLAGS_UPPER_32(srf->flags);
1806 	rep->creq.multisample_pattern = srf->multisample_pattern;
1807 	rep->creq.quality_level = srf->quality_level;
1808 	rep->creq.must_be_zero = 0;
1809 
1810 out_bad_resource:
1811 	ttm_base_object_unref(&base);
1812 
1813 	return ret;
1814 }
1815 
1816 /**
1817  * vmw_subres_dirty_add - Add a dirty region to a subresource
 * @dirty: The surface's dirty tracker.
 * @loc_start: The location corresponding to the start of the region.
 * @loc_end: The location corresponding to the end of the region (exclusive).
1821  *
 * Since @loc_start and @loc_end are assumed to represent a sequential
 * range of backing store memory, if the region spans multiple lines then,
 * regardless of the x coordinate, the full lines are dirtied.
 * Correspondingly, if the region spans multiple z slices, full rather
 * than partial z slices are dirtied.
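 *
 * For example, a range starting at (x=3, y=1, z=0) and ending at the
 * exclusive location (x=2, y=4, z=2) spans two z slices, so all of
 * slices 0 and 1 are dirtied regardless of the x and y coordinates.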
1827  */
1828 static void vmw_subres_dirty_add(struct vmw_surface_dirty *dirty,
1829 				 const struct svga3dsurface_loc *loc_start,
1830 				 const struct svga3dsurface_loc *loc_end)
1831 {
1832 	const struct svga3dsurface_cache *cache = &dirty->cache;
1833 	SVGA3dBox *box = &dirty->boxes[loc_start->sub_resource];
1834 	u32 mip = loc_start->sub_resource % cache->num_mip_levels;
1835 	const struct drm_vmw_size *size = &cache->mip[mip].size;
	u32 box_c2;

	if (WARN_ON(loc_start->sub_resource >= dirty->num_subres))
		return;

	/* Grow the box to the union of the old box and the new z range. */
	box_c2 = box->z + box->d;
	if (box->d == 0 || box->z > loc_start->z)
		box->z = loc_start->z;
	if (box_c2 < loc_end->z)
		box_c2 = loc_end->z;
	box->d = box_c2 - box->z;
1845 
1846 	if (loc_start->z + 1 == loc_end->z) {
		box_c2 = box->y + box->h;
		if (box->h == 0 || box->y > loc_start->y)
			box->y = loc_start->y;
		if (box_c2 < loc_end->y)
			box_c2 = loc_end->y;
		box->h = box_c2 - box->y;
1852 
1853 		if (loc_start->y + 1 == loc_end->y) {
			box_c2 = box->x + box->w;
			if (box->w == 0 || box->x > loc_start->x)
				box->x = loc_start->x;
			if (box_c2 < loc_end->x)
				box_c2 = loc_end->x;
			box->w = box_c2 - box->x;
1859 		} else {
1860 			box->x = 0;
1861 			box->w = size->width;
1862 		}
1863 	} else {
1864 		box->y = 0;
1865 		box->h = size->height;
1866 		box->x = 0;
1867 		box->w = size->width;
1868 	}
1869 }
1870 
1871 /**
1872  * vmw_subres_dirty_full - Mark a full subresource as dirty
1873  * @dirty: The surface's dirty tracker.
 * @subres: The subresource.
1875  */
1876 static void vmw_subres_dirty_full(struct vmw_surface_dirty *dirty, u32 subres)
1877 {
1878 	const struct svga3dsurface_cache *cache = &dirty->cache;
1879 	u32 mip = subres % cache->num_mip_levels;
1880 	const struct drm_vmw_size *size = &cache->mip[mip].size;
1881 	SVGA3dBox *box = &dirty->boxes[subres];
1882 
1883 	box->x = 0;
1884 	box->y = 0;
1885 	box->z = 0;
1886 	box->w = size->width;
1887 	box->h = size->height;
1888 	box->d = size->depth;
1889 }
1890 
1891 /*
 * vmw_surface_tex_dirty_range_add - The dirty_range_add callback for texture
1893  * surfaces.
1894  */
1895 static void vmw_surface_tex_dirty_range_add(struct vmw_resource *res,
1896 					    size_t start, size_t end)
1897 {
1898 	struct vmw_surface_dirty *dirty =
1899 		(struct vmw_surface_dirty *) res->dirty;
1900 	size_t backup_end = res->backup_offset + res->backup_size;
1901 	struct svga3dsurface_loc loc1, loc2;
1902 	const struct svga3dsurface_cache *cache;
1903 
1904 	start = max_t(size_t, start, res->backup_offset) - res->backup_offset;
1905 	end = min(end, backup_end) - res->backup_offset;
1906 	cache = &dirty->cache;
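	/*
	 * loc1 is the location of the first dirty byte, and loc2 is one
	 * past the location of the last, making the range half-open.
	 */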
1907 	svga3dsurface_get_loc(cache, &loc1, start);
1908 	svga3dsurface_get_loc(cache, &loc2, end - 1);
1909 	svga3dsurface_inc_loc(cache, &loc2);
1910 
1911 	if (loc1.sub_resource + 1 == loc2.sub_resource) {
1912 		/* Dirty range covers a single sub-resource */
1913 		vmw_subres_dirty_add(dirty, &loc1, &loc2);
1914 	} else {
1915 		/* Dirty range covers multiple sub-resources */
1916 		struct svga3dsurface_loc loc_min, loc_max;
1917 		u32 sub_res = loc1.sub_resource;
1918 
1919 		svga3dsurface_max_loc(cache, loc1.sub_resource, &loc_max);
1920 		vmw_subres_dirty_add(dirty, &loc1, &loc_max);
1921 		svga3dsurface_min_loc(cache, loc2.sub_resource - 1, &loc_min);
1922 		vmw_subres_dirty_add(dirty, &loc_min, &loc2);
1923 		for (sub_res = loc1.sub_resource + 1;
1924 		     sub_res < loc2.sub_resource - 1; ++sub_res)
1925 			vmw_subres_dirty_full(dirty, sub_res);
1926 	}
1927 }
1928 
1929 /*
 * vmw_surface_buf_dirty_range_add - The dirty_range_add callback for buffer
1931  * surfaces.
1932  */
1933 static void vmw_surface_buf_dirty_range_add(struct vmw_resource *res,
1934 					    size_t start, size_t end)
1935 {
1936 	struct vmw_surface_dirty *dirty =
1937 		(struct vmw_surface_dirty *) res->dirty;
1938 	const struct svga3dsurface_cache *cache = &dirty->cache;
1939 	size_t backup_end = res->backup_offset + cache->mip_chain_bytes;
1940 	SVGA3dBox *box = &dirty->boxes[0];
1941 	u32 box_c2;
1942 
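	/* A buffer surface is a single subresource tracked as a 1D box. */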
1943 	box->h = box->d = 1;
1944 	start = max_t(size_t, start, res->backup_offset) - res->backup_offset;
1945 	end = min(end, backup_end) - res->backup_offset;
	box_c2 = box->x + box->w;
	if (box->w == 0 || box->x > start)
		box->x = start;
	if (box_c2 < end)
		box_c2 = end;
	box->w = box_c2 - box->x;
1951 }
1952 
1953 /*
 * vmw_surface_dirty_range_add - The dirty_range_add callback for surfaces
1955  */
1956 static void vmw_surface_dirty_range_add(struct vmw_resource *res, size_t start,
1957 					size_t end)
1958 {
1959 	struct vmw_surface *srf = vmw_res_to_srf(res);
1960 
1961 	if (WARN_ON(end <= res->backup_offset ||
1962 		    start >= res->backup_offset + res->backup_size))
1963 		return;
1964 
1965 	if (srf->format == SVGA3D_BUFFER)
1966 		vmw_surface_buf_dirty_range_add(res, start, end);
1967 	else
1968 		vmw_surface_tex_dirty_range_add(res, start, end);
1969 }
1970 
1971 /*
1972  * vmw_surface_dirty_sync - The surface's dirty_sync callback.
1973  */
1974 static int vmw_surface_dirty_sync(struct vmw_resource *res)
1975 {
1976 	struct vmw_private *dev_priv = res->dev_priv;
	bool has_dx = false;
1978 	u32 i, num_dirty;
1979 	struct vmw_surface_dirty *dirty =
1980 		(struct vmw_surface_dirty *) res->dirty;
1981 	size_t alloc_size;
1982 	const struct svga3dsurface_cache *cache = &dirty->cache;
1983 	struct {
1984 		SVGA3dCmdHeader header;
1985 		SVGA3dCmdDXUpdateSubResource body;
1986 	} *cmd1;
1987 	struct {
1988 		SVGA3dCmdHeader header;
1989 		SVGA3dCmdUpdateGBImage body;
1990 	} *cmd2;
1991 	void *cmd;
1992 
1993 	num_dirty = 0;
1994 	for (i = 0; i < dirty->num_subres; ++i) {
1995 		const SVGA3dBox *box = &dirty->boxes[i];
1996 
1997 		if (box->d)
1998 			num_dirty++;
1999 	}
2000 
2001 	if (!num_dirty)
2002 		goto out;
2003 
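	/* Reserve one update command per dirty subresource. */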
2004 	alloc_size = num_dirty * ((has_dx) ? sizeof(*cmd1) : sizeof(*cmd2));
2005 	cmd = VMW_FIFO_RESERVE(dev_priv, alloc_size);
2006 	if (!cmd)
2007 		return -ENOMEM;
2008 
2009 	cmd1 = cmd;
2010 	cmd2 = cmd;
2011 
2012 	for (i = 0; i < dirty->num_subres; ++i) {
2013 		const SVGA3dBox *box = &dirty->boxes[i];
2014 
2015 		if (!box->d)
2016 			continue;
2017 
2018 		/*
2019 		 * DX_UPDATE_SUBRESOURCE is aware of array surfaces.
2020 		 * UPDATE_GB_IMAGE is not.
2021 		 */
2022 		if (has_dx) {
2023 			cmd1->header.id = SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE;
2024 			cmd1->header.size = sizeof(cmd1->body);
2025 			cmd1->body.sid = res->id;
2026 			cmd1->body.subResource = i;
2027 			cmd1->body.box = *box;
2028 			cmd1++;
2029 		} else {
2030 			cmd2->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
2031 			cmd2->header.size = sizeof(cmd2->body);
2032 			cmd2->body.image.sid = res->id;
2033 			cmd2->body.image.face = i / cache->num_mip_levels;
2034 			cmd2->body.image.mipmap = i -
2035 				(cache->num_mip_levels * cmd2->body.image.face);
2036 			cmd2->body.box = *box;
2037 			cmd2++;
2038 		}
2039 
2040 	}
2041 	vmw_fifo_commit(dev_priv, alloc_size);
2042  out:
2043 	memset(&dirty->boxes[0], 0, sizeof(dirty->boxes[0]) *
2044 	       dirty->num_subres);
2045 
2046 	return 0;
2047 }
2048 
2049 /*
2050  * vmw_surface_dirty_alloc - The surface's dirty_alloc callback.
2051  */
2052 static int vmw_surface_dirty_alloc(struct vmw_resource *res)
2053 {
2054 	struct vmw_surface *srf = vmw_res_to_srf(res);
2055 	struct vmw_surface_dirty *dirty;
2056 	u32 num_layers = 1;
2057 	u32 num_mip;
2058 	u32 num_subres;
2059 	u32 num_samples;
2060 	size_t dirty_size, acc_size;
2061 	static struct ttm_operation_ctx ctx = {
2062 		.interruptible = false,
2063 		.no_wait_gpu = false
2064 	};
2065 	int ret;
2066 
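	/*
	 * A subresource is one mip level of one layer; a cube map without
	 * an explicit array size has one layer per face.
	 */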
2067 	if (srf->array_size)
2068 		num_layers = srf->array_size;
2069 	else if (srf->flags & SVGA3D_SURFACE_CUBEMAP)
2070 		num_layers *= SVGA3D_MAX_SURFACE_FACES;
2071 
2072 	num_mip = srf->mip_levels[0];
2073 	if (!num_mip)
2074 		num_mip = 1;
2075 
2076 	num_subres = num_layers * num_mip;
2077 	dirty_size = sizeof(*dirty) + num_subres * sizeof(dirty->boxes[0]);
2078 	acc_size = ttm_round_pot(dirty_size);
2079 	ret = ttm_mem_global_alloc(vmw_mem_glob(res->dev_priv),
2080 				   acc_size, &ctx);
2081 	if (ret) {
		VMW_DEBUG_USER("Out of graphics memory for surface dirty tracker.\n");
2084 		return ret;
2085 	}
2086 
2087 	dirty = kvzalloc(dirty_size, GFP_KERNEL);
2088 	if (!dirty) {
2089 		ret = -ENOMEM;
2090 		goto out_no_dirty;
2091 	}
2092 
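	/* The layout cache expects the real sample count, minimum one. */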
2093 	num_samples = max_t(u32, 1, srf->multisample_count);
2094 	ret = svga3dsurface_setup_cache(&srf->base_size, srf->format, num_mip,
2095 					num_layers, num_samples, &dirty->cache);
2096 	if (ret)
2097 		goto out_no_cache;
2098 
2099 	dirty->num_subres = num_subres;
2100 	dirty->size = acc_size;
2101 	res->dirty = (struct vmw_resource_dirty *) dirty;
2102 
2103 	return 0;
2104 
2105 out_no_cache:
2106 	kvfree(dirty);
2107 out_no_dirty:
2108 	ttm_mem_global_free(vmw_mem_glob(res->dev_priv), acc_size);
2109 	return ret;
2110 }
2111 
2112 /*
 * vmw_surface_dirty_free - The surface's dirty_free callback.
2114  */
2115 static void vmw_surface_dirty_free(struct vmw_resource *res)
2116 {
2117 	struct vmw_surface_dirty *dirty =
2118 		(struct vmw_surface_dirty *) res->dirty;
2119 	size_t acc_size = dirty->size;
2120 
2121 	kvfree(dirty);
2122 	ttm_mem_global_free(vmw_mem_glob(res->dev_priv), acc_size);
2123 	res->dirty = NULL;
2124 }
2125 
/*
 * vmw_surface_clean - The surface's clean callback.
 *
 * Issues a readback of the surface contents so that the backing store
 * holds up-to-date data.
 */
2129 static int vmw_surface_clean(struct vmw_resource *res)
2130 {
2131 	struct vmw_private *dev_priv = res->dev_priv;
2132 	size_t alloc_size;
2133 	struct {
2134 		SVGA3dCmdHeader header;
2135 		SVGA3dCmdReadbackGBSurface body;
2136 	} *cmd;
2137 
2138 	alloc_size = sizeof(*cmd);
2139 	cmd = VMW_FIFO_RESERVE(dev_priv, alloc_size);
2140 	if (!cmd)
2141 		return -ENOMEM;
2142 
2143 	cmd->header.id = SVGA_3D_CMD_READBACK_GB_SURFACE;
2144 	cmd->header.size = sizeof(cmd->body);
2145 	cmd->body.sid = res->id;
2146 	vmw_fifo_commit(dev_priv, alloc_size);
2147 
2148 	return 0;
2149 }
2150