1 /**************************************************************************
2  *
3  * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
4  * All Rights Reserved.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the
8  * "Software"), to deal in the Software without restriction, including
9  * without limitation the rights to use, copy, modify, merge, publish,
10  * distribute, sub license, and/or sell copies of the Software, and to
11  * permit persons to whom the Software is furnished to do so, subject to
12  * the following conditions:
13  *
14  * The above copyright notice and this permission notice (including the
15  * next paragraph) shall be included in all copies or substantial portions
16  * of the Software.
17  *
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24  * USE OR OTHER DEALINGS IN THE SOFTWARE.
25  *
26  **************************************************************************/
27 
28 #include "vmwgfx_drv.h"
29 #include "vmwgfx_resource_priv.h"
30 #include <ttm/ttm_placement.h>
31 #include "svga3d_surfacedefs.h"
32 
/**
 * struct vmw_user_surface - User-space visible surface resource
 *
 * @base:           The TTM base object handling user-space visibility.
 * @srf:            The surface metadata.
 * @size:           TTM accounting size for the surface.
 * @backup_handle:  NOTE(review): not read or written anywhere in this
 *                  file — presumably a user-space handle for the backup
 *                  buffer filled in elsewhere; confirm against callers.
 */
struct vmw_user_surface {
	struct ttm_base_object base;
	struct vmw_surface srf;
	uint32_t size;
	uint32_t backup_handle;
};
46 
/**
 * struct vmw_surface_offset - Backing store mip level offset info
 *
 * @face:           Surface face.
 * @mip:            Mip level.
 * @bo_offset:      Offset into backing store of this mip level.
 *
 * One entry exists per mip image; used by the surface DMA encoding to
 * locate each image within the backing buffer object.
 */
struct vmw_surface_offset {
	uint32_t face;
	uint32_t mip;
	uint32_t bo_offset;
};
60 
61 static void vmw_user_surface_free(struct vmw_resource *res);
62 static struct vmw_resource *
63 vmw_user_surface_base_to_res(struct ttm_base_object *base);
64 static int vmw_legacy_srf_bind(struct vmw_resource *res,
65 			       struct ttm_validate_buffer *val_buf);
66 static int vmw_legacy_srf_unbind(struct vmw_resource *res,
67 				 bool readback,
68 				 struct ttm_validate_buffer *val_buf);
69 static int vmw_legacy_srf_create(struct vmw_resource *res);
70 static int vmw_legacy_srf_destroy(struct vmw_resource *res);
71 
/*
 * Conversion descriptor from user-space TTM base objects of type
 * VMW_RES_SURFACE to the embedded struct vmw_resource, used by the
 * generic user-resource lookup code.
 */
static const struct vmw_user_resource_conv user_surface_conv = {
	.object_type = VMW_RES_SURFACE,
	.base_obj_to_res = vmw_user_surface_base_to_res,
	.res_free = vmw_user_surface_free
};

const struct vmw_user_resource_conv *user_surface_converter =
	&user_surface_conv;


/*
 * TTM accounting size of a struct vmw_user_surface, computed lazily on
 * first surface creation (see vmw_surface_define_ioctl()).
 */
static uint64_t vmw_user_surface_size;
83 
/*
 * Resource callbacks for legacy surfaces: created/destroyed with SVGA3D
 * commands and backed up / restored via surface DMA.
 */
static const struct vmw_res_func vmw_legacy_surface_func = {
	.res_type = vmw_res_surface,
	.needs_backup = false,
	.may_evict = true,
	.type_name = "legacy surfaces",
	.backup_placement = &vmw_srf_placement,
	.create = &vmw_legacy_srf_create,
	.destroy = &vmw_legacy_srf_destroy,
	.bind = &vmw_legacy_srf_bind,
	.unbind = &vmw_legacy_srf_unbind
};
95 
/**
 * struct vmw_surface_dma - SVGA3D DMA command
 *
 * @header:         Generic SVGA3D command header.
 * @body:           Surface DMA command body.
 * @cb:             Copy box describing the region transferred.
 * @suffix:         DMA suffix carrying maximum offset and flags.
 */
struct vmw_surface_dma {
	SVGA3dCmdHeader header;
	SVGA3dCmdSurfaceDMA body;
	SVGA3dCopyBox cb;
	SVGA3dCmdSurfaceDMASuffix suffix;
};
105 
/**
 * struct vmw_surface_define - SVGA3D Surface Define command
 *
 * @header:         Generic SVGA3D command header.
 * @body:           Surface define command body; followed in the fifo by
 *                  one SVGA3dSize entry per mip image (see
 *                  vmw_surface_define_encode()).
 */
struct vmw_surface_define {
	SVGA3dCmdHeader header;
	SVGA3dCmdDefineSurface body;
};
113 
/**
 * struct vmw_surface_destroy - SVGA3D Surface Destroy command
 *
 * @header:         Generic SVGA3D command header.
 * @body:           Surface destroy command body.
 */
struct vmw_surface_destroy {
	SVGA3dCmdHeader header;
	SVGA3dCmdDestroySurface body;
};
121 
122 
123 /**
124  * vmw_surface_dma_size - Compute fifo size for a dma command.
125  *
126  * @srf: Pointer to a struct vmw_surface
127  *
128  * Computes the required size for a surface dma command for backup or
129  * restoration of the surface represented by @srf.
130  */
131 static inline uint32_t vmw_surface_dma_size(const struct vmw_surface *srf)
132 {
133 	return srf->num_sizes * sizeof(struct vmw_surface_dma);
134 }
135 
136 
137 /**
138  * vmw_surface_define_size - Compute fifo size for a surface define command.
139  *
140  * @srf: Pointer to a struct vmw_surface
141  *
142  * Computes the required size for a surface define command for the definition
143  * of the surface represented by @srf.
144  */
145 static inline uint32_t vmw_surface_define_size(const struct vmw_surface *srf)
146 {
147 	return sizeof(struct vmw_surface_define) + srf->num_sizes *
148 		sizeof(SVGA3dSize);
149 }
150 
151 
152 /**
153  * vmw_surface_destroy_size - Compute fifo size for a surface destroy command.
154  *
155  * Computes the required size for a surface destroy command for the destruction
156  * of a hw surface.
157  */
158 static inline uint32_t vmw_surface_destroy_size(void)
159 {
160 	return sizeof(struct vmw_surface_destroy);
161 }
162 
163 /**
164  * vmw_surface_destroy_encode - Encode a surface_destroy command.
165  *
166  * @id: The surface id
167  * @cmd_space: Pointer to memory area in which the commands should be encoded.
168  */
169 static void vmw_surface_destroy_encode(uint32_t id,
170 				       void *cmd_space)
171 {
172 	struct vmw_surface_destroy *cmd = (struct vmw_surface_destroy *)
173 		cmd_space;
174 
175 	cmd->header.id = SVGA_3D_CMD_SURFACE_DESTROY;
176 	cmd->header.size = sizeof(cmd->body);
177 	cmd->body.sid = id;
178 }
179 
180 /**
181  * vmw_surface_define_encode - Encode a surface_define command.
182  *
183  * @srf: Pointer to a struct vmw_surface object.
184  * @cmd_space: Pointer to memory area in which the commands should be encoded.
185  */
186 static void vmw_surface_define_encode(const struct vmw_surface *srf,
187 				      void *cmd_space)
188 {
189 	struct vmw_surface_define *cmd = (struct vmw_surface_define *)
190 		cmd_space;
191 	struct drm_vmw_size *src_size;
192 	SVGA3dSize *cmd_size;
193 	uint32_t cmd_len;
194 	int i;
195 
196 	cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);
197 
198 	cmd->header.id = SVGA_3D_CMD_SURFACE_DEFINE;
199 	cmd->header.size = cmd_len;
200 	cmd->body.sid = srf->res.id;
201 	cmd->body.surfaceFlags = srf->flags;
202 	cmd->body.format = cpu_to_le32(srf->format);
203 	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
204 		cmd->body.face[i].numMipLevels = srf->mip_levels[i];
205 
206 	cmd += 1;
207 	cmd_size = (SVGA3dSize *) cmd;
208 	src_size = srf->sizes;
209 
210 	for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
211 		cmd_size->width = src_size->width;
212 		cmd_size->height = src_size->height;
213 		cmd_size->depth = src_size->depth;
214 	}
215 }
216 
217 /**
218  * vmw_surface_dma_encode - Encode a surface_dma command.
219  *
220  * @srf: Pointer to a struct vmw_surface object.
221  * @cmd_space: Pointer to memory area in which the commands should be encoded.
222  * @ptr: Pointer to an SVGAGuestPtr indicating where the surface contents
223  * should be placed or read from.
224  * @to_surface: Boolean whether to DMA to the surface or from the surface.
225  */
226 static void vmw_surface_dma_encode(struct vmw_surface *srf,
227 				   void *cmd_space,
228 				   const SVGAGuestPtr *ptr,
229 				   bool to_surface)
230 {
231 	uint32_t i;
232 	struct vmw_surface_dma *cmd = (struct vmw_surface_dma *)cmd_space;
233 	const struct svga3d_surface_desc *desc =
234 		svga3dsurface_get_desc(srf->format);
235 
236 	for (i = 0; i < srf->num_sizes; ++i) {
237 		SVGA3dCmdHeader *header = &cmd->header;
238 		SVGA3dCmdSurfaceDMA *body = &cmd->body;
239 		SVGA3dCopyBox *cb = &cmd->cb;
240 		SVGA3dCmdSurfaceDMASuffix *suffix = &cmd->suffix;
241 		const struct vmw_surface_offset *cur_offset = &srf->offsets[i];
242 		const struct drm_vmw_size *cur_size = &srf->sizes[i];
243 
244 		header->id = SVGA_3D_CMD_SURFACE_DMA;
245 		header->size = sizeof(*body) + sizeof(*cb) + sizeof(*suffix);
246 
247 		body->guest.ptr = *ptr;
248 		body->guest.ptr.offset += cur_offset->bo_offset;
249 		body->guest.pitch = svga3dsurface_calculate_pitch(desc,
250 								  cur_size);
251 		body->host.sid = srf->res.id;
252 		body->host.face = cur_offset->face;
253 		body->host.mipmap = cur_offset->mip;
254 		body->transfer = ((to_surface) ?  SVGA3D_WRITE_HOST_VRAM :
255 				  SVGA3D_READ_HOST_VRAM);
256 		cb->x = 0;
257 		cb->y = 0;
258 		cb->z = 0;
259 		cb->srcx = 0;
260 		cb->srcy = 0;
261 		cb->srcz = 0;
262 		cb->w = cur_size->width;
263 		cb->h = cur_size->height;
264 		cb->d = cur_size->depth;
265 
266 		suffix->suffixSize = sizeof(*suffix);
267 		suffix->maximumOffset =
268 			svga3dsurface_get_image_buffer_size(desc, cur_size,
269 							    body->guest.pitch);
270 		suffix->flags.discard = 0;
271 		suffix->flags.unsynchronized = 0;
272 		suffix->flags.reserved = 0;
273 		++cmd;
274 	}
275 };
276 
277 
278 /**
279  * vmw_hw_surface_destroy - destroy a Device surface
280  *
281  * @res:        Pointer to a struct vmw_resource embedded in a struct
282  *              vmw_surface.
283  *
284  * Destroys a the device surface associated with a struct vmw_surface if
285  * any, and adjusts accounting and resource count accordingly.
286  */
287 static void vmw_hw_surface_destroy(struct vmw_resource *res)
288 {
289 
290 	struct vmw_private *dev_priv = res->dev_priv;
291 	struct vmw_surface *srf;
292 	void *cmd;
293 
294 	if (res->id != -1) {
295 
296 		cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size());
297 		if (unlikely(cmd == NULL)) {
298 			DRM_ERROR("Failed reserving FIFO space for surface "
299 				  "destruction.\n");
300 			return;
301 		}
302 
303 		vmw_surface_destroy_encode(res->id, cmd);
304 		vmw_fifo_commit(dev_priv, vmw_surface_destroy_size());
305 
306 		/*
307 		 * used_memory_size_atomic, or separate lock
308 		 * to avoid taking dev_priv::cmdbuf_mutex in
309 		 * the destroy path.
310 		 */
311 
312 		mutex_lock(&dev_priv->cmdbuf_mutex);
313 		srf = vmw_res_to_srf(res);
314 		dev_priv->used_memory_size -= res->backup_size;
315 		mutex_unlock(&dev_priv->cmdbuf_mutex);
316 	}
317 	vmw_3d_resource_dec(dev_priv, false);
318 }
319 
320 /**
321  * vmw_legacy_srf_create - Create a device surface as part of the
322  * resource validation process.
323  *
324  * @res: Pointer to a struct vmw_surface.
325  *
326  * If the surface doesn't have a hw id.
327  *
328  * Returns -EBUSY if there wasn't sufficient device resources to
329  * complete the validation. Retry after freeing up resources.
330  *
331  * May return other errors if the kernel is out of guest resources.
332  */
333 static int vmw_legacy_srf_create(struct vmw_resource *res)
334 {
335 	struct vmw_private *dev_priv = res->dev_priv;
336 	struct vmw_surface *srf;
337 	uint32_t submit_size;
338 	uint8_t *cmd;
339 	int ret;
340 
341 	if (likely(res->id != -1))
342 		return 0;
343 
344 	srf = vmw_res_to_srf(res);
345 	if (unlikely(dev_priv->used_memory_size + res->backup_size >=
346 		     dev_priv->memory_size))
347 		return -EBUSY;
348 
349 	/*
350 	 * Alloc id for the resource.
351 	 */
352 
353 	ret = vmw_resource_alloc_id(res);
354 	if (unlikely(ret != 0)) {
355 		DRM_ERROR("Failed to allocate a surface id.\n");
356 		goto out_no_id;
357 	}
358 
359 	if (unlikely(res->id >= SVGA3D_MAX_SURFACE_IDS)) {
360 		ret = -EBUSY;
361 		goto out_no_fifo;
362 	}
363 
364 	/*
365 	 * Encode surface define- commands.
366 	 */
367 
368 	submit_size = vmw_surface_define_size(srf);
369 	cmd = vmw_fifo_reserve(dev_priv, submit_size);
370 	if (unlikely(cmd == NULL)) {
371 		DRM_ERROR("Failed reserving FIFO space for surface "
372 			  "creation.\n");
373 		ret = -ENOMEM;
374 		goto out_no_fifo;
375 	}
376 
377 	vmw_surface_define_encode(srf, cmd);
378 	vmw_fifo_commit(dev_priv, submit_size);
379 	/*
380 	 * Surface memory usage accounting.
381 	 */
382 
383 	dev_priv->used_memory_size += res->backup_size;
384 	return 0;
385 
386 out_no_fifo:
387 	vmw_resource_release_id(res);
388 out_no_id:
389 	return ret;
390 }
391 
392 /**
393  * vmw_legacy_srf_dma - Copy backup data to or from a legacy surface.
394  *
395  * @res:            Pointer to a struct vmw_res embedded in a struct
396  *                  vmw_surface.
397  * @val_buf:        Pointer to a struct ttm_validate_buffer containing
398  *                  information about the backup buffer.
399  * @bind:           Boolean wether to DMA to the surface.
400  *
401  * Transfer backup data to or from a legacy surface as part of the
402  * validation process.
403  * May return other errors if the kernel is out of guest resources.
404  * The backup buffer will be fenced or idle upon successful completion,
405  * and if the surface needs persistent backup storage, the backup buffer
406  * will also be returned reserved iff @bind is true.
407  */
408 static int vmw_legacy_srf_dma(struct vmw_resource *res,
409 			      struct ttm_validate_buffer *val_buf,
410 			      bool bind)
411 {
412 	SVGAGuestPtr ptr;
413 	struct vmw_fence_obj *fence;
414 	uint32_t submit_size;
415 	struct vmw_surface *srf = vmw_res_to_srf(res);
416 	uint8_t *cmd;
417 	struct vmw_private *dev_priv = res->dev_priv;
418 
419 	BUG_ON(val_buf->bo == NULL);
420 
421 	submit_size = vmw_surface_dma_size(srf);
422 	cmd = vmw_fifo_reserve(dev_priv, submit_size);
423 	if (unlikely(cmd == NULL)) {
424 		DRM_ERROR("Failed reserving FIFO space for surface "
425 			  "DMA.\n");
426 		return -ENOMEM;
427 	}
428 	vmw_bo_get_guest_ptr(val_buf->bo, &ptr);
429 	vmw_surface_dma_encode(srf, cmd, &ptr, bind);
430 
431 	vmw_fifo_commit(dev_priv, submit_size);
432 
433 	/*
434 	 * Create a fence object and fence the backup buffer.
435 	 */
436 
437 	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
438 					  &fence, NULL);
439 
440 	vmw_fence_single_bo(val_buf->bo, fence);
441 
442 	if (likely(fence != NULL))
443 		vmw_fence_obj_unreference(&fence);
444 
445 	return 0;
446 }
447 
448 /**
449  * vmw_legacy_srf_bind - Perform a legacy surface bind as part of the
450  *                       surface validation process.
451  *
452  * @res:            Pointer to a struct vmw_res embedded in a struct
453  *                  vmw_surface.
454  * @val_buf:        Pointer to a struct ttm_validate_buffer containing
455  *                  information about the backup buffer.
456  *
457  * This function will copy backup data to the surface if the
458  * backup buffer is dirty.
459  */
460 static int vmw_legacy_srf_bind(struct vmw_resource *res,
461 			       struct ttm_validate_buffer *val_buf)
462 {
463 	if (!res->backup_dirty)
464 		return 0;
465 
466 	return vmw_legacy_srf_dma(res, val_buf, true);
467 }
468 
469 
470 /**
471  * vmw_legacy_srf_unbind - Perform a legacy surface unbind as part of the
472  *                         surface eviction process.
473  *
474  * @res:            Pointer to a struct vmw_res embedded in a struct
475  *                  vmw_surface.
476  * @val_buf:        Pointer to a struct ttm_validate_buffer containing
477  *                  information about the backup buffer.
478  *
479  * This function will copy backup data from the surface.
480  */
481 static int vmw_legacy_srf_unbind(struct vmw_resource *res,
482 				 bool readback,
483 				 struct ttm_validate_buffer *val_buf)
484 {
485 	if (unlikely(readback))
486 		return vmw_legacy_srf_dma(res, val_buf, false);
487 	return 0;
488 }
489 
490 /**
491  * vmw_legacy_srf_destroy - Destroy a device surface as part of a
492  *                          resource eviction process.
493  *
494  * @res:            Pointer to a struct vmw_res embedded in a struct
495  *                  vmw_surface.
496  */
497 static int vmw_legacy_srf_destroy(struct vmw_resource *res)
498 {
499 	struct vmw_private *dev_priv = res->dev_priv;
500 	uint32_t submit_size;
501 	uint8_t *cmd;
502 
503 	BUG_ON(res->id == -1);
504 
505 	/*
506 	 * Encode the dma- and surface destroy commands.
507 	 */
508 
509 	submit_size = vmw_surface_destroy_size();
510 	cmd = vmw_fifo_reserve(dev_priv, submit_size);
511 	if (unlikely(cmd == NULL)) {
512 		DRM_ERROR("Failed reserving FIFO space for surface "
513 			  "eviction.\n");
514 		return -ENOMEM;
515 	}
516 
517 	vmw_surface_destroy_encode(res->id, cmd);
518 	vmw_fifo_commit(dev_priv, submit_size);
519 
520 	/*
521 	 * Surface memory usage accounting.
522 	 */
523 
524 	dev_priv->used_memory_size -= res->backup_size;
525 
526 	/*
527 	 * Release the surface ID.
528 	 */
529 
530 	vmw_resource_release_id(res);
531 
532 	return 0;
533 }
534 
535 
536 /**
537  * vmw_surface_init - initialize a struct vmw_surface
538  *
539  * @dev_priv:       Pointer to a device private struct.
540  * @srf:            Pointer to the struct vmw_surface to initialize.
541  * @res_free:       Pointer to a resource destructor used to free
542  *                  the object.
543  */
544 static int vmw_surface_init(struct vmw_private *dev_priv,
545 			    struct vmw_surface *srf,
546 			    void (*res_free) (struct vmw_resource *res))
547 {
548 	int ret;
549 	struct vmw_resource *res = &srf->res;
550 
551 	BUG_ON(res_free == NULL);
552 	(void) vmw_3d_resource_inc(dev_priv, false);
553 	ret = vmw_resource_init(dev_priv, res, true, res_free,
554 				&vmw_legacy_surface_func);
555 
556 	if (unlikely(ret != 0)) {
557 		vmw_3d_resource_dec(dev_priv, false);
558 		res_free(res);
559 		return ret;
560 	}
561 
562 	/*
563 	 * The surface won't be visible to hardware until a
564 	 * surface validate.
565 	 */
566 
567 	vmw_resource_activate(res, vmw_hw_surface_destroy);
568 	return ret;
569 }
570 
571 /**
572  * vmw_user_surface_base_to_res - TTM base object to resource converter for
573  *                                user visible surfaces
574  *
575  * @base:           Pointer to a TTM base object
576  *
577  * Returns the struct vmw_resource embedded in a struct vmw_surface
578  * for the user-visible object identified by the TTM base object @base.
579  */
580 static struct vmw_resource *
581 vmw_user_surface_base_to_res(struct ttm_base_object *base)
582 {
583 	return &(container_of(base, struct vmw_user_surface, base)->srf.res);
584 }
585 
586 /**
587  * vmw_user_surface_free - User visible surface resource destructor
588  *
589  * @res:            A struct vmw_resource embedded in a struct vmw_surface.
590  */
591 static void vmw_user_surface_free(struct vmw_resource *res)
592 {
593 	struct vmw_surface *srf = vmw_res_to_srf(res);
594 	struct vmw_user_surface *user_srf =
595 	    container_of(srf, struct vmw_user_surface, srf);
596 	struct vmw_private *dev_priv = srf->res.dev_priv;
597 	uint32_t size = user_srf->size;
598 
599 	kfree(srf->offsets);
600 	kfree(srf->sizes);
601 	kfree(srf->snooper.image);
602 	ttm_base_object_kfree(user_srf, base);
603 	ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
604 }
605 
606 /**
607  * vmw_user_surface_free - User visible surface TTM base object destructor
608  *
609  * @p_base:         Pointer to a pointer to a TTM base object
610  *                  embedded in a struct vmw_user_surface.
611  *
612  * Drops the base object's reference on its resource, and the
613  * pointer pointed to by *p_base is set to NULL.
614  */
615 static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
616 {
617 	struct ttm_base_object *base = *p_base;
618 	struct vmw_user_surface *user_srf =
619 	    container_of(base, struct vmw_user_surface, base);
620 	struct vmw_resource *res = &user_srf->srf.res;
621 
622 	*p_base = NULL;
623 	vmw_resource_unreference(&res);
624 }
625 
626 /**
627  * vmw_user_surface_destroy_ioctl - Ioctl function implementing
628  *                                  the user surface destroy functionality.
629  *
630  * @dev:            Pointer to a struct drm_device.
631  * @data:           Pointer to data copied from / to user-space.
632  * @file_priv:      Pointer to a drm file private structure.
633  */
634 int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
635 			      struct drm_file *file_priv)
636 {
637 	struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
638 	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
639 
640 	return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE);
641 }
642 
643 /**
644  * vmw_user_surface_define_ioctl - Ioctl function implementing
645  *                                  the user surface define functionality.
646  *
647  * @dev:            Pointer to a struct drm_device.
648  * @data:           Pointer to data copied from / to user-space.
649  * @file_priv:      Pointer to a drm file private structure.
650  */
651 int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
652 			     struct drm_file *file_priv)
653 {
654 	struct vmw_private *dev_priv = vmw_priv(dev);
655 	struct vmw_user_surface *user_srf;
656 	struct vmw_surface *srf;
657 	struct vmw_resource *res;
658 	struct vmw_resource *tmp;
659 	union drm_vmw_surface_create_arg *arg =
660 	    (union drm_vmw_surface_create_arg *)data;
661 	struct drm_vmw_surface_create_req *req = &arg->req;
662 	struct drm_vmw_surface_arg *rep = &arg->rep;
663 	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
664 	struct drm_vmw_size __user *user_sizes;
665 	int ret;
666 	int i, j;
667 	uint32_t cur_bo_offset;
668 	struct drm_vmw_size *cur_size;
669 	struct vmw_surface_offset *cur_offset;
670 	uint32_t num_sizes;
671 	uint32_t size;
672 	struct vmw_master *vmaster = vmw_master(file_priv->master);
673 	const struct svga3d_surface_desc *desc;
674 
675 	if (unlikely(vmw_user_surface_size == 0))
676 		vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
677 			128;
678 
679 	num_sizes = 0;
680 	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
681 		num_sizes += req->mip_levels[i];
682 
683 	if (num_sizes > DRM_VMW_MAX_SURFACE_FACES *
684 	    DRM_VMW_MAX_MIP_LEVELS)
685 		return -EINVAL;
686 
687 	size = vmw_user_surface_size + 128 +
688 		ttm_round_pot(num_sizes * sizeof(struct drm_vmw_size)) +
689 		ttm_round_pot(num_sizes * sizeof(struct vmw_surface_offset));
690 
691 
692 	desc = svga3dsurface_get_desc(req->format);
693 	if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) {
694 		DRM_ERROR("Invalid surface format for surface creation.\n");
695 		return -EINVAL;
696 	}
697 
698 	ret = ttm_read_lock(&vmaster->lock, true);
699 	if (unlikely(ret != 0))
700 		return ret;
701 
702 	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
703 				   size, false, true);
704 	if (unlikely(ret != 0)) {
705 		if (ret != -ERESTARTSYS)
706 			DRM_ERROR("Out of graphics memory for surface"
707 				  " creation.\n");
708 		goto out_unlock;
709 	}
710 
711 	user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
712 	if (unlikely(user_srf == NULL)) {
713 		ret = -ENOMEM;
714 		goto out_no_user_srf;
715 	}
716 
717 	srf = &user_srf->srf;
718 	res = &srf->res;
719 
720 	srf->flags = req->flags;
721 	srf->format = req->format;
722 	srf->scanout = req->scanout;
723 
724 	memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
725 	srf->num_sizes = num_sizes;
726 	user_srf->size = size;
727 
728 	srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL);
729 	if (unlikely(srf->sizes == NULL)) {
730 		ret = -ENOMEM;
731 		goto out_no_sizes;
732 	}
733 	srf->offsets = kmalloc(srf->num_sizes * sizeof(*srf->offsets),
734 			       GFP_KERNEL);
735 	if (unlikely(srf->sizes == NULL)) {
736 		ret = -ENOMEM;
737 		goto out_no_offsets;
738 	}
739 
740 	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
741 	    req->size_addr;
742 
743 	ret = copy_from_user(srf->sizes, user_sizes,
744 			     srf->num_sizes * sizeof(*srf->sizes));
745 	if (unlikely(ret != 0)) {
746 		ret = -EFAULT;
747 		goto out_no_copy;
748 	}
749 
750 	srf->base_size = *srf->sizes;
751 	srf->autogen_filter = SVGA3D_TEX_FILTER_NONE;
752 	srf->multisample_count = 1;
753 
754 	cur_bo_offset = 0;
755 	cur_offset = srf->offsets;
756 	cur_size = srf->sizes;
757 
758 	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
759 		for (j = 0; j < srf->mip_levels[i]; ++j) {
760 			uint32_t stride = svga3dsurface_calculate_pitch
761 				(desc, cur_size);
762 
763 			cur_offset->face = i;
764 			cur_offset->mip = j;
765 			cur_offset->bo_offset = cur_bo_offset;
766 			cur_bo_offset += svga3dsurface_get_image_buffer_size
767 				(desc, cur_size, stride);
768 			++cur_offset;
769 			++cur_size;
770 		}
771 	}
772 	res->backup_size = cur_bo_offset;
773 	if (srf->scanout &&
774 	    srf->num_sizes == 1 &&
775 	    srf->sizes[0].width == 64 &&
776 	    srf->sizes[0].height == 64 &&
777 	    srf->format == SVGA3D_A8R8G8B8) {
778 
779 		srf->snooper.image = kmalloc(64 * 64 * 4, GFP_KERNEL);
780 		/* clear the image */
781 		if (srf->snooper.image) {
782 			memset(srf->snooper.image, 0x00, 64 * 64 * 4);
783 		} else {
784 			DRM_ERROR("Failed to allocate cursor_image\n");
785 			ret = -ENOMEM;
786 			goto out_no_copy;
787 		}
788 	} else {
789 		srf->snooper.image = NULL;
790 	}
791 	srf->snooper.crtc = NULL;
792 
793 	user_srf->base.shareable = false;
794 	user_srf->base.tfile = NULL;
795 
796 	/**
797 	 * From this point, the generic resource management functions
798 	 * destroy the object on failure.
799 	 */
800 
801 	ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
802 	if (unlikely(ret != 0))
803 		goto out_unlock;
804 
805 	tmp = vmw_resource_reference(&srf->res);
806 	ret = ttm_base_object_init(tfile, &user_srf->base,
807 				   req->shareable, VMW_RES_SURFACE,
808 				   &vmw_user_surface_base_release, NULL);
809 
810 	if (unlikely(ret != 0)) {
811 		vmw_resource_unreference(&tmp);
812 		vmw_resource_unreference(&res);
813 		goto out_unlock;
814 	}
815 
816 	rep->sid = user_srf->base.hash.key;
817 	vmw_resource_unreference(&res);
818 
819 	ttm_read_unlock(&vmaster->lock);
820 	return 0;
821 out_no_copy:
822 	kfree(srf->offsets);
823 out_no_offsets:
824 	kfree(srf->sizes);
825 out_no_sizes:
826 	ttm_base_object_kfree(user_srf, base);
827 out_no_user_srf:
828 	ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
829 out_unlock:
830 	ttm_read_unlock(&vmaster->lock);
831 	return ret;
832 }
833 
834 /**
835  * vmw_user_surface_define_ioctl - Ioctl function implementing
836  *                                  the user surface reference functionality.
837  *
838  * @dev:            Pointer to a struct drm_device.
839  * @data:           Pointer to data copied from / to user-space.
840  * @file_priv:      Pointer to a drm file private structure.
841  */
842 int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
843 				struct drm_file *file_priv)
844 {
845 	union drm_vmw_surface_reference_arg *arg =
846 	    (union drm_vmw_surface_reference_arg *)data;
847 	struct drm_vmw_surface_arg *req = &arg->req;
848 	struct drm_vmw_surface_create_req *rep = &arg->rep;
849 	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
850 	struct vmw_surface *srf;
851 	struct vmw_user_surface *user_srf;
852 	struct drm_vmw_size __user *user_sizes;
853 	struct ttm_base_object *base;
854 	int ret = -EINVAL;
855 
856 	base = ttm_base_object_lookup(tfile, req->sid);
857 	if (unlikely(base == NULL)) {
858 		DRM_ERROR("Could not find surface to reference.\n");
859 		return -EINVAL;
860 	}
861 
862 	if (unlikely(base->object_type != VMW_RES_SURFACE))
863 		goto out_bad_resource;
864 
865 	user_srf = container_of(base, struct vmw_user_surface, base);
866 	srf = &user_srf->srf;
867 
868 	ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL);
869 	if (unlikely(ret != 0)) {
870 		DRM_ERROR("Could not add a reference to a surface.\n");
871 		goto out_no_reference;
872 	}
873 
874 	rep->flags = srf->flags;
875 	rep->format = srf->format;
876 	memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
877 	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
878 	    rep->size_addr;
879 
880 	if (user_sizes)
881 		ret = copy_to_user(user_sizes, srf->sizes,
882 				   srf->num_sizes * sizeof(*srf->sizes));
883 	if (unlikely(ret != 0)) {
884 		DRM_ERROR("copy_to_user failed %p %u\n",
885 			  user_sizes, srf->num_sizes);
886 		ret = -EFAULT;
887 	}
888 out_bad_resource:
889 out_no_reference:
890 	ttm_base_object_unref(&base);
891 
892 	return ret;
893 }
894