/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "virtgpu_drv.h"

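/*
 * Allocate a host resource id for a new object. Ids start at 1 so that
 * id 0 can be used to mean "no resource". Returns 0 on success or the
 * negative errno from ida_alloc_min() on failure.
 */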
static int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
				      uint32_t *resid)
{
	int handle = ida_alloc_min(&vgdev->resource_ida, 1, GFP_KERNEL);

	if (handle < 0)
		return handle;
	*resid = handle;
	return 0;
}

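/* Return a resource id to the pool once the host side is done with it. */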
static void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev,
				       uint32_t id)
{
	ida_free(&vgdev->resource_ida, id);
}

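/*
 * TTM destroy callback, invoked when the last reference to the buffer
 * object goes away: tell the host to drop the resource, free the
 * scatter/gather table and any kernel mapping, then release the GEM
 * object, the resource id, and the object itself.
 */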
static void virtio_gpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct virtio_gpu_object *bo;
	struct virtio_gpu_device *vgdev;

	bo = container_of(tbo, struct virtio_gpu_object, tbo);
	vgdev = bo->gem_base.dev->dev_private;

	if (bo->created)
		virtio_gpu_cmd_unref_resource(vgdev, bo->hw_res_handle);
	if (bo->pages)
		virtio_gpu_object_free_sg_table(bo);
	if (bo->vmap)
		virtio_gpu_object_kunmap(bo);
	drm_gem_object_release(&bo->gem_base);
	virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
	kfree(bo);
}

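/*
 * Build a single-entry placement list: virtio-gpu objects always live
 * in system memory (TTM_PL_FLAG_TT); pinned objects are additionally
 * marked non-evictable.
 */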
static void virtio_gpu_init_ttm_placement(struct virtio_gpu_object *vgbo,
					  bool pinned)
{
	u32 pflag = pinned ? TTM_PL_FLAG_NO_EVICT : 0;

	vgbo->placement.placement = &vgbo->placement_code;
	vgbo->placement.busy_placement = &vgbo->placement_code;
	vgbo->placement_code.fpfn = 0;
	vgbo->placement_code.lpfn = 0;
	vgbo->placement_code.flags =
		TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT | pflag;
	vgbo->placement.num_placement = 1;
	vgbo->placement.num_busy_placement = 1;
}

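/**
 * virtio_gpu_object_create - allocate and initialize a virtio-gpu BO
 * @vgdev: virtio-gpu device
 * @size: requested size in bytes, rounded up to page size
 * @kernel: true for an internal kernel buffer, false for a userspace one
 * @pinned: true to make the buffer non-evictable
 * @bo_ptr: on success, points at the new object
 *
 * Reserves a resource id, initializes the GEM base object and hands the
 * buffer to TTM. Returns 0 on success or a negative errno; on failure
 * everything allocated here is cleaned up again before returning.
 */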
int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
			     unsigned long size, bool kernel, bool pinned,
			     struct virtio_gpu_object **bo_ptr)
{
	struct virtio_gpu_object *bo;
	enum ttm_bo_type type;
	size_t acc_size;
	int ret;

	if (kernel)
		type = ttm_bo_type_kernel;
	else
		type = ttm_bo_type_device;
	*bo_ptr = NULL;

	acc_size = ttm_bo_dma_acc_size(&vgdev->mman.bdev, size,
				       sizeof(struct virtio_gpu_object));

	bo = kzalloc(sizeof(struct virtio_gpu_object), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	ret = virtio_gpu_resource_id_get(vgdev, &bo->hw_res_handle);
	if (ret < 0) {
		kfree(bo);
		return ret;
	}
	size = roundup(size, PAGE_SIZE);
	ret = drm_gem_object_init(vgdev->ddev, &bo->gem_base, size);
	if (ret != 0) {
		virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
		kfree(bo);
		return ret;
	}
	bo->dumb = false;
	virtio_gpu_init_ttm_placement(bo, pinned);

	ret = ttm_bo_init(&vgdev->mman.bdev, &bo->tbo, size, type,
			  &bo->placement, 0, !kernel, acc_size,
			  NULL, NULL, &virtio_gpu_ttm_bo_destroy);
	/* On failure ttm_bo_init() invokes the destroy callback, which
	 * releases the resource id and frees bo, so don't clean up here.
	 */
	if (ret != 0)
		return ret;

	*bo_ptr = bo;
	return 0;
}

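/* Drop the kernel mapping set up by virtio_gpu_object_kmap(). */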
void virtio_gpu_object_kunmap(struct virtio_gpu_object *bo)
{
	bo->vmap = NULL;
	ttm_bo_kunmap(&bo->kmap);
}

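/*
 * Map the whole buffer into the kernel address space and cache the
 * virtual address in bo->vmap. Callers are expected to balance this
 * with virtio_gpu_object_kunmap().
 */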
int virtio_gpu_object_kmap(struct virtio_gpu_object *bo)
{
	bool is_iomem;
	int r;

	WARN_ON(bo->vmap);

	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
	if (r)
		return r;
	bo->vmap = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
	return 0;
}

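/*
 * Build (and cache in bo->pages) a scatter/gather table covering the
 * buffer's backing pages, populating the TTM page array first if
 * needed. This table is what the driver hands to the host when
 * attaching backing storage to the resource.
 */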
int virtio_gpu_object_get_sg_table(struct virtio_gpu_device *qdev,
				   struct virtio_gpu_object *bo)
{
	int ret;
	struct page **pages = bo->tbo.ttm->pages;
	int nr_pages = bo->tbo.num_pages;
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};

	/* Nothing to do if the table was already built. */
	if (bo->pages)
		return 0;

	/* Make sure the backing pages exist before building the table. */
	if (bo->tbo.ttm->state == tt_unpopulated) {
		ret = bo->tbo.ttm->bdev->driver->ttm_tt_populate(bo->tbo.ttm,
								 &ctx);
		if (ret)
			return ret;
	}

	bo->pages = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!bo->pages)
		return -ENOMEM;

	ret = sg_alloc_table_from_pages(bo->pages, pages, nr_pages, 0,
					nr_pages << PAGE_SHIFT, GFP_KERNEL);
	if (ret) {
		kfree(bo->pages);
		bo->pages = NULL;
		return ret;
	}
	return 0;
}

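/* Free the scatter/gather table cached by virtio_gpu_object_get_sg_table(). */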
void virtio_gpu_object_free_sg_table(struct virtio_gpu_object *bo)
{
	sg_free_table(bo->pages);
	kfree(bo->pages);
	bo->pages = NULL;
}

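/*
 * Reserve the buffer and wait for any pending GPU activity on it.
 * With @no_wait set, returns -EBUSY instead of blocking.
 */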
int virtio_gpu_object_wait(struct virtio_gpu_object *bo, bool no_wait)
{
	int r;

	r = ttm_bo_reserve(&bo->tbo, true, no_wait, NULL);
	if (unlikely(r != 0))
		return r;
	r = ttm_bo_wait(&bo->tbo, true, no_wait);
	ttm_bo_unreserve(&bo->tbo);
	return r;
}