1 /*
2  * Copyright © 2016 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  */
24 
25 #include <linux/pm_runtime.h>
26 
27 #include "mock_engine.h"
28 #include "mock_context.h"
29 #include "mock_request.h"
30 #include "mock_gem_device.h"
31 #include "mock_gem_object.h"
32 #include "mock_gtt.h"
33 #include "mock_uncore.h"
34 
/*
 * mock_device_flush - drain all outstanding mock requests
 * @i915: the mock device
 *
 * Flushes every engine's pending mock activity, then retires the now
 * completed requests. Must be called with struct_mutex held (asserted).
 */
void mock_device_flush(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	lockdep_assert_held(&i915->drm.struct_mutex);

	/* Complete all in-flight mock requests on every engine first... */
	for_each_engine(engine, i915, id)
		mock_engine_flush(engine);

	/* ...so that retirement below can reap all of them. */
	i915_gem_retire_requests(i915);
}
47 
/*
 * mock_device_release - drm_driver.release callback for the mock device
 * @dev: the drm_device embedded in our mock drm_i915_private
 *
 * Tears down everything mock_gem_device() built, in strict reverse
 * dependency order: quiesce requests, stop the (stub) background workers,
 * free engines and contexts, drain deferred frees, then release the GGTT,
 * timelines, workqueue and slab caches before finally dropping the fake
 * PCI device reference (which frees the i915 allocation itself, see
 * release_dev()).
 */
static void mock_device_release(struct drm_device *dev)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/* Retire all requests so nothing still references the engines. */
	mutex_lock(&i915->drm.struct_mutex);
	mock_device_flush(i915);
	mutex_unlock(&i915->drm.struct_mutex);

	/* The handlers are no-ops, but the work items must not be pending. */
	cancel_delayed_work_sync(&i915->gt.retire_work);
	cancel_delayed_work_sync(&i915->gt.idle_work);

	mutex_lock(&i915->drm.struct_mutex);
	for_each_engine(engine, i915, id)
		mock_engine_free(engine);
	i915_gem_context_fini(i915);
	mutex_unlock(&i915->drm.struct_mutex);

	/* Flush deferred object frees before tearing down the GGTT below. */
	drain_workqueue(i915->wq);
	i915_gem_drain_freed_objects(i915);

	mutex_lock(&i915->drm.struct_mutex);
	mock_fini_ggtt(i915);
	i915_gem_timeline_fini(&i915->gt.global_timeline);
	mutex_unlock(&i915->drm.struct_mutex);

	destroy_workqueue(i915->wq);

	/* Destroy caches in reverse order of creation in mock_gem_device(). */
	kmem_cache_destroy(i915->priorities);
	kmem_cache_destroy(i915->dependencies);
	kmem_cache_destroy(i915->requests);
	kmem_cache_destroy(i915->vmas);
	kmem_cache_destroy(i915->objects);

	drm_dev_fini(&i915->drm);
	/* Final put triggers release_dev(), freeing pdev + i915 together. */
	put_device(&i915->drm.pdev->dev);
}
86 
/*
 * Minimal GEM-only drm_driver for the mock device; teardown is driven
 * entirely by the .release callback when the last drm_device ref drops.
 */
static struct drm_driver mock_driver = {
	.name = "mock",
	.driver_features = DRIVER_GEM,
	.release = mock_device_release,

	/* Reuse the real i915 GEM object hooks so object lifetime matches. */
	.gem_close_object = i915_gem_close_object,
	.gem_free_object_unlocked = i915_gem_free_object,
};
95 
/*
 * release_dev - device release callback for the fake PCI device
 * @dev: the embedded struct device of our kzalloc'ed pci_dev
 *
 * The pci_dev and the drm_i915_private were allocated as a single block
 * in mock_gem_device(), so freeing the pci_dev releases both.
 */
static void release_dev(struct device *dev)
{
	kfree(to_pci_dev(dev));
}
102 
/*
 * Stub for gt.retire_work: the mock device retires requests explicitly
 * via mock_device_flush(), so no background retirement is needed.
 */
static void mock_retire_work_handler(struct work_struct *work)
{
}
106 
/*
 * Stub for gt.idle_work: the mock device is marked permanently awake
 * (gt.awake = true in mock_gem_device()), so idling is a no-op.
 */
static void mock_idle_work_handler(struct work_struct *work)
{
}
110 
111 struct drm_i915_private *mock_gem_device(void)
112 {
113 	struct drm_i915_private *i915;
114 	struct intel_engine_cs *engine;
115 	enum intel_engine_id id;
116 	struct pci_dev *pdev;
117 	int err;
118 
119 	pdev = kzalloc(sizeof(*pdev) + sizeof(*i915), GFP_KERNEL);
120 	if (!pdev)
121 		goto err;
122 
123 	device_initialize(&pdev->dev);
124 	pdev->class = PCI_BASE_CLASS_DISPLAY << 16;
125 	pdev->dev.release = release_dev;
126 	dev_set_name(&pdev->dev, "mock");
127 	dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
128 
129 	pm_runtime_dont_use_autosuspend(&pdev->dev);
130 	pm_runtime_get_sync(&pdev->dev);
131 
132 	i915 = (struct drm_i915_private *)(pdev + 1);
133 	pci_set_drvdata(pdev, i915);
134 
135 	err = drm_dev_init(&i915->drm, &mock_driver, &pdev->dev);
136 	if (err) {
137 		pr_err("Failed to initialise mock GEM device: err=%d\n", err);
138 		goto put_device;
139 	}
140 	i915->drm.pdev = pdev;
141 	i915->drm.dev_private = i915;
142 
143 	/* Using the global GTT may ask questions about KMS users, so prepare */
144 	drm_mode_config_init(&i915->drm);
145 
146 	mkwrite_device_info(i915)->gen = -1;
147 
148 	spin_lock_init(&i915->mm.object_stat_lock);
149 	mock_uncore_init(i915);
150 
151 	init_waitqueue_head(&i915->gpu_error.wait_queue);
152 	init_waitqueue_head(&i915->gpu_error.reset_queue);
153 
154 	i915->wq = alloc_ordered_workqueue("mock", 0);
155 	if (!i915->wq)
156 		goto put_device;
157 
158 	INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
159 	init_llist_head(&i915->mm.free_list);
160 	INIT_LIST_HEAD(&i915->mm.unbound_list);
161 	INIT_LIST_HEAD(&i915->mm.bound_list);
162 
163 	ida_init(&i915->context_hw_ida);
164 
165 	INIT_DELAYED_WORK(&i915->gt.retire_work, mock_retire_work_handler);
166 	INIT_DELAYED_WORK(&i915->gt.idle_work, mock_idle_work_handler);
167 
168 	i915->gt.awake = true;
169 
170 	i915->objects = KMEM_CACHE(mock_object, SLAB_HWCACHE_ALIGN);
171 	if (!i915->objects)
172 		goto err_wq;
173 
174 	i915->vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
175 	if (!i915->vmas)
176 		goto err_objects;
177 
178 	i915->requests = KMEM_CACHE(mock_request,
179 				    SLAB_HWCACHE_ALIGN |
180 				    SLAB_RECLAIM_ACCOUNT |
181 				    SLAB_TYPESAFE_BY_RCU);
182 	if (!i915->requests)
183 		goto err_vmas;
184 
185 	i915->dependencies = KMEM_CACHE(i915_dependency,
186 					SLAB_HWCACHE_ALIGN |
187 					SLAB_RECLAIM_ACCOUNT);
188 	if (!i915->dependencies)
189 		goto err_requests;
190 
191 	i915->priorities = KMEM_CACHE(i915_priolist, SLAB_HWCACHE_ALIGN);
192 	if (!i915->priorities)
193 		goto err_dependencies;
194 
195 	mutex_lock(&i915->drm.struct_mutex);
196 	INIT_LIST_HEAD(&i915->gt.timelines);
197 	err = i915_gem_timeline_init__global(i915);
198 	if (err) {
199 		mutex_unlock(&i915->drm.struct_mutex);
200 		goto err_priorities;
201 	}
202 
203 	mock_init_ggtt(i915);
204 	mutex_unlock(&i915->drm.struct_mutex);
205 
206 	mkwrite_device_info(i915)->ring_mask = BIT(0);
207 	i915->engine[RCS] = mock_engine(i915, "mock");
208 	if (!i915->engine[RCS])
209 		goto err_dependencies;
210 
211 	i915->kernel_context = mock_context(i915, NULL);
212 	if (!i915->kernel_context)
213 		goto err_engine;
214 
215 	return i915;
216 
217 err_engine:
218 	for_each_engine(engine, i915, id)
219 		mock_engine_free(engine);
220 err_priorities:
221 	kmem_cache_destroy(i915->priorities);
222 err_dependencies:
223 	kmem_cache_destroy(i915->dependencies);
224 err_requests:
225 	kmem_cache_destroy(i915->requests);
226 err_vmas:
227 	kmem_cache_destroy(i915->vmas);
228 err_objects:
229 	kmem_cache_destroy(i915->objects);
230 err_wq:
231 	destroy_workqueue(i915->wq);
232 put_device:
233 	put_device(&pdev->dev);
234 err:
235 	return NULL;
236 }
237