/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/iommu.h>

#include <drm/drm_managed.h>

#include "gt/intel_gt.h"
#include "gt/intel_gt_requests.h"
#include "gt/mock_engine.h"
#include "intel_memory_region.h"
#include "intel_region_ttm.h"

#include "mock_request.h"
#include "mock_gem_device.h"
#include "mock_gtt.h"
#include "mock_uncore.h"
#include "mock_region.h"

#include "gem/selftests/mock_context.h"
#include "gem/selftests/mock_gem_object.h"

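/*
 * Flush all mock engines and keep retiring their requests until nothing
 * is left outstanding, so the mock device is idle before teardown.
 */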
void mock_device_flush(struct drm_i915_private *i915)
{
	struct intel_gt *gt = to_gt(i915);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	do {
		for_each_engine(engine, gt, id)
			mock_engine_flush(engine);
	} while (intel_gt_retire_requests_timeout(gt, MAX_SCHEDULE_TIMEOUT,
						  NULL));
}

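/*
 * drm_driver.release callback: unwind everything mock_gem_device() set up.
 * If construction never completed (!do_release), only the copied module
 * parameters need freeing.
 */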
static void mock_device_release(struct drm_device *dev)
{
	struct drm_i915_private *i915 = to_i915(dev);

	if (!i915->do_release)
		goto out;

	mock_device_flush(i915);
	intel_gt_driver_remove(to_gt(i915));

	i915_gem_drain_workqueue(i915);

	mock_fini_ggtt(to_gt(i915)->ggtt);
	destroy_workqueue(i915->unordered_wq);
	destroy_workqueue(i915->wq);

	intel_region_ttm_device_fini(i915);
	intel_gt_driver_late_release_all(i915);
	intel_memory_regions_driver_release(i915);

	drm_mode_config_cleanup(&i915->drm);

out:
	i915_params_free(&i915->params);
}

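/* Minimal GEM-only DRM driver backing the mock device. */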
static const struct drm_driver mock_driver = {
	.name = "mock",
	.driver_features = DRIVER_GEM,
	.release = mock_device_release,
};

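/* Final release for the fake PCI device allocated in mock_gem_device(). */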
static void release_dev(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	kfree(pdev);
}

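/*
 * Minimal PM domain for the fake device: runtime PM callbacks are simply
 * forwarded to the generic helpers, so runtime PM works without any real
 * hardware behind it.
 */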
static int pm_domain_resume(struct device *dev)
{
	return pm_generic_runtime_resume(dev);
}

static int pm_domain_suspend(struct device *dev)
{
	return pm_generic_runtime_suspend(dev);
}

static struct dev_pm_domain pm_domain = {
	.ops = {
		.runtime_suspend = pm_domain_suspend,
		.runtime_resume = pm_domain_resume,
	},
};

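/* Hook the single mock GT into the i915->gt[] array, as a real probe would. */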
static void mock_gt_probe(struct drm_i915_private *i915)
{
	i915->gt[0] = to_gt(i915);
	i915->gt[0]->name = "Mock GT";
}

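/*
 * Static device info for the mock platform: no real graphics IP, system
 * memory only, a single engine, and the full set of GTT page sizes.
 */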
static const struct intel_device_info mock_info = {
	.__runtime.graphics.ip.ver = -1,
	.__runtime.page_sizes = (I915_GTT_PAGE_SIZE_4K |
				 I915_GTT_PAGE_SIZE_64K |
				 I915_GTT_PAGE_SIZE_2M),
	.memory_regions = REGION_SMEM,
	.platform_engine_mask = BIT(0),

	/* simply use legacy cache level for mock device */
	.max_pat_index = 3,
	.cachelevel_to_pat = {
		[I915_CACHE_NONE]   = 0,
		[I915_CACHE_LLC]    = 1,
		[I915_CACHE_L3_LLC] = 2,
		[I915_CACHE_WT]     = 3,
	},
};

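/**
 * mock_gem_device - create a fake i915 device for self-tests
 *
 * Builds a bare-bones PCI/DRM device pair with a mocked uncore, GGTT and a
 * single mock engine, without touching any real hardware. Returns the new
 * device on success or NULL on failure; release it with mock_destroy_device().
 */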
struct drm_i915_private *mock_gem_device(void)
{
#if IS_ENABLED(CONFIG_IOMMU_API) && defined(CONFIG_INTEL_IOMMU)
	static struct dev_iommu fake_iommu = { .priv = (void *)-1 };
#endif
	struct drm_i915_private *i915;
	struct pci_dev *pdev;
	int ret;

	pdev = kzalloc(sizeof(*pdev), GFP_KERNEL);
	if (!pdev)
		return NULL;
	device_initialize(&pdev->dev);
	pdev->class = PCI_BASE_CLASS_DISPLAY << 16;
	pdev->dev.release = release_dev;
	dev_set_name(&pdev->dev, "mock");
	dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));

#if IS_ENABLED(CONFIG_IOMMU_API) && defined(CONFIG_INTEL_IOMMU)
	/* HACK to disable iommu for the fake device; force identity mapping */
	pdev->dev.iommu = &fake_iommu;
#endif
	if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL)) {
		put_device(&pdev->dev);
		return NULL;
	}

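	/*
	 * The drm_device is devm-allocated inside the devres group opened
	 * above; mock_destroy_device() releases that group to tear it down.
	 */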
	i915 = devm_drm_dev_alloc(&pdev->dev, &mock_driver,
				  struct drm_i915_private, drm);
	if (IS_ERR(i915)) {
		pr_err("Failed to allocate mock GEM device: err=%ld\n", PTR_ERR(i915));
		devres_release_group(&pdev->dev, NULL);
		put_device(&pdev->dev);

		return NULL;
	}

	pci_set_drvdata(pdev, i915);

	/* Device parameters start as a copy of module parameters. */
	i915_params_copy(&i915->params, &i915_modparams);

	/* Set up device info and initial runtime info. */
	intel_device_info_driver_create(i915, pdev->device, &mock_info);

	dev_pm_domain_set(&pdev->dev, &pm_domain);
	pm_runtime_enable(&pdev->dev);
	pm_runtime_dont_use_autosuspend(&pdev->dev);
	if (pm_runtime_enabled(&pdev->dev))
		WARN_ON(pm_runtime_get_sync(&pdev->dev));

	intel_runtime_pm_init_early(&i915->runtime_pm);
	/* wakeref tracking has significant overhead */
	i915->runtime_pm.no_wakeref_tracking = true;

	/* Using the global GTT may ask questions about KMS users, so prepare */
	drm_mode_config_init(&i915->drm);

	intel_memory_regions_hw_probe(i915);

	spin_lock_init(&i915->gpu_error.lock);

	i915_gem_init__mm(i915);
	intel_root_gt_init_early(i915);
	mock_uncore_init(&i915->uncore, i915);
	atomic_inc(&to_gt(i915)->wakeref.count); /* disable; no hw support */
	to_gt(i915)->awake = -ENODEV;
	mock_gt_probe(i915);

	ret = intel_region_ttm_device_init(i915);
	if (ret)
		goto err_ttm;

	i915->wq = alloc_ordered_workqueue("mock", 0);
	if (!i915->wq)
		goto err_drv;

	i915->unordered_wq = alloc_workqueue("mock-unordered", 0, 0);
	if (!i915->unordered_wq)
		goto err_wq;

	mock_init_contexts(i915);

	/* allocate the ggtt */
	ret = intel_gt_assign_ggtt(to_gt(i915));
	if (ret)
		goto err_unlock;

	mock_init_ggtt(to_gt(i915));
	to_gt(i915)->vm = i915_vm_get(&to_gt(i915)->ggtt->vm);

	to_gt(i915)->info.engine_mask = BIT(0);

	to_gt(i915)->engine[RCS0] = mock_engine(i915, "mock", RCS0);
	if (!to_gt(i915)->engine[RCS0])
		goto err_unlock;

	if (mock_engine_init(to_gt(i915)->engine[RCS0]))
		goto err_context;

	__clear_bit(I915_WEDGED, &to_gt(i915)->reset.flags);
	intel_engines_driver_register(i915);

	i915->do_release = true;
	ida_init(&i915->selftest.mock_region_instances);

	return i915;

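/* Unwind the partially constructed device. */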
err_context:
	intel_gt_driver_remove(to_gt(i915));
err_unlock:
	destroy_workqueue(i915->unordered_wq);
err_wq:
	destroy_workqueue(i915->wq);
err_drv:
	intel_region_ttm_device_fini(i915);
err_ttm:
	intel_gt_driver_late_release_all(i915);
	intel_memory_regions_driver_release(i915);
	drm_mode_config_cleanup(&i915->drm);
	mock_destroy_device(i915);

	return NULL;
}

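/**
 * mock_destroy_device - release a device created by mock_gem_device()
 * @i915: the mock i915 device
 *
 * Releases the devres group opened in mock_gem_device(), which drops the DRM
 * device and runs mock_device_release() once the last reference is gone, and
 * then puts the final reference on the fake PCI device.
 */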
void mock_destroy_device(struct drm_i915_private *i915)
{
	struct device *dev = i915->drm.dev;

	devres_release_group(dev, NULL);
	put_device(dev);
}