1 /*
2  * Copyright © 2016 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  */
24 
25 #include <linux/pm_domain.h>
26 #include <linux/pm_runtime.h>
27 #include <linux/iommu.h>
28 
29 #include <drm/drm_managed.h>
30 
31 #include "gt/intel_gt.h"
32 #include "gt/intel_gt_requests.h"
33 #include "gt/mock_engine.h"
34 #include "intel_memory_region.h"
35 #include "intel_region_ttm.h"
36 
37 #include "mock_request.h"
38 #include "mock_gem_device.h"
39 #include "mock_gtt.h"
40 #include "mock_uncore.h"
41 #include "mock_region.h"
42 
43 #include "gem/selftests/mock_context.h"
44 #include "gem/selftests/mock_gem_object.h"
45 
46 void mock_device_flush(struct drm_i915_private *i915)
47 {
48 	struct intel_gt *gt = to_gt(i915);
49 	struct intel_engine_cs *engine;
50 	enum intel_engine_id id;
51 
52 	do {
53 		for_each_engine(engine, gt, id)
54 			mock_engine_flush(engine);
55 	} while (intel_gt_retire_requests_timeout(gt, MAX_SCHEDULE_TIMEOUT,
56 						  NULL));
57 }
58 
/*
 * drm_driver.release callback for the mock device.
 *
 * i915->do_release is only set at the very end of a fully successful
 * mock_gem_device(); on a partial init the error-unwind there has already
 * torn everything down, so only the modparam copy remains to be freed.
 * Teardown order mirrors (in reverse) the setup order in mock_gem_device().
 */
static void mock_device_release(struct drm_device *dev)
{
	struct drm_i915_private *i915 = to_i915(dev);

	if (!i915->do_release)
		goto out;

	/* Quiesce: flush engines and retire all requests before removal. */
	mock_device_flush(i915);
	intel_gt_driver_remove(to_gt(i915));

	/* Drain deferred work before freeing the structures it touches. */
	i915_gem_drain_workqueue(i915);
	i915_gem_drain_freed_objects(i915);

	mock_fini_ggtt(to_gt(i915)->ggtt);
	destroy_workqueue(i915->wq);

	intel_region_ttm_device_fini(i915);
	intel_gt_driver_late_release_all(i915);
	intel_memory_regions_driver_release(i915);

	drm_mode_config_cleanup(&i915->drm);

out:
	/* Free the i915_params_copy() taken during init. */
	i915_params_free(&i915->params);
}
84 
/* Minimal DRM driver backing the mock device; only GEM is advertised. */
static const struct drm_driver mock_driver = {
	.name = "mock",
	.driver_features = DRIVER_GEM,
	.release = mock_device_release,
};
90 
/* device.release callback: free the pci_dev kzalloc'ed in mock_gem_device(). */
static void release_dev(struct device *dev)
{
	kfree(to_pci_dev(dev));
}
97 
/* Runtime-resume hook for the mock PM domain: defer to the generic helper. */
static int pm_domain_resume(struct device *dev)
{
	return pm_generic_runtime_resume(dev);
}
102 
/* Runtime-suspend hook for the mock PM domain: defer to the generic helper. */
static int pm_domain_suspend(struct device *dev)
{
	return pm_generic_runtime_suspend(dev);
}
107 
/*
 * PM domain attached to the fake device so runtime-PM calls succeed
 * without real hardware behind them.
 */
static struct dev_pm_domain pm_domain = {
	.ops = {
		.runtime_suspend = pm_domain_suspend,
		.runtime_resume = pm_domain_resume,
	},
};
114 
/* Publish the embedded root GT (gt0) as the sole entry of the gt[] array. */
static void mock_gt_probe(struct drm_i915_private *i915)
{
	i915->gt[0] = &i915->gt0;
}
119 
/*
 * Create a fake i915 device, with no hardware behind it, for use by
 * the GEM selftests.  Returns the new i915 on success, NULL on failure.
 *
 * The setup order here is mirrored (in reverse) by mock_device_release();
 * on failure the goto ladder at the bottom unwinds exactly what was
 * acquired so far, with mock_destroy_device() dropping the parent device.
 */
struct drm_i915_private *mock_gem_device(void)
{
#if IS_ENABLED(CONFIG_IOMMU_API) && defined(CONFIG_INTEL_IOMMU)
	static struct dev_iommu fake_iommu = { .priv = (void *)-1 };
#endif
	struct drm_i915_private *i915;
	struct pci_dev *pdev;
	int ret;

	/* Fabricate the parent PCI device; freed via release_dev(). */
	pdev = kzalloc(sizeof(*pdev), GFP_KERNEL);
	if (!pdev)
		return NULL;
	device_initialize(&pdev->dev);
	pdev->class = PCI_BASE_CLASS_DISPLAY << 16;
	pdev->dev.release = release_dev;
	dev_set_name(&pdev->dev, "mock");
	dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));

#if IS_ENABLED(CONFIG_IOMMU_API) && defined(CONFIG_INTEL_IOMMU)
	/* HACK to disable iommu for the fake device; force identity mapping */
	pdev->dev.iommu = &fake_iommu;
#endif
	/* Group devm allocations so mock_destroy_device() can release them. */
	if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL)) {
		put_device(&pdev->dev);
		return NULL;
	}

	/* i915 is devm-allocated: its lifetime is tied to pdev's devres. */
	i915 = devm_drm_dev_alloc(&pdev->dev, &mock_driver,
				  struct drm_i915_private, drm);
	if (IS_ERR(i915)) {
		pr_err("Failed to allocate mock GEM device: err=%ld\n", PTR_ERR(i915));
		devres_release_group(&pdev->dev, NULL);
		put_device(&pdev->dev);

		return NULL;
	}

	pci_set_drvdata(pdev, i915);

	/* Hook up the no-op PM domain and hold a runtime-PM reference. */
	dev_pm_domain_set(&pdev->dev, &pm_domain);
	pm_runtime_enable(&pdev->dev);
	pm_runtime_dont_use_autosuspend(&pdev->dev);
	if (pm_runtime_enabled(&pdev->dev))
		WARN_ON(pm_runtime_get_sync(&pdev->dev));


	/* Freed in mock_device_release() via i915_params_free(). */
	i915_params_copy(&i915->params, &i915_modparams);

	intel_runtime_pm_init_early(&i915->runtime_pm);
	/* wakeref tracking has significant overhead */
	i915->runtime_pm.no_wakeref_tracking = true;

	/* Using the global GTT may ask questions about KMS users, so prepare */
	drm_mode_config_init(&i915->drm);

	mkwrite_device_info(i915)->graphics.ver = -1;

	mkwrite_device_info(i915)->page_sizes =
		I915_GTT_PAGE_SIZE_4K |
		I915_GTT_PAGE_SIZE_64K |
		I915_GTT_PAGE_SIZE_2M;

	/* System memory only; no fake stolen/local memory regions. */
	mkwrite_device_info(i915)->memory_regions = REGION_SMEM;
	intel_memory_regions_hw_probe(i915);

	spin_lock_init(&i915->gpu_error.lock);

	i915_gem_init__mm(i915);
	intel_root_gt_init_early(i915);
	mock_uncore_init(&i915->uncore, i915);
	atomic_inc(&to_gt(i915)->wakeref.count); /* disable; no hw support */
	to_gt(i915)->awake = -ENODEV;
	mock_gt_probe(i915);

	ret = intel_region_ttm_device_init(i915);
	if (ret)
		goto err_ttm;

	i915->wq = alloc_ordered_workqueue("mock", 0);
	if (!i915->wq)
		goto err_drv;

	mock_init_contexts(i915);

	/* allocate the ggtt */
	ret = intel_gt_assign_ggtt(to_gt(i915));
	if (ret)
		goto err_unlock;

	mock_init_ggtt(to_gt(i915));
	to_gt(i915)->vm = i915_vm_get(&to_gt(i915)->ggtt->vm);

	/* A single mock render engine is all the selftests need. */
	mkwrite_device_info(i915)->platform_engine_mask = BIT(0);
	to_gt(i915)->info.engine_mask = BIT(0);

	to_gt(i915)->engine[RCS0] = mock_engine(i915, "mock", RCS0);
	if (!to_gt(i915)->engine[RCS0])
		goto err_unlock;

	if (mock_engine_init(to_gt(i915)->engine[RCS0]))
		goto err_context;

	__clear_bit(I915_WEDGED, &to_gt(i915)->reset.flags);
	intel_engines_driver_register(i915);

	/* Init succeeded: let mock_device_release() do a full teardown. */
	i915->do_release = true;
	ida_init(&i915->selftest.mock_region_instances);

	return i915;

err_context:
	intel_gt_driver_remove(to_gt(i915));
err_unlock:
	destroy_workqueue(i915->wq);
err_drv:
	intel_region_ttm_device_fini(i915);
err_ttm:
	intel_gt_driver_late_release_all(i915);
	intel_memory_regions_driver_release(i915);
	drm_mode_config_cleanup(&i915->drm);
	mock_destroy_device(i915);

	return NULL;
}
244 
245 void mock_destroy_device(struct drm_i915_private *i915)
246 {
247 	struct device *dev = i915->drm.dev;
248 
249 	devres_release_group(dev, NULL);
250 	put_device(dev);
251 }
252