/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>

#include "mock_engine.h"
#include "mock_context.h"
#include "mock_request.h"
#include "mock_gem_device.h"
#include "mock_gem_object.h"
#include "mock_gtt.h"
#include "mock_uncore.h"

/*
 * Flush all pending work on every mock engine and then retire the
 * completed requests. Asserts on exit that no requests remain active.
 *
 * Caller must hold i915->drm.struct_mutex (enforced by the lockdep
 * assertion below).
 */
void mock_device_flush(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	lockdep_assert_held(&i915->drm.struct_mutex);

	for_each_engine(engine, i915, id)
		mock_engine_flush(engine);

	i915_retire_requests(i915);
	GEM_BUG_ON(i915->gt.active_requests);
}

/*
 * drm_driver.release callback for the mock device: tears down, in
 * roughly the reverse order of mock_gem_device(), everything that was
 * set up there. Runs when the last reference to the drm_device drops.
 */
static void mock_device_release(struct drm_device *dev)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/* Quiesce the GPU side first: flush requests, drop contexts. */
	mutex_lock(&i915->drm.struct_mutex);
	mock_device_flush(i915);
	i915_gem_contexts_lost(i915);
	mutex_unlock(&i915->drm.struct_mutex);

	/* Drain deferred work before freeing the structures it touches. */
	drain_delayed_work(&i915->gt.retire_work);
	drain_delayed_work(&i915->gt.idle_work);
	i915_gem_drain_workqueue(i915);

	mutex_lock(&i915->drm.struct_mutex);
	for_each_engine(engine, i915, id)
		mock_engine_free(engine);
	i915_gem_contexts_fini(i915);
	mutex_unlock(&i915->drm.struct_mutex);

	i915_timelines_fini(i915);

	drain_workqueue(i915->wq);
	i915_gem_drain_freed_objects(i915);

	mutex_lock(&i915->drm.struct_mutex);
	mock_fini_ggtt(&i915->ggtt);
	mutex_unlock(&i915->drm.struct_mutex);

	destroy_workqueue(i915->wq);

	/* Destroy slab caches in the reverse order of their creation. */
	kmem_cache_destroy(i915->priorities);
	kmem_cache_destroy(i915->dependencies);
	kmem_cache_destroy(i915->requests);
	kmem_cache_destroy(i915->vmas);
	kmem_cache_destroy(i915->objects);

	i915_gemfs_fini(i915);

	drm_mode_config_cleanup(&i915->drm);

	drm_dev_fini(&i915->drm);
	/*
	 * Drop the reference taken on the fake pci_dev; release_dev()
	 * frees the combined pdev+i915 allocation once it hits zero.
	 */
	put_device(&i915->drm.pdev->dev);
}

static struct drm_driver mock_driver = {
	.name = "mock",
	.driver_features = DRIVER_GEM,
	.release = mock_device_release,

	.gem_close_object = i915_gem_close_object,
	.gem_free_object_unlocked = i915_gem_free_object,
};

/*
 * device release callback for the fake pci_dev created in
 * mock_gem_device(); frees the single kzalloc'ed pdev+i915 chunk.
 */
static void release_dev(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	kfree(pdev);
}

/* No-op work handlers: the mock device never retires or idles via work. */
static void mock_retire_work_handler(struct work_struct *work)
{
}

static void mock_idle_work_handler(struct work_struct *work)
{
}

/* Forward runtime-pm callbacks straight to the generic helpers. */
static int pm_domain_resume(struct device *dev)
{
	return pm_generic_runtime_resume(dev);
}

static int pm_domain_suspend(struct device *dev)
{
	return pm_generic_runtime_suspend(dev);
}

static struct dev_pm_domain pm_domain = {
	.ops = {
		.runtime_suspend = pm_domain_suspend,
		.runtime_resume = pm_domain_resume,
	},
};

/*
 * mock_gem_device - construct a fake drm_i915_private for selftests
 *
 * Allocates a dummy pci_dev with the drm_i915_private co-located
 * immediately after it in the same allocation, then initialises just
 * enough state (runtime pm, drm_device, slab caches, timelines, a mock
 * GGTT, a kernel context and a single mock RCS engine) for the GEM
 * selftests to run without real hardware.
 *
 * Returns the new device on success, or NULL on any failure (errors are
 * unwound via the fall-through goto ladder at the bottom).
 */
struct drm_i915_private *mock_gem_device(void)
{
	struct drm_i915_private *i915;
	struct pci_dev *pdev;
	int err;

	/* One allocation holds both the fake pci_dev and the i915 private. */
	pdev = kzalloc(sizeof(*pdev) + sizeof(*i915), GFP_KERNEL);
	if (!pdev)
		goto err;

	device_initialize(&pdev->dev);
	pdev->class = PCI_BASE_CLASS_DISPLAY << 16;
	pdev->dev.release = release_dev;
	dev_set_name(&pdev->dev, "mock");
	dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));

#if IS_ENABLED(CONFIG_IOMMU_API) && defined(CONFIG_INTEL_IOMMU)
	/* hack to disable iommu for the fake device; force identity mapping */
	pdev->dev.archdata.iommu = (void *)-1;
#endif

	/* The i915 private lives directly after the pci_dev (see kzalloc). */
	i915 = (struct drm_i915_private *)(pdev + 1);
	pci_set_drvdata(pdev, i915);

	intel_runtime_pm_init_early(i915);

	dev_pm_domain_set(&pdev->dev, &pm_domain);
	pm_runtime_enable(&pdev->dev);
	pm_runtime_dont_use_autosuspend(&pdev->dev);
	if (pm_runtime_enabled(&pdev->dev))
		WARN_ON(pm_runtime_get_sync(&pdev->dev));

	err = drm_dev_init(&i915->drm, &mock_driver, &pdev->dev);
	if (err) {
		pr_err("Failed to initialise mock GEM device: err=%d\n", err);
		goto put_device;
	}
	i915->drm.pdev = pdev;
	i915->drm.dev_private = i915;

	/* Using the global GTT may ask questions about KMS users, so prepare */
	drm_mode_config_init(&i915->drm);

	/* gen == -1 marks this as a mock, not any real hardware generation. */
	mkwrite_device_info(i915)->gen = -1;

	mkwrite_device_info(i915)->page_sizes =
		I915_GTT_PAGE_SIZE_4K |
		I915_GTT_PAGE_SIZE_64K |
		I915_GTT_PAGE_SIZE_2M;

	mock_uncore_init(i915);
	i915_gem_init__mm(i915);

	init_waitqueue_head(&i915->gpu_error.wait_queue);
	init_waitqueue_head(&i915->gpu_error.reset_queue);
	mutex_init(&i915->gpu_error.wedge_mutex);

	i915->wq = alloc_ordered_workqueue("mock", 0);
	if (!i915->wq)
		goto err_drv;

	mock_init_contexts(i915);

	/* Hook up no-op handlers so the delayed work can be queued safely. */
	INIT_DELAYED_WORK(&i915->gt.retire_work, mock_retire_work_handler);
	INIT_DELAYED_WORK(&i915->gt.idle_work, mock_idle_work_handler);

	/* Pretend the GT is always awake for the purposes of the selftests. */
	i915->gt.awake = true;

	i915->objects = KMEM_CACHE(mock_object, SLAB_HWCACHE_ALIGN);
	if (!i915->objects)
		goto err_wq;

	i915->vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
	if (!i915->vmas)
		goto err_objects;

	i915->requests = KMEM_CACHE(mock_request,
				    SLAB_HWCACHE_ALIGN |
				    SLAB_RECLAIM_ACCOUNT |
				    SLAB_TYPESAFE_BY_RCU);
	if (!i915->requests)
		goto err_vmas;

	i915->dependencies = KMEM_CACHE(i915_dependency,
					SLAB_HWCACHE_ALIGN |
					SLAB_RECLAIM_ACCOUNT);
	if (!i915->dependencies)
		goto err_requests;

	i915->priorities = KMEM_CACHE(i915_priolist, SLAB_HWCACHE_ALIGN);
	if (!i915->priorities)
		goto err_dependencies;

	i915_timelines_init(i915);

	INIT_LIST_HEAD(&i915->gt.active_rings);
	INIT_LIST_HEAD(&i915->gt.closed_vma);

	mutex_lock(&i915->drm.struct_mutex);

	mock_init_ggtt(i915, &i915->ggtt);

	/* Expose exactly one (mock) engine: RCS. */
	mkwrite_device_info(i915)->ring_mask = BIT(0);
	i915->kernel_context = mock_context(i915, NULL);
	if (!i915->kernel_context)
		goto err_unlock;

	i915->engine[RCS] = mock_engine(i915, "mock", RCS);
	if (!i915->engine[RCS])
		goto err_context;

	mutex_unlock(&i915->drm.struct_mutex);

	WARN_ON(i915_gemfs_init(i915));

	return i915;

	/*
	 * Error unwind: each label falls through into the next, undoing
	 * the setup steps in reverse order of their creation above.
	 *
	 * NOTE(review): the err_context/err_unlock paths do not appear to
	 * call mock_fini_ggtt() for the ggtt initialised just above —
	 * verify this cannot leak on those paths.
	 */
err_context:
	i915_gem_contexts_fini(i915);
err_unlock:
	mutex_unlock(&i915->drm.struct_mutex);
	i915_timelines_fini(i915);
	kmem_cache_destroy(i915->priorities);
err_dependencies:
	kmem_cache_destroy(i915->dependencies);
err_requests:
	kmem_cache_destroy(i915->requests);
err_vmas:
	kmem_cache_destroy(i915->vmas);
err_objects:
	kmem_cache_destroy(i915->objects);
err_wq:
	destroy_workqueue(i915->wq);
err_drv:
	drm_mode_config_cleanup(&i915->drm);
	drm_dev_fini(&i915->drm);
put_device:
	put_device(&pdev->dev);
err:
	return NULL;
}