// SPDX-License-Identifier: MIT
/*
 * Copyright (C) 2013-2017 Oracle Corporation
 * This file is based on ast_ttm.c
 * Copyright 2012 Red Hat Inc.
 * Authors: Dave Airlie <airlied@redhat.com>
 *          Michael Thayer <michael.thayer@oracle.com>
 */
#include <linux/pci.h>
#include <drm/drm_file.h>
#include <drm/ttm/ttm_page_alloc.h>
#include "vbox_drv.h"

static inline struct vbox_private *vbox_bdev(struct ttm_bo_device *bd)
{
	return container_of(bd, struct vbox_private, ttm.bdev);
}

static void vbox_bo_ttm_destroy(struct ttm_buffer_object *tbo)
{
	struct vbox_bo *bo;

	bo = container_of(tbo, struct vbox_bo, bo);

	drm_gem_object_release(&bo->gem);
	kfree(bo);
}

static bool vbox_ttm_bo_is_vbox_bo(struct ttm_buffer_object *bo)
{
	if (bo->destroy == &vbox_bo_ttm_destroy)
		return true;

	return false;
}

static int
vbox_bo_init_mem_type(struct ttm_bo_device *bdev, u32 type,
		      struct ttm_mem_type_manager *man)
{
	switch (type) {
	case TTM_PL_SYSTEM:
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		man->func = &ttm_bo_manager_func;
		man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	default:
		DRM_ERROR("Unsupported memory type %u\n", (unsigned int)type);
		return -EINVAL;
	}

	return 0;
}

static void
vbox_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
	struct vbox_bo *vboxbo = vbox_bo(bo);

	if (!vbox_ttm_bo_is_vbox_bo(bo))
		return;

	vbox_ttm_placement(vboxbo, TTM_PL_FLAG_SYSTEM);
	*pl = vboxbo->placement;
}

static int vbox_bo_verify_access(struct ttm_buffer_object *bo,
				 struct file *filp)
{
	return 0;
}

static int vbox_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
				   struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct vbox_private *vbox = vbox_bdev(bdev);

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* system memory */
		return 0;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		mem->bus.base = pci_resource_start(vbox->ddev.pdev, 0);
		mem->bus.is_iomem = true;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void vbox_ttm_io_mem_free(struct ttm_bo_device *bdev,
				 struct ttm_mem_reg *mem)
{
}

static void vbox_ttm_backend_destroy(struct ttm_tt *tt)
{
	ttm_tt_fini(tt);
	kfree(tt);
}

static struct ttm_backend_func vbox_tt_backend_func = {
	.destroy = &vbox_ttm_backend_destroy,
};

static struct ttm_tt *vbox_ttm_tt_create(struct ttm_buffer_object *bo,
					 u32 page_flags)
{
	struct ttm_tt *tt;

	tt = kzalloc(sizeof(*tt), GFP_KERNEL);
	if (!tt)
		return NULL;

	tt->func = &vbox_tt_backend_func;
	if (ttm_tt_init(tt, bo, page_flags)) {
		kfree(tt);
		return NULL;
	}

	return tt;
}

static struct ttm_bo_driver vbox_bo_driver = {
	.ttm_tt_create = vbox_ttm_tt_create,
	.init_mem_type = vbox_bo_init_mem_type,
	.eviction_valuable = ttm_bo_eviction_valuable,
	.evict_flags = vbox_bo_evict_flags,
	.verify_access = vbox_bo_verify_access,
	.io_mem_reserve = &vbox_ttm_io_mem_reserve,
	.io_mem_free = &vbox_ttm_io_mem_free,
};
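/*
 * vbox_mm_init() - set up TTM-based memory management.
 *
 * Initialises the TTM bo device, registers a VRAM manager sized to the
 * VRAM the host exposes, and (as a best-effort optimisation) marks the
 * framebuffer BAR write-combining so that CPU writes to VRAM are not
 * fully uncached.  vbox_mm_fini() below undoes this in reverse order.
 */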
int vbox_mm_init(struct vbox_private *vbox)
{
	int ret;
	struct drm_device *dev = &vbox->ddev;
	struct ttm_bo_device *bdev = &vbox->ttm.bdev;

	ret = ttm_bo_device_init(&vbox->ttm.bdev,
				 &vbox_bo_driver,
				 dev->anon_inode->i_mapping,
				 true);
	if (ret) {
		DRM_ERROR("Error initialising bo driver: %d\n", ret);
		return ret;
	}

	ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
			     vbox->available_vram_size >> PAGE_SHIFT);
	if (ret) {
		DRM_ERROR("Failed ttm VRAM init: %d\n", ret);
		goto err_device_release;
	}

#ifdef DRM_MTRR_WC
	vbox->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 0),
				     pci_resource_len(dev->pdev, 0),
				     DRM_MTRR_WC);
#else
	vbox->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0),
					 pci_resource_len(dev->pdev, 0));
#endif
	return 0;

err_device_release:
	ttm_bo_device_release(&vbox->ttm.bdev);
	return ret;
}

void vbox_mm_fini(struct vbox_private *vbox)
{
#ifdef DRM_MTRR_WC
	drm_mtrr_del(vbox->fb_mtrr,
		     pci_resource_start(vbox->ddev.pdev, 0),
		     pci_resource_len(vbox->ddev.pdev, 0), DRM_MTRR_WC);
#else
	arch_phys_wc_del(vbox->fb_mtrr);
#endif
	ttm_bo_device_release(&vbox->ttm.bdev);
}

/*
 * Fill in bo->placement for the domains requested in @domain, falling
 * back to TTM_PL_FLAG_SYSTEM if no supported domain bit is set.
 */
void vbox_ttm_placement(struct vbox_bo *bo, int domain)
{
	unsigned int i;
	u32 c = 0;

	bo->placement.placement = bo->placements;
	bo->placement.busy_placement = bo->placements;

	if (domain & TTM_PL_FLAG_VRAM)
		bo->placements[c++].flags =
		    TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM;
	if (domain & TTM_PL_FLAG_SYSTEM)
		bo->placements[c++].flags =
		    TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
	if (!c)
		bo->placements[c++].flags =
		    TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;

	bo->placement.num_placement = c;
	bo->placement.num_busy_placement = c;

	for (i = 0; i < c; ++i) {
		bo->placements[i].fpfn = 0;
		bo->placements[i].lpfn = 0;	/* 0 == no upper pfn bound */
	}
}
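/*
 * vbox_bo_create() - allocate a GEM-backed TTM buffer object.
 *
 * The new object is given a VRAM | SYSTEM placement, so TTM may keep it
 * in either pool until it is pinned.  A minimal usage sketch, with error
 * handling elided ("bo", "ptr" and "size" are illustrative locals, not
 * names defined in this file):
 *
 *	struct vbox_bo *bo;
 *	void *ptr;
 *
 *	vbox_bo_create(vbox, size, PAGE_SIZE, 0, &bo);
 *	vbox_bo_pin(bo, TTM_PL_FLAG_VRAM);	(make resident, unevictable)
 *	ptr = vbox_bo_kmap(bo);			(CPU mapping of the buffer)
 *	...
 *	vbox_bo_kunmap(bo);
 *	vbox_bo_unpin(bo);
 */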
int vbox_bo_create(struct vbox_private *vbox, int size, int align,
		   u32 flags, struct vbox_bo **pvboxbo)
{
	struct vbox_bo *vboxbo;
	size_t acc_size;
	int ret;

	vboxbo = kzalloc(sizeof(*vboxbo), GFP_KERNEL);
	if (!vboxbo)
		return -ENOMEM;

	ret = drm_gem_object_init(&vbox->ddev, &vboxbo->gem, size);
	if (ret)
		goto err_free_vboxbo;

	vboxbo->bo.bdev = &vbox->ttm.bdev;

	vbox_ttm_placement(vboxbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);

	acc_size = ttm_bo_dma_acc_size(&vbox->ttm.bdev, size,
				       sizeof(struct vbox_bo));

	ret = ttm_bo_init(&vbox->ttm.bdev, &vboxbo->bo, size,
			  ttm_bo_type_device, &vboxbo->placement,
			  align >> PAGE_SHIFT, false, acc_size,
			  NULL, NULL, vbox_bo_ttm_destroy);
	/*
	 * On failure ttm_bo_init() has already invoked the destroy
	 * callback (vbox_bo_ttm_destroy()), which released the GEM
	 * object and freed vboxbo, so jumping to err_free_vboxbo here
	 * would double-free it.
	 */
	if (ret)
		return ret;

	*pvboxbo = vboxbo;

	return 0;

err_free_vboxbo:
	kfree(vboxbo);
	return ret;
}

int vbox_bo_pin(struct vbox_bo *bo, u32 pl_flag)
{
	struct ttm_operation_ctx ctx = { false, false };
	int i, ret;

	if (bo->pin_count) {
		bo->pin_count++;
		return 0;
	}

	ret = vbox_bo_reserve(bo, false);
	if (ret)
		return ret;

	vbox_ttm_placement(bo, pl_flag);

	for (i = 0; i < bo->placement.num_placement; i++)
		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;

	ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
	if (ret == 0)
		bo->pin_count = 1;

	vbox_bo_unreserve(bo);

	return ret;
}

int vbox_bo_unpin(struct vbox_bo *bo)
{
	struct ttm_operation_ctx ctx = { false, false };
	int i, ret;

	if (!bo->pin_count) {
		DRM_ERROR("unpin of unpinned buffer %p\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;

	ret = vbox_bo_reserve(bo, false);
	if (ret) {
		DRM_ERROR("Error %d reserving bo, leaving it pinned\n", ret);
		return ret;
	}

	for (i = 0; i < bo->placement.num_placement; i++)
		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;

	ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);

	vbox_bo_unreserve(bo);

	return ret;
}

/*
 * Move a vbox-owned buffer object to system memory if no one else has it
 * pinned. The caller must have pinned it previously, and this call will
 * release the caller's pin.
 */
int vbox_bo_push_sysram(struct vbox_bo *bo)
{
	struct ttm_operation_ctx ctx = { false, false };
	int i, ret;

	if (!bo->pin_count) {
		DRM_ERROR("unpin of unpinned buffer %p\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;

	if (bo->kmap.virtual) {
		ttm_bo_kunmap(&bo->kmap);
		bo->kmap.virtual = NULL;
	}

	vbox_ttm_placement(bo, TTM_PL_FLAG_SYSTEM);

	for (i = 0; i < bo->placement.num_placement; i++)
		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;

	ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
	if (ret) {
		DRM_ERROR("pushing to system memory failed\n");
		return ret;
	}

	return 0;
}

int vbox_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv = filp->private_data;
	struct vbox_private *vbox = file_priv->minor->dev->dev_private;

	return ttm_bo_mmap(filp, vma, &vbox->ttm.bdev);
}

void *vbox_bo_kmap(struct vbox_bo *bo)
{
	int ret;

	if (bo->kmap.virtual)
		return bo->kmap.virtual;

	ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
	if (ret) {
		DRM_ERROR("Error kmapping bo: %d\n", ret);
		return NULL;
	}

	return bo->kmap.virtual;
}

void vbox_bo_kunmap(struct vbox_bo *bo)
{
	if (bo->kmap.virtual) {
		ttm_bo_kunmap(&bo->kmap);
		bo->kmap.virtual = NULL;
	}
}