1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd 4 * Author:Mark Yao <mark.yao@rock-chips.com> 5 */ 6 7 #include <linux/dma-buf.h> 8 #include <linux/iommu.h> 9 #include <linux/vmalloc.h> 10 11 #include <drm/drm.h> 12 #include <drm/drm_gem.h> 13 #include <drm/drm_prime.h> 14 #include <drm/drm_vma_manager.h> 15 16 #include "rockchip_drm_drv.h" 17 #include "rockchip_drm_gem.h" 18 19 static int rockchip_gem_iommu_map(struct rockchip_gem_object *rk_obj) 20 { 21 struct drm_device *drm = rk_obj->base.dev; 22 struct rockchip_drm_private *private = drm->dev_private; 23 int prot = IOMMU_READ | IOMMU_WRITE; 24 ssize_t ret; 25 26 mutex_lock(&private->mm_lock); 27 ret = drm_mm_insert_node_generic(&private->mm, &rk_obj->mm, 28 rk_obj->base.size, PAGE_SIZE, 29 0, 0); 30 mutex_unlock(&private->mm_lock); 31 32 if (ret < 0) { 33 DRM_ERROR("out of I/O virtual memory: %zd\n", ret); 34 return ret; 35 } 36 37 rk_obj->dma_addr = rk_obj->mm.start; 38 39 ret = iommu_map_sgtable(private->domain, rk_obj->dma_addr, rk_obj->sgt, 40 prot); 41 if (ret < rk_obj->base.size) { 42 DRM_ERROR("failed to map buffer: size=%zd request_size=%zd\n", 43 ret, rk_obj->base.size); 44 ret = -ENOMEM; 45 goto err_remove_node; 46 } 47 48 rk_obj->size = ret; 49 50 return 0; 51 52 err_remove_node: 53 mutex_lock(&private->mm_lock); 54 drm_mm_remove_node(&rk_obj->mm); 55 mutex_unlock(&private->mm_lock); 56 57 return ret; 58 } 59 60 static int rockchip_gem_iommu_unmap(struct rockchip_gem_object *rk_obj) 61 { 62 struct drm_device *drm = rk_obj->base.dev; 63 struct rockchip_drm_private *private = drm->dev_private; 64 65 iommu_unmap(private->domain, rk_obj->dma_addr, rk_obj->size); 66 67 mutex_lock(&private->mm_lock); 68 69 drm_mm_remove_node(&rk_obj->mm); 70 71 mutex_unlock(&private->mm_lock); 72 73 return 0; 74 } 75 76 static int rockchip_gem_get_pages(struct rockchip_gem_object *rk_obj) 77 { 78 struct drm_device *drm = rk_obj->base.dev; 79 int ret, i; 80 struct 
scatterlist *s; 81 82 rk_obj->pages = drm_gem_get_pages(&rk_obj->base); 83 if (IS_ERR(rk_obj->pages)) 84 return PTR_ERR(rk_obj->pages); 85 86 rk_obj->num_pages = rk_obj->base.size >> PAGE_SHIFT; 87 88 rk_obj->sgt = drm_prime_pages_to_sg(rk_obj->pages, rk_obj->num_pages); 89 if (IS_ERR(rk_obj->sgt)) { 90 ret = PTR_ERR(rk_obj->sgt); 91 goto err_put_pages; 92 } 93 94 /* 95 * Fake up the SG table so that dma_sync_sg_for_device() can be used 96 * to flush the pages associated with it. 97 * 98 * TODO: Replace this by drm_clflush_sg() once it can be implemented 99 * without relying on symbols that are not exported. 100 */ 101 for_each_sgtable_sg(rk_obj->sgt, s, i) 102 sg_dma_address(s) = sg_phys(s); 103 104 dma_sync_sgtable_for_device(drm->dev, rk_obj->sgt, DMA_TO_DEVICE); 105 106 return 0; 107 108 err_put_pages: 109 drm_gem_put_pages(&rk_obj->base, rk_obj->pages, false, false); 110 return ret; 111 } 112 113 static void rockchip_gem_put_pages(struct rockchip_gem_object *rk_obj) 114 { 115 sg_free_table(rk_obj->sgt); 116 kfree(rk_obj->sgt); 117 drm_gem_put_pages(&rk_obj->base, rk_obj->pages, true, true); 118 } 119 120 static int rockchip_gem_alloc_iommu(struct rockchip_gem_object *rk_obj, 121 bool alloc_kmap) 122 { 123 int ret; 124 125 ret = rockchip_gem_get_pages(rk_obj); 126 if (ret < 0) 127 return ret; 128 129 ret = rockchip_gem_iommu_map(rk_obj); 130 if (ret < 0) 131 goto err_free; 132 133 if (alloc_kmap) { 134 rk_obj->kvaddr = vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP, 135 pgprot_writecombine(PAGE_KERNEL)); 136 if (!rk_obj->kvaddr) { 137 DRM_ERROR("failed to vmap() buffer\n"); 138 ret = -ENOMEM; 139 goto err_unmap; 140 } 141 } 142 143 return 0; 144 145 err_unmap: 146 rockchip_gem_iommu_unmap(rk_obj); 147 err_free: 148 rockchip_gem_put_pages(rk_obj); 149 150 return ret; 151 } 152 153 static int rockchip_gem_alloc_dma(struct rockchip_gem_object *rk_obj, 154 bool alloc_kmap) 155 { 156 struct drm_gem_object *obj = &rk_obj->base; 157 struct drm_device *drm = obj->dev; 
158 159 rk_obj->dma_attrs = DMA_ATTR_WRITE_COMBINE; 160 161 if (!alloc_kmap) 162 rk_obj->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING; 163 164 rk_obj->kvaddr = dma_alloc_attrs(drm->dev, obj->size, 165 &rk_obj->dma_addr, GFP_KERNEL, 166 rk_obj->dma_attrs); 167 if (!rk_obj->kvaddr) { 168 DRM_ERROR("failed to allocate %zu byte dma buffer", obj->size); 169 return -ENOMEM; 170 } 171 172 return 0; 173 } 174 175 static int rockchip_gem_alloc_buf(struct rockchip_gem_object *rk_obj, 176 bool alloc_kmap) 177 { 178 struct drm_gem_object *obj = &rk_obj->base; 179 struct drm_device *drm = obj->dev; 180 struct rockchip_drm_private *private = drm->dev_private; 181 182 if (private->domain) 183 return rockchip_gem_alloc_iommu(rk_obj, alloc_kmap); 184 else 185 return rockchip_gem_alloc_dma(rk_obj, alloc_kmap); 186 } 187 188 static void rockchip_gem_free_iommu(struct rockchip_gem_object *rk_obj) 189 { 190 vunmap(rk_obj->kvaddr); 191 rockchip_gem_iommu_unmap(rk_obj); 192 rockchip_gem_put_pages(rk_obj); 193 } 194 195 static void rockchip_gem_free_dma(struct rockchip_gem_object *rk_obj) 196 { 197 struct drm_gem_object *obj = &rk_obj->base; 198 struct drm_device *drm = obj->dev; 199 200 dma_free_attrs(drm->dev, obj->size, rk_obj->kvaddr, rk_obj->dma_addr, 201 rk_obj->dma_attrs); 202 } 203 204 static void rockchip_gem_free_buf(struct rockchip_gem_object *rk_obj) 205 { 206 if (rk_obj->pages) 207 rockchip_gem_free_iommu(rk_obj); 208 else 209 rockchip_gem_free_dma(rk_obj); 210 } 211 212 static int rockchip_drm_gem_object_mmap_iommu(struct drm_gem_object *obj, 213 struct vm_area_struct *vma) 214 { 215 struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj); 216 unsigned int count = obj->size >> PAGE_SHIFT; 217 unsigned long user_count = vma_pages(vma); 218 219 if (user_count == 0) 220 return -ENXIO; 221 222 return vm_map_pages(vma, rk_obj->pages, count); 223 } 224 225 static int rockchip_drm_gem_object_mmap_dma(struct drm_gem_object *obj, 226 struct vm_area_struct *vma) 227 { 228 struct 
rockchip_gem_object *rk_obj = to_rockchip_obj(obj); 229 struct drm_device *drm = obj->dev; 230 231 return dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr, 232 obj->size, rk_obj->dma_attrs); 233 } 234 235 static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj, 236 struct vm_area_struct *vma) 237 { 238 int ret; 239 struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj); 240 241 /* 242 * We allocated a struct page table for rk_obj, so clear 243 * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap(). 244 */ 245 vma->vm_flags &= ~VM_PFNMAP; 246 247 if (rk_obj->pages) 248 ret = rockchip_drm_gem_object_mmap_iommu(obj, vma); 249 else 250 ret = rockchip_drm_gem_object_mmap_dma(obj, vma); 251 252 if (ret) 253 drm_gem_vm_close(vma); 254 255 return ret; 256 } 257 258 int rockchip_gem_mmap_buf(struct drm_gem_object *obj, 259 struct vm_area_struct *vma) 260 { 261 int ret; 262 263 ret = drm_gem_mmap_obj(obj, obj->size, vma); 264 if (ret) 265 return ret; 266 267 return rockchip_drm_gem_object_mmap(obj, vma); 268 } 269 270 /* drm driver mmap file operations */ 271 int rockchip_gem_mmap(struct file *filp, struct vm_area_struct *vma) 272 { 273 struct drm_gem_object *obj; 274 int ret; 275 276 ret = drm_gem_mmap(filp, vma); 277 if (ret) 278 return ret; 279 280 /* 281 * Set vm_pgoff (used as a fake buffer offset by DRM) to 0 and map the 282 * whole buffer from the start. 
283 */ 284 vma->vm_pgoff = 0; 285 286 obj = vma->vm_private_data; 287 288 return rockchip_drm_gem_object_mmap(obj, vma); 289 } 290 291 static void rockchip_gem_release_object(struct rockchip_gem_object *rk_obj) 292 { 293 drm_gem_object_release(&rk_obj->base); 294 kfree(rk_obj); 295 } 296 297 static struct rockchip_gem_object * 298 rockchip_gem_alloc_object(struct drm_device *drm, unsigned int size) 299 { 300 struct rockchip_gem_object *rk_obj; 301 struct drm_gem_object *obj; 302 303 size = round_up(size, PAGE_SIZE); 304 305 rk_obj = kzalloc(sizeof(*rk_obj), GFP_KERNEL); 306 if (!rk_obj) 307 return ERR_PTR(-ENOMEM); 308 309 obj = &rk_obj->base; 310 311 drm_gem_object_init(drm, obj, size); 312 313 return rk_obj; 314 } 315 316 struct rockchip_gem_object * 317 rockchip_gem_create_object(struct drm_device *drm, unsigned int size, 318 bool alloc_kmap) 319 { 320 struct rockchip_gem_object *rk_obj; 321 int ret; 322 323 rk_obj = rockchip_gem_alloc_object(drm, size); 324 if (IS_ERR(rk_obj)) 325 return rk_obj; 326 327 ret = rockchip_gem_alloc_buf(rk_obj, alloc_kmap); 328 if (ret) 329 goto err_free_rk_obj; 330 331 return rk_obj; 332 333 err_free_rk_obj: 334 rockchip_gem_release_object(rk_obj); 335 return ERR_PTR(ret); 336 } 337 338 /* 339 * rockchip_gem_free_object - (struct drm_driver)->gem_free_object_unlocked 340 * callback function 341 */ 342 void rockchip_gem_free_object(struct drm_gem_object *obj) 343 { 344 struct drm_device *drm = obj->dev; 345 struct rockchip_drm_private *private = drm->dev_private; 346 struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj); 347 348 if (obj->import_attach) { 349 if (private->domain) { 350 rockchip_gem_iommu_unmap(rk_obj); 351 } else { 352 dma_unmap_sgtable(drm->dev, rk_obj->sgt, 353 DMA_BIDIRECTIONAL, 0); 354 } 355 drm_prime_gem_destroy(obj, rk_obj->sgt); 356 } else { 357 rockchip_gem_free_buf(rk_obj); 358 } 359 360 rockchip_gem_release_object(rk_obj); 361 } 362 363 /* 364 * rockchip_gem_create_with_handle - allocate an object with 
 * the given
 * size and create a gem handle on it
 *
 * returns a struct rockchip_gem_object* on success or ERR_PTR values
 * on failure.
 */
static struct rockchip_gem_object *
rockchip_gem_create_with_handle(struct drm_file *file_priv,
				struct drm_device *drm, unsigned int size,
				unsigned int *handle)
{
	struct rockchip_gem_object *rk_obj;
	struct drm_gem_object *obj;
	int ret;

	rk_obj = rockchip_gem_create_object(drm, size, false);
	if (IS_ERR(rk_obj))
		return ERR_CAST(rk_obj);

	obj = &rk_obj->base;

	/*
	 * allocate a id of idr table where the obj is registered
	 * and handle has the id what user can see.
	 */
	ret = drm_gem_handle_create(file_priv, obj, handle);
	if (ret)
		goto err_handle_create;

	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put(obj);

	return rk_obj;

err_handle_create:
	/* Drops the allocation reference, which frees the object. */
	rockchip_gem_free_object(obj);

	return ERR_PTR(ret);
}

/*
 * rockchip_gem_dumb_create - (struct drm_driver)->dumb_create callback
 * function
 *
 * This aligns the pitch and size arguments to the minimum required. wrap
 * this into your own function if you need bigger alignment.
 */
int rockchip_gem_dumb_create(struct drm_file *file_priv,
			     struct drm_device *dev,
			     struct drm_mode_create_dumb *args)
{
	struct rockchip_gem_object *rk_obj;
	int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);

	/*
	 * align to 64 bytes since Mali requires it.
	 */
	args->pitch = ALIGN(min_pitch, 64);
	args->size = args->pitch * args->height;

	rk_obj = rockchip_gem_create_with_handle(file_priv, dev, args->size,
						 &args->handle);

	/* Userspace only needs args->handle; the object pointer is not kept. */
	return PTR_ERR_OR_ZERO(rk_obj);
}

/*
 * Allocate a sg_table for this GEM object.
 * Note: Both the table's contents, and the sg_table itself must be freed by
 * the caller.
 * Returns a pointer to the newly allocated sg_table, or an ERR_PTR() error.
 */
struct sg_table *rockchip_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
	struct drm_device *drm = obj->dev;
	struct sg_table *sgt;
	int ret;

	/* IOMMU-backed objects already have a page array to describe. */
	if (rk_obj->pages)
		return drm_prime_pages_to_sg(rk_obj->pages, rk_obj->num_pages);

	/* DMA-API-backed: have the DMA layer build the table. */
	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	ret = dma_get_sgtable_attrs(drm->dev, sgt, rk_obj->kvaddr,
				    rk_obj->dma_addr, obj->size,
				    rk_obj->dma_attrs);
	if (ret) {
		DRM_ERROR("failed to allocate sgt, %d\n", ret);
		kfree(sgt);
		return ERR_PTR(ret);
	}

	return sgt;
}

/*
 * Import path, IOMMU case: adopt the attachment's sg_table and map it
 * into our IOMMU domain.  Ownership of @sg stays with the dma-buf layer.
 */
static int
rockchip_gem_iommu_map_sg(struct drm_device *drm,
			  struct dma_buf_attachment *attach,
			  struct sg_table *sg,
			  struct rockchip_gem_object *rk_obj)
{
	rk_obj->sgt = sg;
	return rockchip_gem_iommu_map(rk_obj);
}

/*
 * Import path, no-IOMMU case: DMA-map the table and require it to be
 * physically contiguous, since the hardware needs a linear buffer here.
 */
static int
rockchip_gem_dma_map_sg(struct drm_device *drm,
			struct dma_buf_attachment *attach,
			struct sg_table *sg,
			struct rockchip_gem_object *rk_obj)
{
	int err = dma_map_sgtable(drm->dev, sg, DMA_BIDIRECTIONAL, 0);
	if (err)
		return err;

	if (drm_prime_get_contiguous_size(sg) < attach->dmabuf->size) {
		DRM_ERROR("failed to map sg_table to contiguous linear address.\n");
		dma_unmap_sgtable(drm->dev, sg, DMA_BIDIRECTIONAL, 0);
		return -EINVAL;
	}

	rk_obj->dma_addr = sg_dma_address(sg->sgl);
	rk_obj->sgt = sg;
	return 0;
}

/*
 * (struct drm_driver)->gem_prime_import_sg_table callback: wrap an
 * imported dma-buf's sg_table in a rockchip GEM object.
 */
struct drm_gem_object *
rockchip_gem_prime_import_sg_table(struct drm_device *drm,
				   struct dma_buf_attachment *attach,
				   struct sg_table *sg)
{
	struct rockchip_drm_private *private = drm->dev_private;
	struct rockchip_gem_object *rk_obj;
	int ret;

	rk_obj = rockchip_gem_alloc_object(drm, attach->dmabuf->size);
	if (IS_ERR(rk_obj))
		return ERR_CAST(rk_obj);

	if (private->domain)
		ret = rockchip_gem_iommu_map_sg(drm, attach, sg, rk_obj);
	else
		ret = rockchip_gem_dma_map_sg(drm, attach, sg, rk_obj);

	if (ret < 0) {
		DRM_ERROR("failed to import sg table: %d\n", ret);
		goto err_free_rk_obj;
	}

	return &rk_obj->base;

err_free_rk_obj:
	rockchip_gem_release_object(rk_obj);
	return ERR_PTR(ret);
}

/*
 * Kernel-virtual mapping for prime vmap.  Returns NULL (not an error)
 * when the DMA allocation was made without a kernel mapping.
 */
void *rockchip_gem_prime_vmap(struct drm_gem_object *obj)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);

	if (rk_obj->pages)
		return vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP,
			    pgprot_writecombine(PAGE_KERNEL));

	/* With NO_KERNEL_MAPPING, kvaddr is an opaque cookie — unusable. */
	if (rk_obj->dma_attrs & DMA_ATTR_NO_KERNEL_MAPPING)
		return NULL;

	return rk_obj->kvaddr;
}

/* Undo rockchip_gem_prime_vmap(); only page-backed objects were vmap()ed. */
void rockchip_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);

	if (rk_obj->pages) {
		vunmap(vaddr);
		return;
	}

	/* Nothing to do if allocated by DMA mapping API. */
}