/*
 * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
 * Author:Mark Yao <mark.yao@rock-chips.com>
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <drm/drm.h>
#include <drm/drmP.h>
#include <drm/drm_gem.h>
#include <drm/drm_vma_manager.h>
#include <linux/iommu.h>

#include "rockchip_drm_drv.h"
#include "rockchip_drm_gem.h"

static int rockchip_gem_iommu_map(struct rockchip_gem_object *rk_obj)
{
	struct drm_device *drm = rk_obj->base.dev;
	struct rockchip_drm_private *private = drm->dev_private;
	int prot = IOMMU_READ | IOMMU_WRITE;
	ssize_t ret;

	/* mm_lock only guards the IOVA allocator, not the IOMMU mappings. */
	mutex_lock(&private->mm_lock);

	ret = drm_mm_insert_node_generic(&private->mm, &rk_obj->mm,
					 rk_obj->base.size, PAGE_SIZE,
					 0, 0);

	mutex_unlock(&private->mm_lock);

	if (ret < 0) {
		DRM_ERROR("out of I/O virtual memory: %zd\n", ret);
		return ret;
	}

	rk_obj->dma_addr = rk_obj->mm.start;

	/*
	 * iommu_map_sg() returns the number of bytes it actually mapped,
	 * which may exceed the request when the IOMMU uses larger page
	 * sizes; anything less than the object size is a failure.
	 */
	ret = iommu_map_sg(private->domain, rk_obj->dma_addr, rk_obj->sgt->sgl,
			   rk_obj->sgt->nents, prot);
	if (ret < rk_obj->base.size) {
		DRM_ERROR("failed to map buffer: size=%zd request_size=%zd\n",
			  ret, rk_obj->base.size);
		ret = -ENOMEM;
		goto err_remove_node;
	}

	rk_obj->size = ret;

	return 0;

err_remove_node:
	drm_mm_remove_node(&rk_obj->mm);

	return ret;
}

static int rockchip_gem_iommu_unmap(struct rockchip_gem_object *rk_obj)
{
	struct drm_device *drm = rk_obj->base.dev;
	struct rockchip_drm_private *private = drm->dev_private;

	iommu_unmap(private->domain, rk_obj->dma_addr, rk_obj->size);

	mutex_lock(&private->mm_lock);

	drm_mm_remove_node(&rk_obj->mm);

	mutex_unlock(&private->mm_lock);

	return 0;
}
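/*
 * rockchip_gem_get_pages - pin the object's shmem-backed pages and build a
 * scatter/gather table covering them
 *
 * The sg table's DMA addresses are faked up below only so that
 * dma_sync_sg_for_device() can flush the CPU caches; the device-visible
 * address is the IOVA reserved by rockchip_gem_iommu_map().
 */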
static int rockchip_gem_get_pages(struct rockchip_gem_object *rk_obj)
{
	struct drm_device *drm = rk_obj->base.dev;
	int ret, i;
	struct scatterlist *s;

	rk_obj->pages = drm_gem_get_pages(&rk_obj->base);
	if (IS_ERR(rk_obj->pages))
		return PTR_ERR(rk_obj->pages);

	rk_obj->num_pages = rk_obj->base.size >> PAGE_SHIFT;

	rk_obj->sgt = drm_prime_pages_to_sg(rk_obj->pages, rk_obj->num_pages);
	if (IS_ERR(rk_obj->sgt)) {
		ret = PTR_ERR(rk_obj->sgt);
		goto err_put_pages;
	}

	/*
	 * Fake up the SG table so that dma_sync_sg_for_device() can be used
	 * to flush the pages associated with it.
	 *
	 * TODO: Replace this by drm_clflush_sg() once it can be implemented
	 * without relying on symbols that are not exported.
	 */
	for_each_sg(rk_obj->sgt->sgl, s, rk_obj->sgt->nents, i)
		sg_dma_address(s) = sg_phys(s);

	dma_sync_sg_for_device(drm->dev, rk_obj->sgt->sgl, rk_obj->sgt->nents,
			       DMA_TO_DEVICE);

	return 0;

err_put_pages:
	drm_gem_put_pages(&rk_obj->base, rk_obj->pages, false, false);
	return ret;
}

static void rockchip_gem_put_pages(struct rockchip_gem_object *rk_obj)
{
	sg_free_table(rk_obj->sgt);
	kfree(rk_obj->sgt);
	drm_gem_put_pages(&rk_obj->base, rk_obj->pages, true, true);
}

static int rockchip_gem_alloc_iommu(struct rockchip_gem_object *rk_obj,
				    bool alloc_kmap)
{
	int ret;

	ret = rockchip_gem_get_pages(rk_obj);
	if (ret < 0)
		return ret;

	ret = rockchip_gem_iommu_map(rk_obj);
	if (ret < 0)
		goto err_free;

	if (alloc_kmap) {
		rk_obj->kvaddr = vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP,
				      pgprot_writecombine(PAGE_KERNEL));
		if (!rk_obj->kvaddr) {
			DRM_ERROR("failed to vmap() buffer\n");
			ret = -ENOMEM;
			goto err_unmap;
		}
	}

	return 0;

err_unmap:
	rockchip_gem_iommu_unmap(rk_obj);
err_free:
	rockchip_gem_put_pages(rk_obj);

	return ret;
}

static int rockchip_gem_alloc_dma(struct rockchip_gem_object *rk_obj,
				  bool alloc_kmap)
{
	struct drm_gem_object *obj = &rk_obj->base;
	struct drm_device *drm = obj->dev;

	rk_obj->dma_attrs = DMA_ATTR_WRITE_COMBINE;

	if (!alloc_kmap)
		rk_obj->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING;

	rk_obj->kvaddr = dma_alloc_attrs(drm->dev, obj->size,
					 &rk_obj->dma_addr, GFP_KERNEL,
					 rk_obj->dma_attrs);
	if (!rk_obj->kvaddr) {
		DRM_ERROR("failed to allocate %zu byte dma buffer\n",
			  obj->size);
		return -ENOMEM;
	}

	return 0;
}

static int rockchip_gem_alloc_buf(struct rockchip_gem_object *rk_obj,
				  bool alloc_kmap)
{
	struct drm_gem_object *obj = &rk_obj->base;
	struct drm_device *drm = obj->dev;
	struct rockchip_drm_private *private = drm->dev_private;

	if (private->domain)
		return rockchip_gem_alloc_iommu(rk_obj, alloc_kmap);
	else
		return rockchip_gem_alloc_dma(rk_obj, alloc_kmap);
}

static void rockchip_gem_free_iommu(struct rockchip_gem_object *rk_obj)
{
	vunmap(rk_obj->kvaddr);
	rockchip_gem_iommu_unmap(rk_obj);
	rockchip_gem_put_pages(rk_obj);
}

static void rockchip_gem_free_dma(struct rockchip_gem_object *rk_obj)
{
	struct drm_gem_object *obj = &rk_obj->base;
	struct drm_device *drm = obj->dev;

	dma_free_attrs(drm->dev, obj->size, rk_obj->kvaddr, rk_obj->dma_addr,
		       rk_obj->dma_attrs);
}

static void rockchip_gem_free_buf(struct rockchip_gem_object *rk_obj)
{
	/* Only the IOMMU path populates the pages array. */
	if (rk_obj->pages)
		rockchip_gem_free_iommu(rk_obj);
	else
		rockchip_gem_free_dma(rk_obj);
}
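/*
 * Userspace mapping takes one of two paths below: IOMMU-backed objects have
 * a pages array and are inserted into the VMA page by page with
 * vm_insert_page(), while contiguous DMA allocations are delegated to
 * dma_mmap_attrs(), which can also map buffers that have no kernel virtual
 * address (DMA_ATTR_NO_KERNEL_MAPPING).
 */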
static int rockchip_drm_gem_object_mmap_iommu(struct drm_gem_object *obj,
					      struct vm_area_struct *vma)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
	unsigned int i, count = obj->size >> PAGE_SHIFT;
	unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
	unsigned long uaddr = vma->vm_start;
	unsigned long offset = vma->vm_pgoff;
	unsigned long end = user_count + offset;
	int ret;

	if (user_count == 0)
		return -ENXIO;
	if (end > count)
		return -ENXIO;

	for (i = offset; i < end; i++) {
		ret = vm_insert_page(vma, uaddr, rk_obj->pages[i]);
		if (ret)
			return ret;
		uaddr += PAGE_SIZE;
	}

	return 0;
}

static int rockchip_drm_gem_object_mmap_dma(struct drm_gem_object *obj,
					    struct vm_area_struct *vma)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
	struct drm_device *drm = obj->dev;

	return dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr,
			      obj->size, rk_obj->dma_attrs);
}

static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj,
					struct vm_area_struct *vma)
{
	int ret;
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);

	/*
	 * We allocated a struct page table for rk_obj, so clear the
	 * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap().
	 */
	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_pgoff = 0;

	if (rk_obj->pages)
		ret = rockchip_drm_gem_object_mmap_iommu(obj, vma);
	else
		ret = rockchip_drm_gem_object_mmap_dma(obj, vma);

	if (ret)
		drm_gem_vm_close(vma);

	return ret;
}

int rockchip_gem_mmap_buf(struct drm_gem_object *obj,
			  struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap_obj(obj, obj->size, vma);
	if (ret)
		return ret;

	return rockchip_drm_gem_object_mmap(obj, vma);
}

/* drm driver mmap file operations */
int rockchip_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_gem_object *obj;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret)
		return ret;

	obj = vma->vm_private_data;

	return rockchip_drm_gem_object_mmap(obj, vma);
}

static void rockchip_gem_release_object(struct rockchip_gem_object *rk_obj)
{
	drm_gem_object_release(&rk_obj->base);
	kfree(rk_obj);
}

struct rockchip_gem_object *
rockchip_gem_create_object(struct drm_device *drm, unsigned int size,
			   bool alloc_kmap)
{
	struct rockchip_gem_object *rk_obj;
	struct drm_gem_object *obj;
	int ret;

	size = round_up(size, PAGE_SIZE);

	rk_obj = kzalloc(sizeof(*rk_obj), GFP_KERNEL);
	if (!rk_obj)
		return ERR_PTR(-ENOMEM);

	obj = &rk_obj->base;

	/* drm_gem_object_init() can fail if the shmem backing store cannot
	 * be set up, so check its return value.
	 */
	ret = drm_gem_object_init(drm, obj, size);
	if (ret) {
		kfree(rk_obj);
		return ERR_PTR(ret);
	}

	ret = rockchip_gem_alloc_buf(rk_obj, alloc_kmap);
	if (ret)
		goto err_free_rk_obj;

	return rk_obj;

err_free_rk_obj:
	rockchip_gem_release_object(rk_obj);
	return ERR_PTR(ret);
}

/*
 * rockchip_gem_free_object - (struct drm_driver)->gem_free_object callback
 * function
 */
void rockchip_gem_free_object(struct drm_gem_object *obj)
{
	struct rockchip_gem_object *rk_obj;

	rk_obj = to_rockchip_obj(obj);

	rockchip_gem_free_buf(rk_obj);

	rockchip_gem_release_object(rk_obj);
}

/*
 * rockchip_gem_create_with_handle - allocate an object with the given
 * size and create a gem handle on it
 *
 * Returns a struct rockchip_gem_object* on success or an ERR_PTR() value
 * on failure.
 */
static struct rockchip_gem_object *
rockchip_gem_create_with_handle(struct drm_file *file_priv,
				struct drm_device *drm, unsigned int size,
				unsigned int *handle)
{
	struct rockchip_gem_object *rk_obj;
	struct drm_gem_object *obj;
	int ret;

	rk_obj = rockchip_gem_create_object(drm, size, false);
	if (IS_ERR(rk_obj))
		return ERR_CAST(rk_obj);

	obj = &rk_obj->base;

	/*
	 * Allocate an ID from the IDR table where the object is registered;
	 * the returned handle is the ID userspace uses to refer to it.
	 */
	ret = drm_gem_handle_create(file_priv, obj, handle);
	if (ret)
		goto err_handle_create;

	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_unreference_unlocked(obj);

	return rk_obj;

err_handle_create:
	rockchip_gem_free_object(obj);

	return ERR_PTR(ret);
}

int rockchip_gem_dumb_map_offset(struct drm_file *file_priv,
				 struct drm_device *dev, uint32_t handle,
				 uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_object_lookup(file_priv, handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return -EINVAL;
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto out;

	*offset = drm_vma_node_offset_addr(&obj->vma_node);
	DRM_DEBUG_KMS("offset = 0x%llx\n", *offset);

out:
	drm_gem_object_unreference_unlocked(obj);

	/* Propagate a drm_gem_create_mmap_offset() failure to the caller. */
	return ret;
}

/*
 * rockchip_gem_dumb_create - (struct drm_driver)->dumb_create callback
 * function
 *
 * This aligns the pitch and size arguments to the minimum required. Wrap
 * this into your own function if you need bigger alignment.
 */
int rockchip_gem_dumb_create(struct drm_file *file_priv,
			     struct drm_device *dev,
			     struct drm_mode_create_dumb *args)
{
	struct rockchip_gem_object *rk_obj;
	int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);

	/*
	 * Align to 64 bytes since the Mali GPU requires it.
	 */
	args->pitch = ALIGN(min_pitch, 64);
	args->size = args->pitch * args->height;

	rk_obj = rockchip_gem_create_with_handle(file_priv, dev, args->size,
						 &args->handle);

	return PTR_ERR_OR_ZERO(rk_obj);
}
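/*
 * For reference, a minimal userspace sketch of the dumb-buffer path above
 * (not part of this driver; header paths vary with the libdrm installation,
 * and error handling is omitted for brevity):
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/mman.h>
 *	#include <drm/drm.h>
 *	#include <drm/drm_mode.h>
 *
 *	int fd = open("/dev/dri/card0", O_RDWR | O_CLOEXEC);
 *
 *	struct drm_mode_create_dumb create = {
 *		.width = 1920, .height = 1080, .bpp = 32,
 *	};
 *	ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);
 *
 *	struct drm_mode_map_dumb map = { .handle = create.handle };
 *	ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
 *
 *	void *fb = mmap(NULL, create.size, PROT_READ | PROT_WRITE,
 *			MAP_SHARED, fd, map.offset);
 *
 * DRM_IOCTL_MODE_CREATE_DUMB lands in rockchip_gem_dumb_create(),
 * DRM_IOCTL_MODE_MAP_DUMB in rockchip_gem_dumb_map_offset(), and the final
 * mmap() on the returned fake offset in rockchip_gem_mmap().
 */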
/*
 * Allocate a sg_table for this GEM object.
 * Note: Both the table's contents, and the sg_table itself must be freed by
 * the caller.
 * Returns a pointer to the newly allocated sg_table, or an ERR_PTR() error.
 */
struct sg_table *rockchip_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
	struct drm_device *drm = obj->dev;
	struct sg_table *sgt;
	int ret;

	if (rk_obj->pages)
		return drm_prime_pages_to_sg(rk_obj->pages, rk_obj->num_pages);

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	ret = dma_get_sgtable_attrs(drm->dev, sgt, rk_obj->kvaddr,
				    rk_obj->dma_addr, obj->size,
				    rk_obj->dma_attrs);
	if (ret) {
		DRM_ERROR("failed to allocate sgt, %d\n", ret);
		kfree(sgt);
		return ERR_PTR(ret);
	}

	return sgt;
}

void *rockchip_gem_prime_vmap(struct drm_gem_object *obj)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);

	if (rk_obj->pages)
		return vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP,
			    pgprot_writecombine(PAGE_KERNEL));

	if (rk_obj->dma_attrs & DMA_ATTR_NO_KERNEL_MAPPING)
		return NULL;

	return rk_obj->kvaddr;
}

void rockchip_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);

	if (rk_obj->pages) {
		vunmap(vaddr);
		return;
	}

	/* Nothing to do if allocated by DMA mapping API. */
}
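/*
 * For reference, exporting one of these objects through PRIME from userspace
 * is what eventually triggers rockchip_gem_prime_get_sg_table() when the
 * importer maps the dma-buf.  An illustrative sketch only, reusing the fd
 * and handle from the dumb-buffer example above:
 *
 *	struct drm_prime_handle prime = {
 *		.handle = create.handle,
 *		.flags = DRM_CLOEXEC,
 *	};
 *	ioctl(fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &prime);
 *
 * prime.fd is now a dma-buf file descriptor that another driver can import
 * for zero-copy sharing.
 */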