/*
 * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
 * Author: Mark Yao <mark.yao@rock-chips.com>
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <drm/drm.h>
#include <drm/drmP.h>
#include <drm/drm_gem.h>
#include <drm/drm_vma_manager.h>
#include <linux/iommu.h>

#include "rockchip_drm_drv.h"
#include "rockchip_drm_gem.h"

static int rockchip_gem_iommu_map(struct rockchip_gem_object *rk_obj)
{
	struct drm_device *drm = rk_obj->base.dev;
	struct rockchip_drm_private *private = drm->dev_private;
	int prot = IOMMU_READ | IOMMU_WRITE;
	ssize_t ret;

	mutex_lock(&private->mm_lock);
	ret = drm_mm_insert_node_generic(&private->mm, &rk_obj->mm,
					 rk_obj->base.size, PAGE_SIZE,
					 0, 0);
	mutex_unlock(&private->mm_lock);

	if (ret < 0) {
		DRM_ERROR("out of I/O virtual memory: %zd\n", ret);
		return ret;
	}

	rk_obj->dma_addr = rk_obj->mm.start;

	ret = iommu_map_sg(private->domain, rk_obj->dma_addr, rk_obj->sgt->sgl,
			   rk_obj->sgt->nents, prot);
	if (ret < rk_obj->base.size) {
		DRM_ERROR("failed to map buffer: size=%zd request_size=%zu\n",
			  ret, rk_obj->base.size);
		ret = -ENOMEM;
		goto err_remove_node;
	}

	rk_obj->size = ret;

	return 0;

err_remove_node:
	mutex_lock(&private->mm_lock);
	drm_mm_remove_node(&rk_obj->mm);
	mutex_unlock(&private->mm_lock);

	return ret;
}

static int rockchip_gem_iommu_unmap(struct rockchip_gem_object *rk_obj)
{
	struct drm_device *drm = rk_obj->base.dev;
	struct rockchip_drm_private *private = drm->dev_private;

	iommu_unmap(private->domain, rk_obj->dma_addr, rk_obj->size);

	mutex_lock(&private->mm_lock);

	drm_mm_remove_node(&rk_obj->mm);

	mutex_unlock(&private->mm_lock);

	return 0;
}
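/*
 * For reference, a sketch of how the IOVA space consumed above is expected
 * to be set up. In the upstream driver this lives in rockchip_drm_drv.c,
 * with the aperture bounds taken from the IOMMU domain geometry; the exact
 * values are SoC-specific and this snippet is illustrative only:
 *
 *	drm_mm_init(&private->mm, start, end - start + 1);
 *	mutex_init(&private->mm_lock);
 */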
static int rockchip_gem_get_pages(struct rockchip_gem_object *rk_obj)
{
	struct drm_device *drm = rk_obj->base.dev;
	int ret, i;
	struct scatterlist *s;

	rk_obj->pages = drm_gem_get_pages(&rk_obj->base);
	if (IS_ERR(rk_obj->pages))
		return PTR_ERR(rk_obj->pages);

	rk_obj->num_pages = rk_obj->base.size >> PAGE_SHIFT;

	rk_obj->sgt = drm_prime_pages_to_sg(rk_obj->pages, rk_obj->num_pages);
	if (IS_ERR(rk_obj->sgt)) {
		ret = PTR_ERR(rk_obj->sgt);
		goto err_put_pages;
	}

	/*
	 * Fake up the SG table so that dma_sync_sg_for_device() can be used
	 * to flush the pages associated with it.
	 *
	 * TODO: Replace this by drm_clflush_sg() once it can be implemented
	 * without relying on symbols that are not exported.
	 */
	for_each_sg(rk_obj->sgt->sgl, s, rk_obj->sgt->nents, i)
		sg_dma_address(s) = sg_phys(s);

	dma_sync_sg_for_device(drm->dev, rk_obj->sgt->sgl, rk_obj->sgt->nents,
			       DMA_TO_DEVICE);

	return 0;

err_put_pages:
	drm_gem_put_pages(&rk_obj->base, rk_obj->pages, false, false);
	return ret;
}

static void rockchip_gem_put_pages(struct rockchip_gem_object *rk_obj)
{
	sg_free_table(rk_obj->sgt);
	kfree(rk_obj->sgt);
	drm_gem_put_pages(&rk_obj->base, rk_obj->pages, true, true);
}

static int rockchip_gem_alloc_iommu(struct rockchip_gem_object *rk_obj,
				    bool alloc_kmap)
{
	int ret;

	ret = rockchip_gem_get_pages(rk_obj);
	if (ret < 0)
		return ret;

	ret = rockchip_gem_iommu_map(rk_obj);
	if (ret < 0)
		goto err_free;

	if (alloc_kmap) {
		rk_obj->kvaddr = vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP,
				      pgprot_writecombine(PAGE_KERNEL));
		if (!rk_obj->kvaddr) {
			DRM_ERROR("failed to vmap() buffer\n");
			ret = -ENOMEM;
			goto err_unmap;
		}
	}

	return 0;

err_unmap:
	rockchip_gem_iommu_unmap(rk_obj);
err_free:
	rockchip_gem_put_pages(rk_obj);

	return ret;
}

static int rockchip_gem_alloc_dma(struct rockchip_gem_object *rk_obj,
				  bool alloc_kmap)
{
	struct drm_gem_object *obj = &rk_obj->base;
	struct drm_device *drm = obj->dev;

	rk_obj->dma_attrs = DMA_ATTR_WRITE_COMBINE;

	if (!alloc_kmap)
		rk_obj->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING;

	rk_obj->kvaddr = dma_alloc_attrs(drm->dev, obj->size,
					 &rk_obj->dma_addr, GFP_KERNEL,
					 rk_obj->dma_attrs);
	if (!rk_obj->kvaddr) {
		DRM_ERROR("failed to allocate %zu byte dma buffer\n",
			  obj->size);
		return -ENOMEM;
	}

	return 0;
}

static int rockchip_gem_alloc_buf(struct rockchip_gem_object *rk_obj,
				  bool alloc_kmap)
{
	struct drm_gem_object *obj = &rk_obj->base;
	struct drm_device *drm = obj->dev;
	struct rockchip_drm_private *private = drm->dev_private;

	if (private->domain)
		return rockchip_gem_alloc_iommu(rk_obj, alloc_kmap);
	else
		return rockchip_gem_alloc_dma(rk_obj, alloc_kmap);
}

static void rockchip_gem_free_iommu(struct rockchip_gem_object *rk_obj)
{
	vunmap(rk_obj->kvaddr);
	rockchip_gem_iommu_unmap(rk_obj);
	rockchip_gem_put_pages(rk_obj);
}

static void rockchip_gem_free_dma(struct rockchip_gem_object *rk_obj)
{
	struct drm_gem_object *obj = &rk_obj->base;
	struct drm_device *drm = obj->dev;

	dma_free_attrs(drm->dev, obj->size, rk_obj->kvaddr, rk_obj->dma_addr,
		       rk_obj->dma_attrs);
}

static void rockchip_gem_free_buf(struct rockchip_gem_object *rk_obj)
{
	if (rk_obj->pages)
		rockchip_gem_free_iommu(rk_obj);
	else
		rockchip_gem_free_dma(rk_obj);
}
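/*
 * mmap paths. Note the split below: IOMMU-backed objects are built from
 * individual, potentially discontiguous pages, so they are inserted into
 * the userspace VMA one page at a time with vm_insert_page(); DMA-backed
 * objects are handed to dma_mmap_attrs(), which knows how to map the
 * contiguous allocation it returned earlier.
 */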
static int rockchip_drm_gem_object_mmap_iommu(struct drm_gem_object *obj,
					      struct vm_area_struct *vma)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
	unsigned int i, count = obj->size >> PAGE_SHIFT;
	unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
	unsigned long uaddr = vma->vm_start;
	unsigned long offset = vma->vm_pgoff;
	unsigned long end = user_count + offset;
	int ret;

	if (user_count == 0)
		return -ENXIO;
	if (end > count)
		return -ENXIO;

	for (i = offset; i < end; i++) {
		ret = vm_insert_page(vma, uaddr, rk_obj->pages[i]);
		if (ret)
			return ret;
		uaddr += PAGE_SIZE;
	}

	return 0;
}

static int rockchip_drm_gem_object_mmap_dma(struct drm_gem_object *obj,
					    struct vm_area_struct *vma)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
	struct drm_device *drm = obj->dev;

	return dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr,
			      obj->size, rk_obj->dma_attrs);
}

static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj,
					struct vm_area_struct *vma)
{
	int ret;
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);

	/*
	 * We allocated a struct page table for rk_obj, so clear
	 * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap().
	 */
	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_pgoff = 0;

	if (rk_obj->pages)
		ret = rockchip_drm_gem_object_mmap_iommu(obj, vma);
	else
		ret = rockchip_drm_gem_object_mmap_dma(obj, vma);

	if (ret)
		drm_gem_vm_close(vma);

	return ret;
}

int rockchip_gem_mmap_buf(struct drm_gem_object *obj,
			  struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap_obj(obj, obj->size, vma);
	if (ret)
		return ret;

	return rockchip_drm_gem_object_mmap(obj, vma);
}

/* drm driver mmap file operations */
int rockchip_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_gem_object *obj;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret)
		return ret;

	obj = vma->vm_private_data;

	return rockchip_drm_gem_object_mmap(obj, vma);
}

static void rockchip_gem_release_object(struct rockchip_gem_object *rk_obj)
{
	drm_gem_object_release(&rk_obj->base);
	kfree(rk_obj);
}

struct rockchip_gem_object *
rockchip_gem_create_object(struct drm_device *drm, unsigned int size,
			   bool alloc_kmap)
{
	struct rockchip_gem_object *rk_obj;
	struct drm_gem_object *obj;
	int ret;

	size = round_up(size, PAGE_SIZE);

	rk_obj = kzalloc(sizeof(*rk_obj), GFP_KERNEL);
	if (!rk_obj)
		return ERR_PTR(-ENOMEM);

	obj = &rk_obj->base;

	drm_gem_object_init(drm, obj, size);

	ret = rockchip_gem_alloc_buf(rk_obj, alloc_kmap);
	if (ret)
		goto err_free_rk_obj;

	return rk_obj;

err_free_rk_obj:
	rockchip_gem_release_object(rk_obj);
	return ERR_PTR(ret);
}

/*
 * rockchip_gem_free_object - (struct drm_driver)->gem_free_object callback
 * function
 */
void rockchip_gem_free_object(struct drm_gem_object *obj)
{
	struct rockchip_gem_object *rk_obj;

	rk_obj = to_rockchip_obj(obj);

	rockchip_gem_free_buf(rk_obj);

	rockchip_gem_release_object(rk_obj);
}
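/*
 * Illustrative only (not part of the driver): the dumb-buffer path below is
 * what a KMS client reaches through the CREATE_DUMB ioctl, roughly:
 *
 *	struct drm_mode_create_dumb arg = {
 *		.width = 1920, .height = 1080, .bpp = 32,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &arg);
 *	// arg.handle, arg.pitch and arg.size are filled in on return
 */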
/*
 * rockchip_gem_create_with_handle - allocate an object with the given
 * size and create a gem handle on it
 *
 * Returns a struct rockchip_gem_object * on success or an ERR_PTR() value
 * on failure.
 */
static struct rockchip_gem_object *
rockchip_gem_create_with_handle(struct drm_file *file_priv,
				struct drm_device *drm, unsigned int size,
				unsigned int *handle)
{
	struct rockchip_gem_object *rk_obj;
	struct drm_gem_object *obj;
	int ret;

	rk_obj = rockchip_gem_create_object(drm, size, false);
	if (IS_ERR(rk_obj))
		return ERR_CAST(rk_obj);

	obj = &rk_obj->base;

	/*
	 * Allocate an id from the idr table, under which the obj is
	 * registered; the resulting handle is the id userspace sees.
	 */
	ret = drm_gem_handle_create(file_priv, obj, handle);
	if (ret)
		goto err_handle_create;

	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put_unlocked(obj);

	return rk_obj;

err_handle_create:
	rockchip_gem_free_object(obj);

	return ERR_PTR(ret);
}

/*
 * rockchip_gem_dumb_create - (struct drm_driver)->dumb_create callback
 * function
 *
 * This aligns the pitch and size arguments to the minimum required. Wrap
 * this into your own function if you need bigger alignment.
 */
int rockchip_gem_dumb_create(struct drm_file *file_priv,
			     struct drm_device *dev,
			     struct drm_mode_create_dumb *args)
{
	struct rockchip_gem_object *rk_obj;
	int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);

	/*
	 * Align to 64 bytes since Mali requires it.
	 */
	args->pitch = ALIGN(min_pitch, 64);
	args->size = args->pitch * args->height;

	rk_obj = rockchip_gem_create_with_handle(file_priv, dev, args->size,
						 &args->handle);

	return PTR_ERR_OR_ZERO(rk_obj);
}

/*
 * Allocate a sg_table for this GEM object.
 * Note: Both the table's contents, and the sg_table itself must be freed by
 * the caller.
 * Returns a pointer to the newly allocated sg_table, or an ERR_PTR() error.
 */
struct sg_table *rockchip_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
	struct drm_device *drm = obj->dev;
	struct sg_table *sgt;
	int ret;

	if (rk_obj->pages)
		return drm_prime_pages_to_sg(rk_obj->pages, rk_obj->num_pages);

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	ret = dma_get_sgtable_attrs(drm->dev, sgt, rk_obj->kvaddr,
				    rk_obj->dma_addr, obj->size,
				    rk_obj->dma_attrs);
	if (ret) {
		DRM_ERROR("failed to allocate sgt, %d\n", ret);
		kfree(sgt);
		return ERR_PTR(ret);
	}

	return sgt;
}

void *rockchip_gem_prime_vmap(struct drm_gem_object *obj)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);

	if (rk_obj->pages)
		return vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP,
			    pgprot_writecombine(PAGE_KERNEL));

	if (rk_obj->dma_attrs & DMA_ATTR_NO_KERNEL_MAPPING)
		return NULL;

	return rk_obj->kvaddr;
}

void rockchip_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);

	if (rk_obj->pages) {
		vunmap(vaddr);
		return;
	}

	/* Nothing to do if allocated by DMA mapping API. */
}
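/*
 * For reference, these helpers are expected to be wired into the driver's
 * struct drm_driver and file_operations (see rockchip_drm_drv.c), roughly:
 *
 *	.gem_free_object_unlocked = rockchip_gem_free_object,
 *	.dumb_create              = rockchip_gem_dumb_create,
 *	.gem_prime_get_sg_table   = rockchip_gem_prime_get_sg_table,
 *	.gem_prime_vmap           = rockchip_gem_prime_vmap,
 *	.gem_prime_vunmap         = rockchip_gem_prime_vunmap,
 *	.gem_prime_mmap           = rockchip_gem_mmap_buf,
 *
 * with rockchip_gem_mmap() as the fops .mmap handler. Field names are from
 * the drm_driver of this kernel era and may differ on other versions.
 */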