1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/
4 * Author: Rob Clark <rob.clark@linaro.org>
5 */
6
7 #include <linux/dma-mapping.h>
8 #include <linux/seq_file.h>
9 #include <linux/shmem_fs.h>
10 #include <linux/spinlock.h>
11 #include <linux/pfn_t.h>
12
13 #include <drm/drm_prime.h>
14 #include <drm/drm_vma_manager.h>
15
16 #include "omap_drv.h"
17 #include "omap_dmm_tiler.h"
18
19 /*
20 * GEM buffer object implementation.
21 */
22
23 /* note: we use upper 8 bits of flags for driver-internal flags: */
24 #define OMAP_BO_MEM_DMA_API 0x01000000 /* memory allocated with the dma_alloc_* API */
25 #define OMAP_BO_MEM_SHMEM 0x02000000 /* memory allocated through shmem backing */
26 #define OMAP_BO_MEM_DMABUF 0x08000000 /* memory imported from a dmabuf */
27
28 struct omap_gem_object {
29 struct drm_gem_object base;
30
31 struct list_head mm_list;
32
33 u32 flags;
34
35 /** width/height for tiled formats (rounded up to slot boundaries) */
36 u16 width, height;
37
38 /** roll applied when mapping to DMM */
39 u32 roll;
40
41 /** protects pin_cnt, block, pages, dma_addrs and vaddr */
42 struct mutex lock;
43
44 /**
45 * dma_addr contains the buffer DMA address. It is valid for
46 *
47 * - buffers allocated through the DMA mapping API (with the
48 * OMAP_BO_MEM_DMA_API flag set)
49 *
50 * - buffers imported from dmabuf (with the OMAP_BO_MEM_DMABUF flag set)
51 * if they are physically contiguous (when sgt->orig_nents == 1)
52 *
53 * - buffers mapped through the TILER when pin_cnt is not zero, in which
54 * case the DMA address points to the TILER aperture
55 *
56 * Physically contiguous buffers have their DMA address equal to the
57 * physical address as we don't remap those buffers through the TILER.
58 *
59 * Buffers mapped to the TILER have their DMA address pointing to the
60 * TILER aperture. As TILER mappings are refcounted (through pin_cnt)
61 * the DMA address must be accessed through omap_gem_pin() to ensure
62 * that the mapping won't disappear unexpectedly. References must be
63 * released with omap_gem_unpin().
64 */
65 dma_addr_t dma_addr;
66
67 /**
68 * Number of pin references held on the buffer (see omap_gem_pin()).
69 */
70 refcount_t pin_cnt;
71
72 /**
73 * If the buffer has been imported from a dmabuf the OMAP_BO_MEM_DMABUF
74 * flag is set and the sgt field is valid.
75 */
76 struct sg_table *sgt;
77
78 /**
79 * tiler block used when buffer is remapped in DMM/TILER.
80 */
81 struct tiler_block *block;
82
83 /**
84 * Array of backing pages, if allocated. Note that pages are never
85 * allocated for buffers originally allocated from contiguous memory
86 */
87 struct page **pages;
88
89 /** addresses corresponding to pages in above array */
90 dma_addr_t *dma_addrs;
91
92 /**
93 * Virtual address, if mapped.
94 */
95 void *vaddr;
96 };
97
98 #define to_omap_bo(x) container_of(x, struct omap_gem_object, base)
99
100 /* To deal with userspace mmap'ings of 2d tiled buffers, which (a) are
101 * not necessarily pinned in TILER all the time, and (b) when they are
102 * they are not necessarily page aligned, we reserve one or more small
103 * regions in each of the 2d containers to use as a user-GART where we
104 * can create a second page-aligned mapping of parts of the buffer
105 * being accessed from userspace.
106 *
107 * Note that we could optimize slightly when we know that multiple
108 * tiler containers are backed by the same PAT.. but I'll leave that
109 * for later..
110 */
111 #define NUM_USERGART_ENTRIES 2
112 struct omap_drm_usergart_entry {
113 struct tiler_block *block; /* the reserved tiler block */
114 dma_addr_t dma_addr;
115 struct drm_gem_object *obj; /* the current pinned obj */
116 pgoff_t obj_pgoff; /* page offset of obj currently
117 mapped in */
118 };
119
120 struct omap_drm_usergart {
121 struct omap_drm_usergart_entry entry[NUM_USERGART_ENTRIES];
122 int height; /* height in rows */
123 int height_shift; /* ilog2(height in rows) */
124 int slot_shift; /* ilog2(width per slot) */
125 int stride_pfn; /* stride in pages */
126 int last; /* index of last used entry */
127 };
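/*
 * Illustrative worked example (not taken from the driver itself): for
 * TILFMT_8BIT, assuming tiler_align() in omap_gem_init() below rounds the
 * PAGE_SIZE x 1 request up to a 4096-byte wide, 64-row slot, the usergart
 * geometry would end up as
 *
 *	usergart[TILFMT_8BIT].height       = 64;
 *	usergart[TILFMT_8BIT].height_shift = ilog2(64) = 6;
 *	usergart[TILFMT_8BIT].slot_shift   = ilog2((PAGE_SIZE / 64) >> 0) = 6;
 *
 * i.e. each usergart entry covers a page-aligned, 4kb-wide, 64-row window
 * that 2d faults can be remapped through.
 */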
128
129 /* -----------------------------------------------------------------------------
130 * Helpers
131 */
132
133 /** get mmap offset */
134 u64 omap_gem_mmap_offset(struct drm_gem_object *obj)
135 {
136 struct drm_device *dev = obj->dev;
137 int ret;
138 size_t size;
139
140 /* Make it mmapable */
141 size = omap_gem_mmap_size(obj);
142 ret = drm_gem_create_mmap_offset_size(obj, size);
143 if (ret) {
144 dev_err(dev->dev, "could not allocate mmap offset\n");
145 return 0;
146 }
147
148 return drm_vma_node_offset_addr(&obj->vma_node);
149 }
150
151 static bool omap_gem_is_contiguous(struct omap_gem_object *omap_obj)
152 {
153 if (omap_obj->flags & OMAP_BO_MEM_DMA_API)
154 return true;
155
156 if ((omap_obj->flags & OMAP_BO_MEM_DMABUF) && omap_obj->sgt->nents == 1)
157 return true;
158
159 return false;
160 }
161
162 /* -----------------------------------------------------------------------------
163 * Eviction
164 */
165
166 static void omap_gem_evict_entry(struct drm_gem_object *obj,
167 enum tiler_fmt fmt, struct omap_drm_usergart_entry *entry)
168 {
169 struct omap_gem_object *omap_obj = to_omap_bo(obj);
170 struct omap_drm_private *priv = obj->dev->dev_private;
171 int n = priv->usergart[fmt].height;
172 size_t size = PAGE_SIZE * n;
173 loff_t off = omap_gem_mmap_offset(obj) +
174 (entry->obj_pgoff << PAGE_SHIFT);
175 const int m = DIV_ROUND_UP(omap_obj->width << fmt, PAGE_SIZE);
176
177 if (m > 1) {
178 int i;
179 /* if the stride is greater than PAGE_SIZE then the mapping is sparse: */
180 for (i = n; i > 0; i--) {
181 unmap_mapping_range(obj->dev->anon_inode->i_mapping,
182 off, PAGE_SIZE, 1);
183 off += PAGE_SIZE * m;
184 }
185 } else {
186 unmap_mapping_range(obj->dev->anon_inode->i_mapping,
187 off, size, 1);
188 }
189
190 entry->obj = NULL;
191 }
192
193 /* Evict a buffer from usergart, if it is mapped there */
194 static void omap_gem_evict(struct drm_gem_object *obj)
195 {
196 struct omap_gem_object *omap_obj = to_omap_bo(obj);
197 struct omap_drm_private *priv = obj->dev->dev_private;
198
199 if (omap_obj->flags & OMAP_BO_TILED_MASK) {
200 enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
201 int i;
202
203 for (i = 0; i < NUM_USERGART_ENTRIES; i++) {
204 struct omap_drm_usergart_entry *entry =
205 &priv->usergart[fmt].entry[i];
206
207 if (entry->obj == obj)
208 omap_gem_evict_entry(obj, fmt, entry);
209 }
210 }
211 }
212
213 /* -----------------------------------------------------------------------------
214 * Page Management
215 */
216
217 /*
218 * Ensure backing pages are allocated. Must be called with the omap_obj.lock
219 * held.
220 */
221 static int omap_gem_attach_pages(struct drm_gem_object *obj)
222 {
223 struct drm_device *dev = obj->dev;
224 struct omap_gem_object *omap_obj = to_omap_bo(obj);
225 struct page **pages;
226 int npages = obj->size >> PAGE_SHIFT;
227 int i, ret;
228 dma_addr_t *addrs;
229
230 lockdep_assert_held(&omap_obj->lock);
231
232 /*
233 * If not using shmem (in which case backing pages don't need to be
234 * allocated) or if pages are already allocated we're done.
235 */
236 if (!(omap_obj->flags & OMAP_BO_MEM_SHMEM) || omap_obj->pages)
237 return 0;
238
239 pages = drm_gem_get_pages(obj);
240 if (IS_ERR(pages)) {
241 dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages));
242 return PTR_ERR(pages);
243 }
244
245 /* for non-cached buffers, ensure the new pages are clean because
246 * DSS, GPU, etc. are not cache coherent:
247 */
248 if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
249 addrs = kmalloc_array(npages, sizeof(*addrs), GFP_KERNEL);
250 if (!addrs) {
251 ret = -ENOMEM;
252 goto free_pages;
253 }
254
255 for (i = 0; i < npages; i++) {
256 addrs[i] = dma_map_page(dev->dev, pages[i],
257 0, PAGE_SIZE, DMA_TO_DEVICE);
258
259 if (dma_mapping_error(dev->dev, addrs[i])) {
260 dev_warn(dev->dev,
261 "%s: failed to map page\n", __func__);
262
263 for (i = i - 1; i >= 0; --i) {
264 dma_unmap_page(dev->dev, addrs[i],
265 PAGE_SIZE, DMA_TO_DEVICE);
266 }
267
268 ret = -ENOMEM;
269 goto free_addrs;
270 }
271 }
272 } else {
273 addrs = kcalloc(npages, sizeof(*addrs), GFP_KERNEL);
274 if (!addrs) {
275 ret = -ENOMEM;
276 goto free_pages;
277 }
278 }
279
280 omap_obj->dma_addrs = addrs;
281 omap_obj->pages = pages;
282
283 return 0;
284
285 free_addrs:
286 kfree(addrs);
287 free_pages:
288 drm_gem_put_pages(obj, pages, true, false);
289
290 return ret;
291 }
292
293 /* Release backing pages. Must be called with the omap_obj.lock held. */
294 static void omap_gem_detach_pages(struct drm_gem_object *obj)
295 {
296 struct omap_gem_object *omap_obj = to_omap_bo(obj);
297 unsigned int npages = obj->size >> PAGE_SHIFT;
298 unsigned int i;
299
300 lockdep_assert_held(&omap_obj->lock);
301
302 for (i = 0; i < npages; i++) {
303 if (omap_obj->dma_addrs[i])
304 dma_unmap_page(obj->dev->dev, omap_obj->dma_addrs[i],
305 PAGE_SIZE, DMA_TO_DEVICE);
306 }
307
308 kfree(omap_obj->dma_addrs);
309 omap_obj->dma_addrs = NULL;
310
311 drm_gem_put_pages(obj, omap_obj->pages, true, false);
312 omap_obj->pages = NULL;
313 }
314
315 /* get buffer flags */
316 u32 omap_gem_flags(struct drm_gem_object *obj)
317 {
318 return to_omap_bo(obj)->flags;
319 }
320
321 /** get mmap size */
322 size_t omap_gem_mmap_size(struct drm_gem_object *obj)
323 {
324 struct omap_gem_object *omap_obj = to_omap_bo(obj);
325 size_t size = obj->size;
326
327 if (omap_obj->flags & OMAP_BO_TILED_MASK) {
328 /* for tiled buffers, the virtual size has stride rounded up
329 * to 4kb.. (to hide the fact that row n+1 might start 16kb or
330 * 32kb later!). But we don't back the entire buffer with
331 * pages, only the valid picture part.. so need to adjust for
332 * this in the size used to mmap and generate mmap offset
333 */
334 size = tiler_vsize(gem2fmt(omap_obj->flags),
335 omap_obj->width, omap_obj->height);
336 }
337
338 return size;
339 }
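/*
 * Worked example (illustrative, assuming tiler_vsize() pads each row of the
 * buffer out to PAGE_SIZE as described above): a 600 x 480 TILFMT_8BIT buffer
 * has 600-byte rows, so each virtual row occupies one 4096-byte page and the
 * mmap size becomes 480 * 4096 bytes, even though only 600 * 480 bytes are
 * backed by pixel data.
 */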
340
341 /* -----------------------------------------------------------------------------
342 * Fault Handling
343 */
344
345 /* Normal handling for the case of faulting in non-tiled buffers */
346 static vm_fault_t omap_gem_fault_1d(struct drm_gem_object *obj,
347 struct vm_area_struct *vma, struct vm_fault *vmf)
348 {
349 struct omap_gem_object *omap_obj = to_omap_bo(obj);
350 unsigned long pfn;
351 pgoff_t pgoff;
352
353 /* We don't use vmf->pgoff since that has the fake offset: */
354 pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
355
356 if (omap_obj->pages) {
357 omap_gem_cpu_sync_page(obj, pgoff);
358 pfn = page_to_pfn(omap_obj->pages[pgoff]);
359 } else {
360 BUG_ON(!omap_gem_is_contiguous(omap_obj));
361 pfn = (omap_obj->dma_addr >> PAGE_SHIFT) + pgoff;
362 }
363
364 VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
365 pfn, pfn << PAGE_SHIFT);
366
367 return vmf_insert_mixed(vma, vmf->address,
368 __pfn_to_pfn_t(pfn, PFN_DEV));
369 }
370
371 /* Special handling for the case of faulting in 2d tiled buffers */
372 static vm_fault_t omap_gem_fault_2d(struct drm_gem_object *obj,
373 struct vm_area_struct *vma, struct vm_fault *vmf)
374 {
375 struct omap_gem_object *omap_obj = to_omap_bo(obj);
376 struct omap_drm_private *priv = obj->dev->dev_private;
377 struct omap_drm_usergart_entry *entry;
378 enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
379 struct page *pages[64]; /* XXX is this too much to have on stack? */
380 unsigned long pfn;
381 pgoff_t pgoff, base_pgoff;
382 unsigned long vaddr;
383 int i, err, slots;
384 vm_fault_t ret = VM_FAULT_NOPAGE;
385
386 /*
387 * Note the height of the slot is also equal to the number of pages
388 * that need to be mapped in to fill a 4kb-wide CPU page. If the slot
389 * height is 64, then 64 pages fill a 4kb-wide by 64-row region.
390 */
391 const int n = priv->usergart[fmt].height;
392 const int n_shift = priv->usergart[fmt].height_shift;
393
394 /*
395 * If buffer width in bytes > PAGE_SIZE then the virtual stride is
396 * rounded up to the next multiple of PAGE_SIZE.. this needs to be taken
397 * into account in some of the math, so figure out virtual stride
398 * in pages
399 */
400 const int m = DIV_ROUND_UP(omap_obj->width << fmt, PAGE_SIZE);
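/*
 * Worked example (illustrative): with PAGE_SIZE = 4096, a buffer whose rows
 * are at most 4096 bytes wide gives m = 1, so the slot-row below maps as one
 * dense range of n pages. A 6000-byte row gives
 * m = DIV_ROUND_UP(6000, 4096) = 2, so consecutive rows start two pages
 * apart in the virtual mapping and the range is sparse.
 */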
401
402 /* We don't use vmf->pgoff since that has the fake offset: */
403 pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
404
405 /*
406 * Actual address we start mapping at is rounded down to previous slot
407 * boundary in the y direction:
408 */
409 base_pgoff = round_down(pgoff, m << n_shift);
410
411 /* figure out buffer width in slots */
412 slots = omap_obj->width >> priv->usergart[fmt].slot_shift;
413
414 vaddr = vmf->address - ((pgoff - base_pgoff) << PAGE_SHIFT);
415
416 entry = &priv->usergart[fmt].entry[priv->usergart[fmt].last];
417
418 /* evict previous buffer using this usergart entry, if any: */
419 if (entry->obj)
420 omap_gem_evict_entry(entry->obj, fmt, entry);
421
422 entry->obj = obj;
423 entry->obj_pgoff = base_pgoff;
424
425 /* now convert base_pgoff to phys offset from virt offset: */
426 base_pgoff = (base_pgoff >> n_shift) * slots;
427
428 /* for wider-than 4k.. figure out which part of the slot-row we want: */
429 if (m > 1) {
430 int off = pgoff % m;
431 entry->obj_pgoff += off;
432 base_pgoff /= m;
433 slots = min(slots - (off << n_shift), n);
434 base_pgoff += off << n_shift;
435 vaddr += off << PAGE_SHIFT;
436 }
437
438 /*
439 * Map in pages. Beyond the valid pixel part of the buffer, we set
440 * pages[i] to NULL to get a dummy page mapped in.. if someone
441 * reads/writes it they will get random/undefined content, but at
442 * least it won't be corrupting whatever other random page used to
443 * be mapped in, or other undefined behavior.
444 */
445 memcpy(pages, &omap_obj->pages[base_pgoff],
446 sizeof(struct page *) * slots);
447 memset(pages + slots, 0,
448 sizeof(struct page *) * (n - slots));
449
450 err = tiler_pin(entry->block, pages, ARRAY_SIZE(pages), 0, true);
451 if (err) {
452 ret = vmf_error(err);
453 dev_err(obj->dev->dev, "failed to pin: %d\n", err);
454 return ret;
455 }
456
457 pfn = entry->dma_addr >> PAGE_SHIFT;
458
459 VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
460 pfn, pfn << PAGE_SHIFT);
461
462 for (i = n; i > 0; i--) {
463 ret = vmf_insert_mixed(vma,
464 vaddr, __pfn_to_pfn_t(pfn, PFN_DEV));
465 if (ret & VM_FAULT_ERROR)
466 break;
467 pfn += priv->usergart[fmt].stride_pfn;
468 vaddr += PAGE_SIZE * m;
469 }
470
471 /* simple round-robin: */
472 priv->usergart[fmt].last = (priv->usergart[fmt].last + 1)
473 % NUM_USERGART_ENTRIES;
474
475 return ret;
476 }
477
478 /**
479 * omap_gem_fault - pagefault handler for GEM objects
480 * @vmf: fault detail
481 *
482 * Invoked when a fault occurs on an mmap of a GEM managed area. GEM
483 * does most of the work for us including the actual map/unmap calls
484 * but we need to do the actual page work.
485 *
486 * The VMA was set up by GEM. In doing so it also ensured that the
487 * vma->vm_private_data points to the GEM object that is backing this
488 * mapping.
489 */
490 static vm_fault_t omap_gem_fault(struct vm_fault *vmf)
491 {
492 struct vm_area_struct *vma = vmf->vma;
493 struct drm_gem_object *obj = vma->vm_private_data;
494 struct omap_gem_object *omap_obj = to_omap_bo(obj);
495 int err;
496 vm_fault_t ret;
497
498 /* Make sure we don't parallel update on a fault, nor move or remove
499 * something from beneath our feet
500 */
501 mutex_lock(&omap_obj->lock);
502
503 /* if a shmem backed object, make sure we have pages attached now */
504 err = omap_gem_attach_pages(obj);
505 if (err) {
506 ret = vmf_error(err);
507 goto fail;
508 }
509
510 /* where should we do corresponding put_pages().. we are mapping
511 * the original page, rather than thru a GART, so we can't rely
512 * on eviction to trigger this. But munmap() of all mappings should
513 * probably trigger put_pages()?
514 */
515
516 if (omap_obj->flags & OMAP_BO_TILED_MASK)
517 ret = omap_gem_fault_2d(obj, vma, vmf);
518 else
519 ret = omap_gem_fault_1d(obj, vma, vmf);
520
521
522 fail:
523 mutex_unlock(&omap_obj->lock);
524 return ret;
525 }
526
527 static int omap_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
528 {
529 struct omap_gem_object *omap_obj = to_omap_bo(obj);
530
531 vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP | VM_IO | VM_MIXEDMAP);
532
533 if (omap_obj->flags & OMAP_BO_WC) {
534 vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
535 } else if (omap_obj->flags & OMAP_BO_UNCACHED) {
536 vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
537 } else {
538 /*
539 * We do have some private objects, at least for scanout buffers
540 * on hardware without DMM/TILER. But these are allocated write-
541 * combine
542 */
543 if (WARN_ON(!obj->filp))
544 return -EINVAL;
545
546 /*
547 * Shunt off cached objs to shmem file so they have their own
548 * address_space (so unmap_mapping_range does what we want,
549 * in particular in the case of mmap'd dmabufs)
550 */
551 vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
552 vma_set_file(vma, obj->filp);
553
554 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
555 }
556
557 vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
558
559 return 0;
560 }
561
562 /* -----------------------------------------------------------------------------
563 * Dumb Buffers
564 */
565
566 /**
567 * omap_gem_dumb_create - create a dumb buffer
568 * @file: our client file
569 * @dev: our device
570 * @args: the requested arguments copied from userspace
571 *
572 * Allocate a buffer suitable for use for a frame buffer of the
573 * form described by user space. Give userspace a handle by which
574 * to reference it.
575 */
576 int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
577 struct drm_mode_create_dumb *args)
578 {
579 union omap_gem_size gsize;
580
581 args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
582
583 args->size = PAGE_ALIGN(args->pitch * args->height);
584
585 gsize = (union omap_gem_size){
586 .bytes = args->size,
587 };
588
589 return omap_gem_new_handle(dev, file, gsize,
590 OMAP_BO_SCANOUT | OMAP_BO_WC, &args->handle);
591 }
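/*
 * Worked example (illustrative): a 1280x720 dumb buffer with bpp = 32 gives
 * args->pitch = DIV_ROUND_UP(1280 * 32, 8) = 5120 bytes and
 * args->size = PAGE_ALIGN(5120 * 720) = 3686400 bytes (already a multiple of
 * PAGE_SIZE), allocated as a write-combined scanout buffer.
 */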
592
593 /**
594 * omap_gem_dumb_map_offset - create an offset for a dumb buffer
595 * @file: our drm client file
596 * @dev: drm device
597 * @handle: GEM handle to the object (from dumb_create)
598 * @offset: memory map offset placeholder
599 *
600 * Do the necessary setup to allow the mapping of the frame buffer
601 * into user memory. We don't have to do much here at the moment.
602 */
603 int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
604 u32 handle, u64 *offset)
605 {
606 struct drm_gem_object *obj;
607 int ret = 0;
608
609 /* GEM does all our handle to object mapping */
610 obj = drm_gem_object_lookup(file, handle);
611 if (obj == NULL) {
612 ret = -ENOENT;
613 goto fail;
614 }
615
616 *offset = omap_gem_mmap_offset(obj);
617
618 drm_gem_object_put(obj);
619
620 fail:
621 return ret;
622 }
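/*
 * Typical userspace flow (illustrative sketch, error handling omitted) that
 * ends up in the two dumb-buffer helpers above:
 *
 *	struct drm_mode_create_dumb create = {
 *		.width = 1280, .height = 720, .bpp = 32,
 *	};
 *	struct drm_mode_map_dumb map = { 0 };
 *	void *fb;
 *
 *	ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);
 *	map.handle = create.handle;
 *	ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
 *	fb = mmap(NULL, create.size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		  fd, map.offset);
 */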
623
624 #ifdef CONFIG_DRM_FBDEV_EMULATION
625 /* Set scrolling position. This allows us to implement fast scrolling
626 * for console.
627 *
628 * Call only from non-atomic contexts.
629 */
630 int omap_gem_roll(struct drm_gem_object *obj, u32 roll)
631 {
632 struct omap_gem_object *omap_obj = to_omap_bo(obj);
633 u32 npages = obj->size >> PAGE_SHIFT;
634 int ret = 0;
635
636 if (roll > npages) {
637 dev_err(obj->dev->dev, "invalid roll: %d\n", roll);
638 return -EINVAL;
639 }
640
641 omap_obj->roll = roll;
642
643 mutex_lock(&omap_obj->lock);
644
645 /* if we aren't mapped yet, we don't need to do anything */
646 if (omap_obj->block) {
647 ret = omap_gem_attach_pages(obj);
648 if (ret)
649 goto fail;
650
651 ret = tiler_pin(omap_obj->block, omap_obj->pages, npages,
652 roll, true);
653 if (ret)
654 dev_err(obj->dev->dev, "could not repin: %d\n", ret);
655 }
656
657 fail:
658 mutex_unlock(&omap_obj->lock);
659
660 return ret;
661 }
662 #endif
663
664 /* -----------------------------------------------------------------------------
665 * Memory Management & DMA Sync
666 */
667
668 /*
669 * shmem buffers that are mapped cached are not coherent.
670 *
671 * We keep track of dirty pages using page faulting to perform cache management.
672 * When a page is mapped to the CPU in read/write mode the device can't access
673 * it and omap_obj->dma_addrs[i] is NULL. When a page is mapped to the device
674 * the omap_obj->dma_addrs[i] is set to the DMA address, and the page is
675 * unmapped from the CPU.
676 */
677 static inline bool omap_gem_is_cached_coherent(struct drm_gem_object *obj)
678 {
679 struct omap_gem_object *omap_obj = to_omap_bo(obj);
680
681 return !((omap_obj->flags & OMAP_BO_MEM_SHMEM) &&
682 ((omap_obj->flags & OMAP_BO_CACHE_MASK) == OMAP_BO_CACHED));
683 }
684
685 /* Sync the buffer for CPU access.. note pages should already be
686 * attached, ie. omap_gem_get_pages()
687 */
688 void omap_gem_cpu_sync_page(struct drm_gem_object *obj, int pgoff)
689 {
690 struct drm_device *dev = obj->dev;
691 struct omap_gem_object *omap_obj = to_omap_bo(obj);
692
693 if (omap_gem_is_cached_coherent(obj))
694 return;
695
696 if (omap_obj->dma_addrs[pgoff]) {
697 dma_unmap_page(dev->dev, omap_obj->dma_addrs[pgoff],
698 PAGE_SIZE, DMA_TO_DEVICE);
699 omap_obj->dma_addrs[pgoff] = 0;
700 }
701 }
702
703 /* sync the buffer for DMA access */
704 void omap_gem_dma_sync_buffer(struct drm_gem_object *obj,
705 enum dma_data_direction dir)
706 {
707 struct drm_device *dev = obj->dev;
708 struct omap_gem_object *omap_obj = to_omap_bo(obj);
709 int i, npages = obj->size >> PAGE_SHIFT;
710 struct page **pages = omap_obj->pages;
711 bool dirty = false;
712
713 if (omap_gem_is_cached_coherent(obj))
714 return;
715
716 for (i = 0; i < npages; i++) {
717 if (!omap_obj->dma_addrs[i]) {
718 dma_addr_t addr;
719
720 addr = dma_map_page(dev->dev, pages[i], 0,
721 PAGE_SIZE, dir);
722 if (dma_mapping_error(dev->dev, addr)) {
723 dev_warn(dev->dev, "%s: failed to map page\n",
724 __func__);
725 break;
726 }
727
728 dirty = true;
729 omap_obj->dma_addrs[i] = addr;
730 }
731 }
732
733 if (dirty) {
734 unmap_mapping_range(obj->filp->f_mapping, 0,
735 omap_gem_mmap_size(obj), 1);
736 }
737 }
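/*
 * Illustrative sketch (hypothetical caller, not a real path in this file):
 * for a cached shmem buffer, a device-access path would first make sure the
 * backing pages exist and are clean in memory before handing the buffer to
 * the hardware:
 *
 *	struct page **pages;
 *	int ret = omap_gem_get_pages(obj, &pages, true);
 *	if (ret)
 *		return ret;
 *	omap_gem_dma_sync_buffer(obj, DMA_TO_DEVICE);
 *	... device reads from the buffer ...
 *
 * Subsequent CPU writes fault through omap_gem_fault(), which calls
 * omap_gem_cpu_sync_page() to unmap the touched page from the device again.
 */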
738
739 static int omap_gem_pin_tiler(struct drm_gem_object *obj)
740 {
741 struct omap_gem_object *omap_obj = to_omap_bo(obj);
742 u32 npages = obj->size >> PAGE_SHIFT;
743 enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
744 struct tiler_block *block;
745 int ret;
746
747 BUG_ON(omap_obj->block);
748
749 if (omap_obj->flags & OMAP_BO_TILED_MASK) {
750 block = tiler_reserve_2d(fmt, omap_obj->width, omap_obj->height,
751 PAGE_SIZE);
752 } else {
753 block = tiler_reserve_1d(obj->size);
754 }
755
756 if (IS_ERR(block)) {
757 ret = PTR_ERR(block);
758 dev_err(obj->dev->dev, "could not remap: %d (%d)\n", ret, fmt);
759 goto fail;
760 }
761
762 /* TODO: enable async refill.. */
763 ret = tiler_pin(block, omap_obj->pages, npages, omap_obj->roll, true);
764 if (ret) {
765 tiler_release(block);
766 dev_err(obj->dev->dev, "could not pin: %d\n", ret);
767 goto fail;
768 }
769
770 omap_obj->dma_addr = tiler_ssptr(block);
771 omap_obj->block = block;
772
773 DBG("got dma address: %pad", &omap_obj->dma_addr);
774
775 fail:
776 return ret;
777 }
778
779 /**
780 * omap_gem_pin() - Pin a GEM object in memory
781 * @obj: the GEM object
782 * @dma_addr: the DMA address
783 *
784 * Pin the given GEM object in memory and fill the dma_addr pointer with the
785 * object's DMA address. If the buffer is not physically contiguous it will be
786 * remapped through the TILER to provide a contiguous view.
787 *
788 * Pins are reference-counted, calling this function multiple times is allowed
789 * as long the corresponding omap_gem_unpin() calls are balanced.
790 *
791 * Return 0 on success or a negative error code otherwise.
792 */
793 int omap_gem_pin(struct drm_gem_object *obj, dma_addr_t *dma_addr)
794 {
795 struct omap_drm_private *priv = obj->dev->dev_private;
796 struct omap_gem_object *omap_obj = to_omap_bo(obj);
797 int ret = 0;
798
799 mutex_lock(&omap_obj->lock);
800
801 if (!omap_gem_is_contiguous(omap_obj)) {
802 if (refcount_read(&omap_obj->pin_cnt) == 0) {
803
804 refcount_set(&omap_obj->pin_cnt, 1);
805
806 ret = omap_gem_attach_pages(obj);
807 if (ret)
808 goto fail;
809
810 if (omap_obj->flags & OMAP_BO_SCANOUT) {
811 if (priv->has_dmm) {
812 ret = omap_gem_pin_tiler(obj);
813 if (ret)
814 goto fail;
815 }
816 }
817 } else {
818 refcount_inc(&omap_obj->pin_cnt);
819 }
820 }
821
822 if (dma_addr)
823 *dma_addr = omap_obj->dma_addr;
824
825 fail:
826 mutex_unlock(&omap_obj->lock);
827
828 return ret;
829 }
830
831 /**
832 * omap_gem_unpin_locked() - Unpin a GEM object from memory
833 * @obj: the GEM object
834 *
835 * omap_gem_unpin() without locking.
836 */
837 static void omap_gem_unpin_locked(struct drm_gem_object *obj)
838 {
839 struct omap_drm_private *priv = obj->dev->dev_private;
840 struct omap_gem_object *omap_obj = to_omap_bo(obj);
841 int ret;
842
843 if (omap_gem_is_contiguous(omap_obj))
844 return;
845
846 if (refcount_dec_and_test(&omap_obj->pin_cnt)) {
847 if (omap_obj->sgt) {
848 sg_free_table(omap_obj->sgt);
849 kfree(omap_obj->sgt);
850 omap_obj->sgt = NULL;
851 }
852 if (!(omap_obj->flags & OMAP_BO_SCANOUT))
853 return;
854 if (priv->has_dmm) {
855 ret = tiler_unpin(omap_obj->block);
856 if (ret) {
857 dev_err(obj->dev->dev,
858 "could not unpin pages: %d\n", ret);
859 }
860 ret = tiler_release(omap_obj->block);
861 if (ret) {
862 dev_err(obj->dev->dev,
863 "could not release unmap: %d\n", ret);
864 }
865 omap_obj->dma_addr = 0;
866 omap_obj->block = NULL;
867 }
868 }
869 }
870
871 /**
872 * omap_gem_unpin() - Unpin a GEM object from memory
873 * @obj: the GEM object
874 *
875 * Unpin the given GEM object previously pinned with omap_gem_pin(). Pins are
876 * reference-counted, the actual unpin will only be performed when the number
877 * of calls to this function matches the number of calls to omap_gem_pin().
878 */
879 void omap_gem_unpin(struct drm_gem_object *obj)
880 {
881 struct omap_gem_object *omap_obj = to_omap_bo(obj);
882
883 mutex_lock(&omap_obj->lock);
884 omap_gem_unpin_locked(obj);
885 mutex_unlock(&omap_obj->lock);
886 }
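/*
 * Illustrative usage sketch (hypothetical caller): pinning for scanout and
 * releasing the reference once the hardware no longer uses the buffer. Pins
 * nest, so each successful omap_gem_pin() must be balanced by one
 * omap_gem_unpin():
 *
 *	dma_addr_t dma_addr;
 *	int ret;
 *
 *	ret = omap_gem_pin(obj, &dma_addr);
 *	if (ret)
 *		return ret;
 *	... program dma_addr into the display hardware ...
 *	omap_gem_unpin(obj);
 */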
887
888 /* Get rotated scanout address (only valid if already pinned), at the
889 * specified orientation and x,y offset from top-left corner of buffer
890 * (only valid for tiled 2d buffers)
891 */
892 int omap_gem_rotated_dma_addr(struct drm_gem_object *obj, u32 orient,
893 int x, int y, dma_addr_t *dma_addr)
894 {
895 struct omap_gem_object *omap_obj = to_omap_bo(obj);
896 int ret = -EINVAL;
897
898 mutex_lock(&omap_obj->lock);
899
900 if ((refcount_read(&omap_obj->pin_cnt) > 0) && omap_obj->block &&
901 (omap_obj->flags & OMAP_BO_TILED_MASK)) {
902 *dma_addr = tiler_tsptr(omap_obj->block, orient, x, y);
903 ret = 0;
904 }
905
906 mutex_unlock(&omap_obj->lock);
907
908 return ret;
909 }
910
911 /* Get tiler stride for the buffer (only valid for 2d tiled buffers) */
912 int omap_gem_tiled_stride(struct drm_gem_object *obj, u32 orient)
913 {
914 struct omap_gem_object *omap_obj = to_omap_bo(obj);
915 int ret = -EINVAL;
916 if (omap_obj->flags & OMAP_BO_TILED_MASK)
917 ret = tiler_stride(gem2fmt(omap_obj->flags), orient);
918 return ret;
919 }
920
921 /* if !remap, and we don't have pages backing, then fail, rather than
922 * increasing the pin count (which we don't really do yet anyways,
923 * because we don't support swapping pages back out). And 'remap'
924 * might not be quite the right name, but I wanted to keep it working
925 * similarly to omap_gem_pin(). Note though that mutex is not
926 * acquired if !remap (because this can be called in atomic ctxt),
927 * but probably omap_gem_unpin() should be changed to work in the
928 * same way. If !remap, a matching omap_gem_put_pages() call is not
929 * required (and should not be made).
930 */
931 int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
932 bool remap)
933 {
934 struct omap_gem_object *omap_obj = to_omap_bo(obj);
935 int ret = 0;
936
937 mutex_lock(&omap_obj->lock);
938
939 if (remap) {
940 ret = omap_gem_attach_pages(obj);
941 if (ret)
942 goto unlock;
943 }
944
945 if (!omap_obj->pages) {
946 ret = -ENOMEM;
947 goto unlock;
948 }
949
950 *pages = omap_obj->pages;
951
952 unlock:
953 mutex_unlock(&omap_obj->lock);
954
955 return ret;
956 }
957
958 /* release pages when DMA no longer being performed */
959 int omap_gem_put_pages(struct drm_gem_object *obj)
960 {
961 /* do something here if we dynamically attach/detach pages.. at
962 * least they would no longer need to be pinned if everyone has
963 * released the pages..
964 */
965 return 0;
966 }
967
968 struct sg_table *omap_gem_get_sg(struct drm_gem_object *obj,
969 enum dma_data_direction dir)
970 {
971 struct omap_gem_object *omap_obj = to_omap_bo(obj);
972 dma_addr_t addr;
973 struct sg_table *sgt;
974 struct scatterlist *sg;
975 unsigned int count, len, stride, i;
976 int ret;
977
978 ret = omap_gem_pin(obj, &addr);
979 if (ret)
980 return ERR_PTR(ret);
981
982 mutex_lock(&omap_obj->lock);
983
984 sgt = omap_obj->sgt;
985 if (sgt)
986 goto out;
987
988 sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
989 if (!sgt) {
990 ret = -ENOMEM;
991 goto err_unpin;
992 }
993
994 if (addr) {
995 if (omap_obj->flags & OMAP_BO_TILED_MASK) {
996 enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
997
998 len = omap_obj->width << (int)fmt;
999 count = omap_obj->height;
1000 stride = tiler_stride(fmt, 0);
1001 } else {
1002 len = obj->size;
1003 count = 1;
1004 stride = 0;
1005 }
1006 } else {
1007 count = obj->size >> PAGE_SHIFT;
1008 }
1009
1010 ret = sg_alloc_table(sgt, count, GFP_KERNEL);
1011 if (ret)
1012 goto err_free;
1013
1014 /* this must be after omap_gem_pin() to ensure we have pages attached */
1015 omap_gem_dma_sync_buffer(obj, dir);
1016
1017 if (addr) {
1018 for_each_sg(sgt->sgl, sg, count, i) {
1019 sg_set_page(sg, phys_to_page(addr), len,
1020 offset_in_page(addr));
1021 sg_dma_address(sg) = addr;
1022 sg_dma_len(sg) = len;
1023
1024 addr += stride;
1025 }
1026 } else {
1027 for_each_sg(sgt->sgl, sg, count, i) {
1028 sg_set_page(sg, omap_obj->pages[i], PAGE_SIZE, 0);
1029 sg_dma_address(sg) = omap_obj->dma_addrs[i];
1030 sg_dma_len(sg) = PAGE_SIZE;
1031 }
1032 }
1033
1034 omap_obj->sgt = sgt;
1035 out:
1036 mutex_unlock(&omap_obj->lock);
1037 return sgt;
1038
1039 err_free:
1040 kfree(sgt);
1041 err_unpin:
1042 mutex_unlock(&omap_obj->lock);
1043 omap_gem_unpin(obj);
1044 return ERR_PTR(ret);
1045 }
1046
1047 void omap_gem_put_sg(struct drm_gem_object *obj, struct sg_table *sgt)
1048 {
1049 struct omap_gem_object *omap_obj = to_omap_bo(obj);
1050
1051 if (WARN_ON(omap_obj->sgt != sgt))
1052 return;
1053
1054 omap_gem_unpin(obj);
1055 }
1056
1057 #ifdef CONFIG_DRM_FBDEV_EMULATION
1058 /*
1059 * Get kernel virtual address for CPU access.. this more or less only
1060 * exists for omap_fbdev.
1061 */
1062 void *omap_gem_vaddr(struct drm_gem_object *obj)
1063 {
1064 struct omap_gem_object *omap_obj = to_omap_bo(obj);
1065 void *vaddr;
1066 int ret;
1067
1068 mutex_lock(&omap_obj->lock);
1069
1070 if (!omap_obj->vaddr) {
1071 ret = omap_gem_attach_pages(obj);
1072 if (ret) {
1073 vaddr = ERR_PTR(ret);
1074 goto unlock;
1075 }
1076
1077 omap_obj->vaddr = vmap(omap_obj->pages, obj->size >> PAGE_SHIFT,
1078 VM_MAP, pgprot_writecombine(PAGE_KERNEL));
1079 }
1080
1081 vaddr = omap_obj->vaddr;
1082
1083 unlock:
1084 mutex_unlock(&omap_obj->lock);
1085 return vaddr;
1086 }
1087 #endif
1088
1089 /* -----------------------------------------------------------------------------
1090 * Power Management
1091 */
1092
1093 #ifdef CONFIG_PM
1094 /* re-pin objects in DMM in resume path: */
1095 int omap_gem_resume(struct drm_device *dev)
1096 {
1097 struct omap_drm_private *priv = dev->dev_private;
1098 struct omap_gem_object *omap_obj;
1099 int ret = 0;
1100
1101 mutex_lock(&priv->list_lock);
1102 list_for_each_entry(omap_obj, &priv->obj_list, mm_list) {
1103 if (omap_obj->block) {
1104 struct drm_gem_object *obj = &omap_obj->base;
1105 u32 npages = obj->size >> PAGE_SHIFT;
1106
1107 WARN_ON(!omap_obj->pages); /* this can't happen */
1108 ret = tiler_pin(omap_obj->block,
1109 omap_obj->pages, npages,
1110 omap_obj->roll, true);
1111 if (ret) {
1112 dev_err(dev->dev, "could not repin: %d\n", ret);
1113 goto done;
1114 }
1115 }
1116 }
1117
1118 done:
1119 mutex_unlock(&priv->list_lock);
1120 return ret;
1121 }
1122 #endif
1123
1124 /* -----------------------------------------------------------------------------
1125 * DebugFS
1126 */
1127
1128 #ifdef CONFIG_DEBUG_FS
1129 void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
1130 {
1131 struct omap_gem_object *omap_obj = to_omap_bo(obj);
1132 u64 off;
1133
1134 off = drm_vma_node_start(&obj->vma_node);
1135
1136 mutex_lock(&omap_obj->lock);
1137
1138 seq_printf(m, "%08x: %2d (%2d) %08llx %pad (%2d) %p %4d",
1139 omap_obj->flags, obj->name, kref_read(&obj->refcount),
1140 off, &omap_obj->dma_addr,
1141 refcount_read(&omap_obj->pin_cnt),
1142 omap_obj->vaddr, omap_obj->roll);
1143
1144 if (omap_obj->flags & OMAP_BO_TILED_MASK) {
1145 seq_printf(m, " %dx%d", omap_obj->width, omap_obj->height);
1146 if (omap_obj->block) {
1147 struct tcm_area *area = &omap_obj->block->area;
1148 seq_printf(m, " (%dx%d, %dx%d)",
1149 area->p0.x, area->p0.y,
1150 area->p1.x, area->p1.y);
1151 }
1152 } else {
1153 seq_printf(m, " %zu", obj->size);
1154 }
1155
1156 mutex_unlock(&omap_obj->lock);
1157
1158 seq_printf(m, "\n");
1159 }
1160
1161 void omap_gem_describe_objects(struct list_head *list, struct seq_file *m)
1162 {
1163 struct omap_gem_object *omap_obj;
1164 int count = 0;
1165 size_t size = 0;
1166
1167 list_for_each_entry(omap_obj, list, mm_list) {
1168 struct drm_gem_object *obj = &omap_obj->base;
1169 seq_printf(m, " ");
1170 omap_gem_describe(obj, m);
1171 count++;
1172 size += obj->size;
1173 }
1174
1175 seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
1176 }
1177 #endif
1178
1179 /* -----------------------------------------------------------------------------
1180 * Constructor & Destructor
1181 */
1182
1183 static void omap_gem_free_object(struct drm_gem_object *obj)
1184 {
1185 struct drm_device *dev = obj->dev;
1186 struct omap_drm_private *priv = dev->dev_private;
1187 struct omap_gem_object *omap_obj = to_omap_bo(obj);
1188
1189 omap_gem_evict(obj);
1190
1191 mutex_lock(&priv->list_lock);
1192 list_del(&omap_obj->mm_list);
1193 mutex_unlock(&priv->list_lock);
1194
1195 /*
1196 * We own the sole reference to the object at this point, but to keep
1197 * lockdep happy, we must still take the omap_obj.lock to call
1198 * omap_gem_detach_pages(). This should hardly make any difference as
1199 * there can't be any lock contention.
1200 */
1201 mutex_lock(&omap_obj->lock);
1202
1203 /* The object should not be pinned. */
1204 WARN_ON(refcount_read(&omap_obj->pin_cnt) > 0);
1205
1206 if (omap_obj->pages) {
1207 if (omap_obj->flags & OMAP_BO_MEM_DMABUF)
1208 kfree(omap_obj->pages);
1209 else
1210 omap_gem_detach_pages(obj);
1211 }
1212
1213 if (omap_obj->flags & OMAP_BO_MEM_DMA_API) {
1214 dma_free_wc(dev->dev, obj->size, omap_obj->vaddr,
1215 omap_obj->dma_addr);
1216 } else if (omap_obj->vaddr) {
1217 vunmap(omap_obj->vaddr);
1218 } else if (obj->import_attach) {
1219 drm_prime_gem_destroy(obj, omap_obj->sgt);
1220 }
1221
1222 mutex_unlock(&omap_obj->lock);
1223
1224 drm_gem_object_release(obj);
1225
1226 mutex_destroy(&omap_obj->lock);
1227
1228 kfree(omap_obj);
1229 }
1230
1231 static bool omap_gem_validate_flags(struct drm_device *dev, u32 flags)
1232 {
1233 struct omap_drm_private *priv = dev->dev_private;
1234
1235 switch (flags & OMAP_BO_CACHE_MASK) {
1236 case OMAP_BO_CACHED:
1237 case OMAP_BO_WC:
1238 case OMAP_BO_CACHE_MASK:
1239 break;
1240
1241 default:
1242 return false;
1243 }
1244
1245 if (flags & OMAP_BO_TILED_MASK) {
1246 if (!priv->usergart)
1247 return false;
1248
1249 switch (flags & OMAP_BO_TILED_MASK) {
1250 case OMAP_BO_TILED_8:
1251 case OMAP_BO_TILED_16:
1252 case OMAP_BO_TILED_32:
1253 break;
1254
1255 default:
1256 return false;
1257 }
1258 }
1259
1260 return true;
1261 }
1262
1263 static const struct vm_operations_struct omap_gem_vm_ops = {
1264 .fault = omap_gem_fault,
1265 .open = drm_gem_vm_open,
1266 .close = drm_gem_vm_close,
1267 };
1268
1269 static const struct drm_gem_object_funcs omap_gem_object_funcs = {
1270 .free = omap_gem_free_object,
1271 .export = omap_gem_prime_export,
1272 .mmap = omap_gem_object_mmap,
1273 .vm_ops = &omap_gem_vm_ops,
1274 };
1275
1276 /* GEM buffer object constructor */
1277 struct drm_gem_object *omap_gem_new(struct drm_device *dev,
1278 union omap_gem_size gsize, u32 flags)
1279 {
1280 struct omap_drm_private *priv = dev->dev_private;
1281 struct omap_gem_object *omap_obj;
1282 struct drm_gem_object *obj;
1283 struct address_space *mapping;
1284 size_t size;
1285 int ret;
1286
1287 if (!omap_gem_validate_flags(dev, flags))
1288 return NULL;
1289
1290 /* Compute the memory and cache flags. */
1291 if (flags & OMAP_BO_TILED_MASK) {
1292 /*
1293 * Tiled buffers are always shmem paged backed. When they are
1294 * scanned out, they are remapped into DMM/TILER.
1295 */
1296 flags |= OMAP_BO_MEM_SHMEM;
1297
1298 /*
1299 * Currently don't allow cached buffers. There is some caching
1300 * stuff that needs to be handled better.
1301 */
1302 flags &= ~(OMAP_BO_CACHED|OMAP_BO_WC|OMAP_BO_UNCACHED);
1303 flags |= tiler_get_cpu_cache_flags();
1304 } else if ((flags & OMAP_BO_SCANOUT) && !priv->has_dmm) {
1305 /*
1306 * If we don't have DMM, we must allocate scanout buffers
1307 * from contiguous DMA memory.
1308 */
1309 flags |= OMAP_BO_MEM_DMA_API;
1310 } else if (!(flags & OMAP_BO_MEM_DMABUF)) {
1311 /*
1312 * All other buffers not backed by dma_buf are shmem-backed.
1313 */
1314 flags |= OMAP_BO_MEM_SHMEM;
1315 }
1316
1317 /* Allocate and initialize the OMAP GEM object. */
1318 omap_obj = kzalloc(sizeof(*omap_obj), GFP_KERNEL);
1319 if (!omap_obj)
1320 return NULL;
1321
1322 obj = &omap_obj->base;
1323 omap_obj->flags = flags;
1324 mutex_init(&omap_obj->lock);
1325
1326 if (flags & OMAP_BO_TILED_MASK) {
1327 /*
1328 * For tiled buffers align dimensions to slot boundaries and
1329 * calculate size based on aligned dimensions.
1330 */
1331 tiler_align(gem2fmt(flags), &gsize.tiled.width,
1332 &gsize.tiled.height);
1333
1334 size = tiler_size(gem2fmt(flags), gsize.tiled.width,
1335 gsize.tiled.height);
1336
1337 omap_obj->width = gsize.tiled.width;
1338 omap_obj->height = gsize.tiled.height;
1339 } else {
1340 size = PAGE_ALIGN(gsize.bytes);
1341 }
1342
1343 obj->funcs = &omap_gem_object_funcs;
1344
1345 /* Initialize the GEM object. */
1346 if (!(flags & OMAP_BO_MEM_SHMEM)) {
1347 drm_gem_private_object_init(dev, obj, size);
1348 } else {
1349 ret = drm_gem_object_init(dev, obj, size);
1350 if (ret)
1351 goto err_free;
1352
1353 mapping = obj->filp->f_mapping;
1354 mapping_set_gfp_mask(mapping, GFP_USER | __GFP_DMA32);
1355 }
1356
1357 /* Allocate memory if needed. */
1358 if (flags & OMAP_BO_MEM_DMA_API) {
1359 omap_obj->vaddr = dma_alloc_wc(dev->dev, size,
1360 &omap_obj->dma_addr,
1361 GFP_KERNEL);
1362 if (!omap_obj->vaddr)
1363 goto err_release;
1364 }
1365
1366 mutex_lock(&priv->list_lock);
1367 list_add(&omap_obj->mm_list, &priv->obj_list);
1368 mutex_unlock(&priv->list_lock);
1369
1370 return obj;
1371
1372 err_release:
1373 drm_gem_object_release(obj);
1374 err_free:
1375 kfree(omap_obj);
1376 return NULL;
1377 }
1378
1379 struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
1380 struct sg_table *sgt)
1381 {
1382 struct omap_drm_private *priv = dev->dev_private;
1383 struct omap_gem_object *omap_obj;
1384 struct drm_gem_object *obj;
1385 union omap_gem_size gsize;
1386
1387 /* Without a DMM only physically contiguous buffers can be supported. */
1388 if (sgt->orig_nents != 1 && !priv->has_dmm)
1389 return ERR_PTR(-EINVAL);
1390
1391 gsize.bytes = PAGE_ALIGN(size);
1392 obj = omap_gem_new(dev, gsize, OMAP_BO_MEM_DMABUF | OMAP_BO_WC);
1393 if (!obj)
1394 return ERR_PTR(-ENOMEM);
1395
1396 omap_obj = to_omap_bo(obj);
1397
1398 mutex_lock(&omap_obj->lock);
1399
1400 omap_obj->sgt = sgt;
1401
1402 if (sgt->orig_nents == 1) {
1403 omap_obj->dma_addr = sg_dma_address(sgt->sgl);
1404 } else {
1405 /* Create pages list from sgt */
1406 struct page **pages;
1407 unsigned int npages;
1408 unsigned int ret;
1409
1410 npages = DIV_ROUND_UP(size, PAGE_SIZE);
1411 pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
1412 if (!pages) {
1413 omap_gem_free_object(obj);
1414 obj = ERR_PTR(-ENOMEM);
1415 goto done;
1416 }
1417
1418 omap_obj->pages = pages;
1419 ret = drm_prime_sg_to_page_array(sgt, pages, npages);
1420 if (ret) {
1421 omap_gem_free_object(obj);
1422 obj = ERR_PTR(-ENOMEM);
1423 goto done;
1424 }
1425 }
1426
1427 done:
1428 mutex_unlock(&omap_obj->lock);
1429 return obj;
1430 }
1431
1432 /* convenience method to construct a GEM buffer object, and userspace handle */
1433 int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
1434 union omap_gem_size gsize, u32 flags, u32 *handle)
1435 {
1436 struct drm_gem_object *obj;
1437 int ret;
1438
1439 obj = omap_gem_new(dev, gsize, flags);
1440 if (!obj)
1441 return -ENOMEM;
1442
1443 ret = drm_gem_handle_create(file, obj, handle);
1444 if (ret) {
1445 omap_gem_free_object(obj);
1446 return ret;
1447 }
1448
1449 /* drop reference from allocate - handle holds it now */
1450 drm_gem_object_put(obj);
1451
1452 return 0;
1453 }
1454
1455 /* -----------------------------------------------------------------------------
1456 * Init & Cleanup
1457 */
1458
1459 /* If DMM is used, we need to set some stuff up.. */
1460 void omap_gem_init(struct drm_device *dev)
1461 {
1462 struct omap_drm_private *priv = dev->dev_private;
1463 struct omap_drm_usergart *usergart;
1464 const enum tiler_fmt fmts[] = {
1465 TILFMT_8BIT, TILFMT_16BIT, TILFMT_32BIT
1466 };
1467 int i, j;
1468
1469 if (!dmm_is_available()) {
1470 /* DMM only supported on OMAP4 and later, so this isn't fatal */
1471 dev_warn(dev->dev, "DMM not available, disable DMM support\n");
1472 return;
1473 }
1474
1475 usergart = kcalloc(3, sizeof(*usergart), GFP_KERNEL);
1476 if (!usergart)
1477 return;
1478
1479 /* reserve 4k aligned/wide regions for userspace mappings: */
1480 for (i = 0; i < ARRAY_SIZE(fmts); i++) {
1481 u16 h = 1, w = PAGE_SIZE >> i;
1482
1483 tiler_align(fmts[i], &w, &h);
1484 /* note: since each region is one 4kb page wide and the minimum
1485 * number of rows high, the height ends up being the same as the
1486 * number of pages in the region
1487 */
1488 usergart[i].height = h;
1489 usergart[i].height_shift = ilog2(h);
1490 usergart[i].stride_pfn = tiler_stride(fmts[i], 0) >> PAGE_SHIFT;
1491 usergart[i].slot_shift = ilog2((PAGE_SIZE / h) >> i);
1492 for (j = 0; j < NUM_USERGART_ENTRIES; j++) {
1493 struct omap_drm_usergart_entry *entry;
1494 struct tiler_block *block;
1495
1496 entry = &usergart[i].entry[j];
1497 block = tiler_reserve_2d(fmts[i], w, h, PAGE_SIZE);
1498 if (IS_ERR(block)) {
1499 dev_err(dev->dev,
1500 "reserve failed: %d, %d, %ld\n",
1501 i, j, PTR_ERR(block));
1502 return;
1503 }
1504 entry->dma_addr = tiler_ssptr(block);
1505 entry->block = block;
1506
1507 DBG("%d:%d: %dx%d: dma_addr=%pad stride=%d", i, j, w, h,
1508 &entry->dma_addr,
1509 usergart[i].stride_pfn << PAGE_SHIFT);
1510 }
1511 }
1512
1513 priv->usergart = usergart;
1514 priv->has_dmm = true;
1515 }
1516
1517 void omap_gem_deinit(struct drm_device *dev)
1518 {
1519 struct omap_drm_private *priv = dev->dev_private;
1520
1521 /* I believe we can rely on there being no more outstanding GEM
1522 * objects which could depend on usergart/dmm at this point.
1523 */
1524 kfree(priv->usergart);
1525 }
1526