1caab277bSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
28bb0daffSRob Clark /*
31b409fdaSAlexander A. Klimov * Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/
48bb0daffSRob Clark * Author: Rob Clark <rob.clark@linaro.org>
58bb0daffSRob Clark */
68bb0daffSRob Clark
781f6156cSSam Ravnborg #include <linux/dma-mapping.h>
82d802453SArnd Bergmann #include <linux/seq_file.h>
98bb0daffSRob Clark #include <linux/shmem_fs.h>
102d278f54SLaurent Pinchart #include <linux/spinlock.h>
1101c8f1c4SDan Williams #include <linux/pfn_t.h>
122d278f54SLaurent Pinchart
1381f6156cSSam Ravnborg #include <drm/drm_prime.h>
140de23977SDavid Herrmann #include <drm/drm_vma_manager.h>
158bb0daffSRob Clark
168bb0daffSRob Clark #include "omap_drv.h"
178bb0daffSRob Clark #include "omap_dmm_tiler.h"
188bb0daffSRob Clark
198bb0daffSRob Clark /*
208bb0daffSRob Clark * GEM buffer object implementation.
218bb0daffSRob Clark */
228bb0daffSRob Clark
238bb0daffSRob Clark /* note: we use upper 8 bits of flags for driver-internal flags: */
24cdb0381dSLaurent Pinchart #define OMAP_BO_MEM_DMA_API 0x01000000 /* memory allocated with the dma_alloc_* API */
25cdb0381dSLaurent Pinchart #define OMAP_BO_MEM_SHMEM 0x02000000 /* memory allocated through shmem backing */
26b22e6690SLaurent Pinchart #define OMAP_BO_MEM_DMABUF 0x08000000 /* memory imported from a dmabuf */
278bb0daffSRob Clark
struct omap_gem_object {
	struct drm_gem_object base;

	/* link in a driver-wide list of GEM objects — NOTE(review): the list
	 * head owner and its locking are not visible in this part of the file */
	struct list_head mm_list;

	/* OMAP_BO_* creation flags, plus the driver-internal OMAP_BO_MEM_*
	 * bits kept in the upper 8 bits (see defines above) */
	u32 flags;

	/** width/height for tiled formats (rounded up to slot boundaries) */
	u16 width, height;

	/** roll applied when mapping to DMM */
	u32 roll;

	/** protects pin_cnt, block, pages, dma_addrs and vaddr */
	struct mutex lock;

	/**
	 * dma_addr contains the buffer DMA address. It is valid for
	 *
	 * - buffers allocated through the DMA mapping API (with the
	 *   OMAP_BO_MEM_DMA_API flag set)
	 *
	 * - buffers imported from dmabuf (with the OMAP_BO_MEM_DMABUF flag set)
	 *   if they are physically contiguous (when sgt->orig_nents == 1)
	 *
	 * - buffers mapped through the TILER when pin_cnt is not zero, in which
	 *   case the DMA address points to the TILER aperture
	 *
	 * Physically contiguous buffers have their DMA address equal to the
	 * physical address as we don't remap those buffers through the TILER.
	 *
	 * Buffers mapped to the TILER have their DMA address pointing to the
	 * TILER aperture. As TILER mappings are refcounted (through pin_cnt)
	 * the DMA address must be accessed through omap_gem_pin() to ensure
	 * that the mapping won't disappear unexpectedly. References must be
	 * released with omap_gem_unpin().
	 */
	dma_addr_t dma_addr;

	/**
	 * Number of users of dma_addr / the TILER mapping (see above).
	 */
	refcount_t pin_cnt;

	/**
	 * If the buffer has been imported from a dmabuf the OMAP_BO_MEM_DMABUF
	 * flag is set and the sgt field is valid.
	 */
	struct sg_table *sgt;

	/**
	 * tiler block used when buffer is remapped in DMM/TILER.
	 */
	struct tiler_block *block;

	/**
	 * Array of backing pages, if allocated. Note that pages are never
	 * allocated for buffers originally allocated from contiguous memory
	 */
	struct page **pages;

	/** addresses corresponding to pages in above array */
	dma_addr_t *dma_addrs;

	/**
	 * Virtual address, if mapped.
	 */
	void *vaddr;
};
978bb0daffSRob Clark
987ef93b0aSLaurent Pinchart #define to_omap_bo(x) container_of(x, struct omap_gem_object, base)
998bb0daffSRob Clark
1008bb0daffSRob Clark /* To deal with userspace mmap'ings of 2d tiled buffers, which (a) are
1018bb0daffSRob Clark * not necessarily pinned in TILER all the time, and (b) when they are
1028bb0daffSRob Clark * they are not necessarily page aligned, we reserve one or more small
1038bb0daffSRob Clark * regions in each of the 2d containers to use as a user-GART where we
1048bb0daffSRob Clark * can create a second page-aligned mapping of parts of the buffer
1058bb0daffSRob Clark * being accessed from userspace.
1068bb0daffSRob Clark *
1078bb0daffSRob Clark * Note that we could optimize slightly when we know that multiple
1088bb0daffSRob Clark * tiler containers are backed by the same PAT.. but I'll leave that
1098bb0daffSRob Clark * for later..
1108bb0daffSRob Clark */
#define NUM_USERGART_ENTRIES 2

/* One reserved, page-aligned "window" in a 2d tiler container, used by the
 * fault handler to give userspace a linear view of part of a tiled buffer
 * (see the comment above). */
struct omap_drm_usergart_entry {
	struct tiler_block *block;	/* the reserved tiler block */
	dma_addr_t dma_addr;		/* DMA address of the reserved block */
	struct drm_gem_object *obj;	/* the current pinned obj */
	pgoff_t obj_pgoff;		/* page offset of obj currently
					   mapped in */
};
119f4302747SLaurent Pinchart
/* Per tiler-format user-GART state: the reserved entries plus the slot
 * geometry used by omap_gem_fault_2d() to translate fault offsets. */
struct omap_drm_usergart {
	struct omap_drm_usergart_entry entry[NUM_USERGART_ENTRIES];
	int height;		/* height in rows */
	int height_shift;	/* ilog2(height in rows) */
	int slot_shift;		/* ilog2(width per slot) */
	int stride_pfn;		/* stride in pages */
	int last;		/* index of last used entry (round-robin) */
};
1288bb0daffSRob Clark
129b902f8f4SLaurent Pinchart /* -----------------------------------------------------------------------------
130b902f8f4SLaurent Pinchart * Helpers
131b902f8f4SLaurent Pinchart */
132b902f8f4SLaurent Pinchart
133b902f8f4SLaurent Pinchart /** get mmap offset */
omap_gem_mmap_offset(struct drm_gem_object * obj)134dc8c9aeeSLaurent Pinchart u64 omap_gem_mmap_offset(struct drm_gem_object *obj)
135b902f8f4SLaurent Pinchart {
136b902f8f4SLaurent Pinchart struct drm_device *dev = obj->dev;
137b902f8f4SLaurent Pinchart int ret;
138b902f8f4SLaurent Pinchart size_t size;
139b902f8f4SLaurent Pinchart
140b902f8f4SLaurent Pinchart /* Make it mmapable */
141b902f8f4SLaurent Pinchart size = omap_gem_mmap_size(obj);
142b902f8f4SLaurent Pinchart ret = drm_gem_create_mmap_offset_size(obj, size);
143b902f8f4SLaurent Pinchart if (ret) {
144b902f8f4SLaurent Pinchart dev_err(dev->dev, "could not allocate mmap offset\n");
145b902f8f4SLaurent Pinchart return 0;
146b902f8f4SLaurent Pinchart }
147b902f8f4SLaurent Pinchart
148b902f8f4SLaurent Pinchart return drm_vma_node_offset_addr(&obj->vma_node);
149b902f8f4SLaurent Pinchart }
150b902f8f4SLaurent Pinchart
omap_gem_is_contiguous(struct omap_gem_object * omap_obj)151620063e1SLaurent Pinchart static bool omap_gem_is_contiguous(struct omap_gem_object *omap_obj)
1527ef93b0aSLaurent Pinchart {
153b22e6690SLaurent Pinchart if (omap_obj->flags & OMAP_BO_MEM_DMA_API)
154b22e6690SLaurent Pinchart return true;
155b22e6690SLaurent Pinchart
156b22e6690SLaurent Pinchart if ((omap_obj->flags & OMAP_BO_MEM_DMABUF) && omap_obj->sgt->nents == 1)
157b22e6690SLaurent Pinchart return true;
158b22e6690SLaurent Pinchart
159b22e6690SLaurent Pinchart return false;
1607ef93b0aSLaurent Pinchart }
1617ef93b0aSLaurent Pinchart
1627ef93b0aSLaurent Pinchart /* -----------------------------------------------------------------------------
1637ef93b0aSLaurent Pinchart * Eviction
1647ef93b0aSLaurent Pinchart */
1658bb0daffSRob Clark
/*
 * omap_gem_evict_entry - tear down the usergart mapping of @obj via @entry
 * @obj: the GEM object currently occupying @entry
 * @fmt: tiler format of @obj (selects the usergart geometry)
 * @entry: the usergart entry to free
 *
 * Unmaps the CPU-visible (fake-offset) range of @obj that this entry backs,
 * so later faults repopulate it, then marks the entry unused.
 */
static void omap_gem_evict_entry(struct drm_gem_object *obj,
		enum tiler_fmt fmt, struct omap_drm_usergart_entry *entry)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct omap_drm_private *priv = obj->dev->dev_private;
	/* slot height in rows == number of CPU pages per fault window */
	int n = priv->usergart[fmt].height;
	size_t size = PAGE_SIZE * n;
	/* userspace-visible offset of the region mapped through @entry */
	loff_t off = omap_gem_mmap_offset(obj) +
			(entry->obj_pgoff << PAGE_SHIFT);
	/* virtual stride in pages (row width in bytes rounded up to pages) */
	const int m = DIV_ROUND_UP(omap_obj->width << fmt, PAGE_SIZE);

	if (m > 1) {
		int i;
		/* if stride > than PAGE_SIZE then sparse mapping: one page
		 * was inserted per row, m pages apart, so unmap row by row */
		for (i = n; i > 0; i--) {
			unmap_mapping_range(obj->dev->anon_inode->i_mapping,
					    off, PAGE_SIZE, 1);
			off += PAGE_SIZE * m;
		}
	} else {
		/* dense mapping: the whole window is one contiguous range */
		unmap_mapping_range(obj->dev->anon_inode->i_mapping,
				    off, size, 1);
	}

	entry->obj = NULL;
}
1928bb0daffSRob Clark
1938bb0daffSRob Clark /* Evict a buffer from usergart, if it is mapped there */
omap_gem_evict(struct drm_gem_object * obj)194620063e1SLaurent Pinchart static void omap_gem_evict(struct drm_gem_object *obj)
1958bb0daffSRob Clark {
1968bb0daffSRob Clark struct omap_gem_object *omap_obj = to_omap_bo(obj);
197f4302747SLaurent Pinchart struct omap_drm_private *priv = obj->dev->dev_private;
1988bb0daffSRob Clark
19948b34ac0STomi Valkeinen if (omap_obj->flags & OMAP_BO_TILED_MASK) {
2008bb0daffSRob Clark enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
2018bb0daffSRob Clark int i;
2028bb0daffSRob Clark
2038bb0daffSRob Clark for (i = 0; i < NUM_USERGART_ENTRIES; i++) {
204f4302747SLaurent Pinchart struct omap_drm_usergart_entry *entry =
205f4302747SLaurent Pinchart &priv->usergart[fmt].entry[i];
206f4302747SLaurent Pinchart
2078bb0daffSRob Clark if (entry->obj == obj)
208620063e1SLaurent Pinchart omap_gem_evict_entry(obj, fmt, entry);
2098bb0daffSRob Clark }
2108bb0daffSRob Clark }
2118bb0daffSRob Clark }
2128bb0daffSRob Clark
2137ef93b0aSLaurent Pinchart /* -----------------------------------------------------------------------------
2147ef93b0aSLaurent Pinchart * Page Management
2158bb0daffSRob Clark */
2168bb0daffSRob Clark
/*
 * Ensure backing pages are allocated. Must be called with the omap_obj.lock
 * held.
 *
 * On success, omap_obj->pages holds the shmem backing pages and
 * omap_obj->dma_addrs a matching per-page DMA address array. For WC and
 * uncached buffers every page is DMA-mapped up front (which also cleans the
 * CPU cache); for cached buffers the array is left zero-filled — a zero
 * entry means "not DMA-mapped" (see omap_gem_detach_pages()).
 * Returns 0 on success or a negative error code.
 */
static int omap_gem_attach_pages(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct page **pages;
	int npages = obj->size >> PAGE_SHIFT;
	int i, ret;
	dma_addr_t *addrs;

	lockdep_assert_held(&omap_obj->lock);

	/*
	 * If not using shmem (in which case backing pages don't need to be
	 * allocated) or if pages are already allocated we're done.
	 */
	if (!(omap_obj->flags & OMAP_BO_MEM_SHMEM) || omap_obj->pages)
		return 0;

	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages)) {
		dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages));
		return PTR_ERR(pages);
	}

	/* for non-cached buffers, ensure the new pages are clean because
	 * DSS, GPU, etc. are not cache coherent:
	 */
	if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
		addrs = kmalloc_array(npages, sizeof(*addrs), GFP_KERNEL);
		if (!addrs) {
			ret = -ENOMEM;
			goto free_pages;
		}

		for (i = 0; i < npages; i++) {
			addrs[i] = dma_map_page(dev->dev, pages[i],
					0, PAGE_SIZE, DMA_TO_DEVICE);

			if (dma_mapping_error(dev->dev, addrs[i])) {
				dev_warn(dev->dev,
					"%s: failed to map page\n", __func__);

				/* unwind the mappings made so far */
				for (i = i - 1; i >= 0; --i) {
					dma_unmap_page(dev->dev, addrs[i],
						PAGE_SIZE, DMA_TO_DEVICE);
				}

				ret = -ENOMEM;
				goto free_addrs;
			}
		}
	} else {
		/* cached buffers: keep a zero-filled address array so each
		 * entry reads as "not mapped" until it is used */
		addrs = kcalloc(npages, sizeof(*addrs), GFP_KERNEL);
		if (!addrs) {
			ret = -ENOMEM;
			goto free_pages;
		}
	}

	omap_obj->dma_addrs = addrs;
	omap_obj->pages = pages;

	return 0;

free_addrs:
	kfree(addrs);
free_pages:
	drm_gem_put_pages(obj, pages, true, false);

	return ret;
}
2928bb0daffSRob Clark
2933cbd0c58SLaurent Pinchart /* Release backing pages. Must be called with the omap_obj.lock held. */
omap_gem_detach_pages(struct drm_gem_object * obj)2948bb0daffSRob Clark static void omap_gem_detach_pages(struct drm_gem_object *obj)
2958bb0daffSRob Clark {
2968bb0daffSRob Clark struct omap_gem_object *omap_obj = to_omap_bo(obj);
297930dc19cSLaurent Pinchart unsigned int npages = obj->size >> PAGE_SHIFT;
298930dc19cSLaurent Pinchart unsigned int i;
2998bb0daffSRob Clark
3003cbd0c58SLaurent Pinchart lockdep_assert_held(&omap_obj->lock);
3013cbd0c58SLaurent Pinchart
3028bb0daffSRob Clark for (i = 0; i < npages; i++) {
30357c22f7cSLaurent Pinchart if (omap_obj->dma_addrs[i])
304930dc19cSLaurent Pinchart dma_unmap_page(obj->dev->dev, omap_obj->dma_addrs[i],
30597817fd4SLaurent Pinchart PAGE_SIZE, DMA_TO_DEVICE);
3068bb0daffSRob Clark }
3078bb0daffSRob Clark
30857c22f7cSLaurent Pinchart kfree(omap_obj->dma_addrs);
30957c22f7cSLaurent Pinchart omap_obj->dma_addrs = NULL;
3108bb0daffSRob Clark
311ddcd09d6SRob Clark drm_gem_put_pages(obj, omap_obj->pages, true, false);
3128bb0daffSRob Clark omap_obj->pages = NULL;
3138bb0daffSRob Clark }
3148bb0daffSRob Clark
3158bb0daffSRob Clark /* get buffer flags */
omap_gem_flags(struct drm_gem_object * obj)316dfe9cfccSLaurent Pinchart u32 omap_gem_flags(struct drm_gem_object *obj)
3178bb0daffSRob Clark {
3188bb0daffSRob Clark return to_omap_bo(obj)->flags;
3198bb0daffSRob Clark }
3208bb0daffSRob Clark
3218bb0daffSRob Clark /** get mmap size */
omap_gem_mmap_size(struct drm_gem_object * obj)3228bb0daffSRob Clark size_t omap_gem_mmap_size(struct drm_gem_object *obj)
3238bb0daffSRob Clark {
3248bb0daffSRob Clark struct omap_gem_object *omap_obj = to_omap_bo(obj);
3258bb0daffSRob Clark size_t size = obj->size;
3268bb0daffSRob Clark
32748b34ac0STomi Valkeinen if (omap_obj->flags & OMAP_BO_TILED_MASK) {
3288bb0daffSRob Clark /* for tiled buffers, the virtual size has stride rounded up
3298bb0daffSRob Clark * to 4kb.. (to hide the fact that row n+1 might start 16kb or
3308bb0daffSRob Clark * 32kb later!). But we don't back the entire buffer with
3318bb0daffSRob Clark * pages, only the valid picture part.. so need to adjust for
3328bb0daffSRob Clark * this in the size used to mmap and generate mmap offset
3338bb0daffSRob Clark */
3348bb0daffSRob Clark size = tiler_vsize(gem2fmt(omap_obj->flags),
3358bb0daffSRob Clark omap_obj->width, omap_obj->height);
3368bb0daffSRob Clark }
3378bb0daffSRob Clark
3388bb0daffSRob Clark return size;
3398bb0daffSRob Clark }
3408bb0daffSRob Clark
3417ef93b0aSLaurent Pinchart /* -----------------------------------------------------------------------------
3427ef93b0aSLaurent Pinchart * Fault Handling
3437ef93b0aSLaurent Pinchart */
3447ef93b0aSLaurent Pinchart
/* Normal handling for the case of faulting in non-tiled buffers.
 * Inserts a single PFN for the faulting address: the shmem backing page
 * if one exists, otherwise the corresponding page of the (necessarily
 * contiguous) buffer at dma_addr. Called with omap_obj->lock held by
 * omap_gem_fault(). */
static vm_fault_t omap_gem_fault_1d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	unsigned long pfn;
	pgoff_t pgoff;

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	if (omap_obj->pages) {
		/* shmem-backed: sync the page for CPU access, then map it */
		omap_gem_cpu_sync_page(obj, pgoff);
		pfn = page_to_pfn(omap_obj->pages[pgoff]);
	} else {
		/* no page array — the buffer must be contiguous, so the
		 * faulting page lives at a fixed offset from dma_addr */
		BUG_ON(!omap_gem_is_contiguous(omap_obj));
		pfn = (omap_obj->dma_addr >> PAGE_SHIFT) + pgoff;
	}

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	return vmf_insert_mixed(vma, vmf->address,
			__pfn_to_pfn_t(pfn, PFN_DEV));
}
3708bb0daffSRob Clark
/* Special handling for the case of faulting in 2d tiled buffers: pin a
 * slot-row of backing pages into a reserved usergart tiler block and map
 * the corresponding TILER-aperture PFNs into the VMA, evicting whichever
 * buffer previously used the (round-robin selected) entry. Called with
 * omap_obj->lock held by omap_gem_fault(). */
static vm_fault_t omap_gem_fault_2d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct omap_drm_private *priv = obj->dev->dev_private;
	struct omap_drm_usergart_entry *entry;
	enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
	struct page *pages[64];  /* XXX is this too much to have on stack? */
	unsigned long pfn;
	pgoff_t pgoff, base_pgoff;
	unsigned long vaddr;
	int i, err, slots;
	vm_fault_t ret = VM_FAULT_NOPAGE;

	/*
	 * Note the height of the slot is also equal to the number of pages
	 * that need to be mapped in to fill 4kb wide CPU page. If the slot
	 * height is 64, then 64 pages fill a 4kb wide by 64 row region.
	 */
	const int n = priv->usergart[fmt].height;
	const int n_shift = priv->usergart[fmt].height_shift;

	/*
	 * If buffer width in bytes > PAGE_SIZE then the virtual stride is
	 * rounded up to next multiple of PAGE_SIZE.. this need to be taken
	 * into account in some of the math, so figure out virtual stride
	 * in pages
	 */
	const int m = DIV_ROUND_UP(omap_obj->width << fmt, PAGE_SIZE);

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	/*
	 * Actual address we start mapping at is rounded down to previous slot
	 * boundary in the y direction:
	 */
	base_pgoff = round_down(pgoff, m << n_shift);

	/* figure out buffer width in slots */
	slots = omap_obj->width >> priv->usergart[fmt].slot_shift;

	/* user address of the start of the window we will populate */
	vaddr = vmf->address - ((pgoff - base_pgoff) << PAGE_SHIFT);

	/* pick the next usergart entry, round-robin */
	entry = &priv->usergart[fmt].entry[priv->usergart[fmt].last];

	/* evict previous buffer using this usergart entry, if any: */
	if (entry->obj)
		omap_gem_evict_entry(entry->obj, fmt, entry);

	entry->obj = obj;
	entry->obj_pgoff = base_pgoff;

	/* now convert base_pgoff to phys offset from virt offset: */
	base_pgoff = (base_pgoff >> n_shift) * slots;

	/* for wider-than 4k.. figure out which part of the slot-row we want: */
	if (m > 1) {
		int off = pgoff % m;
		entry->obj_pgoff += off;
		base_pgoff /= m;
		slots = min(slots - (off << n_shift), n);
		base_pgoff += off << n_shift;
		vaddr += off << PAGE_SHIFT;
	}

	/*
	 * Map in pages. Beyond the valid pixel part of the buffer, we set
	 * pages[i] to NULL to get a dummy page mapped in.. if someone
	 * reads/writes it they will get random/undefined content, but at
	 * least it won't be corrupting whatever other random page used to
	 * be mapped in, or other undefined behavior.
	 */
	memcpy(pages, &omap_obj->pages[base_pgoff],
			sizeof(struct page *) * slots);
	memset(pages + slots, 0,
			sizeof(struct page *) * (n - slots));

	/* pin the slot-row of backing pages into the reserved tiler block */
	err = tiler_pin(entry->block, pages, ARRAY_SIZE(pages), 0, true);
	if (err) {
		ret = vmf_error(err);
		dev_err(obj->dev->dev, "failed to pin: %d\n", err);
		return ret;
	}

	pfn = entry->dma_addr >> PAGE_SHIFT;

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	/* insert one PFN per row; rows are stride_pfn apart in the TILER
	 * aperture and m pages apart in user virtual space */
	for (i = n; i > 0; i--) {
		ret = vmf_insert_mixed(vma,
			vaddr, __pfn_to_pfn_t(pfn, PFN_DEV));
		if (ret & VM_FAULT_ERROR)
			break;
		pfn += priv->usergart[fmt].stride_pfn;
		vaddr += PAGE_SIZE * m;
	}

	/* simple round-robin: */
	priv->usergart[fmt].last = (priv->usergart[fmt].last + 1)
				 % NUM_USERGART_ENTRIES;

	return ret;
}
4778bb0daffSRob Clark
/**
 * omap_gem_fault - pagefault handler for GEM objects
 * @vmf: fault detail
 *
 * Invoked when a fault occurs on an mmap of a GEM managed area. GEM
 * does most of the work for us including the actual map/unmap calls
 * but we need to do the actual page work.
 *
 * The VMA was set up by GEM. In doing so it also ensured that the
 * vma->vm_private_data points to the GEM object that is backing this
 * mapping.
 *
 * Return: a VM_FAULT_* code.
 */
static vm_fault_t omap_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int err;
	vm_fault_t ret;

	/* Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet
	 */
	mutex_lock(&omap_obj->lock);

	/* if a shmem backed object, make sure we have pages attached now */
	err = omap_gem_attach_pages(obj);
	if (err) {
		ret = vmf_error(err);
		goto fail;
	}

	/* where should we do corresponding put_pages().. we are mapping
	 * the original page, rather than thru a GART, so we can't rely
	 * on eviction to trigger this. But munmap() or all mappings should
	 * probably trigger put_pages()?
	 */

	/* dispatch on layout: 2d tiled buffers go through the usergart */
	if (omap_obj->flags & OMAP_BO_TILED_MASK)
		ret = omap_gem_fault_2d(obj, vma, vmf);
	else
		ret = omap_gem_fault_1d(obj, vma, vmf);


fail:
	mutex_unlock(&omap_obj->lock);
	return ret;
}
5268bb0daffSRob Clark
/*
 * omap_gem_object_mmap - set up a userspace mapping of a GEM object
 *
 * Marks the VMA for mixed-map PFN insertion by omap_gem_fault() and picks
 * the page protection matching the buffer's caching mode: write-combine,
 * uncached, or — for cached objects — backed by the shmem file's own
 * address_space so unmap_mapping_range() behaves as expected.
 */
static int omap_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP | VM_IO | VM_MIXEDMAP);

	if (omap_obj->flags & OMAP_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (omap_obj->flags & OMAP_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * We do have some private objects, at least for scanout buffers
		 * on hardware without DMM/TILER. But these are allocated write-
		 * combine
		 */
		if (WARN_ON(!obj->filp))
			return -EINVAL;

		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
		vma_set_file(vma, obj->filp);

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);

	return 0;
}
5618bb0daffSRob Clark
5627ef93b0aSLaurent Pinchart /* -----------------------------------------------------------------------------
5637ef93b0aSLaurent Pinchart * Dumb Buffers
5647ef93b0aSLaurent Pinchart */
5658bb0daffSRob Clark
5668bb0daffSRob Clark /**
5678bb0daffSRob Clark * omap_gem_dumb_create - create a dumb buffer
568567cd704SLee Jones * @file: our client file
5698bb0daffSRob Clark * @dev: our device
5708bb0daffSRob Clark * @args: the requested arguments copied from userspace
5718bb0daffSRob Clark *
5728bb0daffSRob Clark * Allocate a buffer suitable for use for a frame buffer of the
5738bb0daffSRob Clark * form described by user space. Give userspace a handle by which
5748bb0daffSRob Clark * to reference it.
5758bb0daffSRob Clark */
omap_gem_dumb_create(struct drm_file * file,struct drm_device * dev,struct drm_mode_create_dumb * args)5768bb0daffSRob Clark int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
5778bb0daffSRob Clark struct drm_mode_create_dumb *args)
5788bb0daffSRob Clark {
5798bb0daffSRob Clark union omap_gem_size gsize;
5808bb0daffSRob Clark
581ce481edaSTomi Valkeinen args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
5826a5228fdSTomi Valkeinen
5838bb0daffSRob Clark args->size = PAGE_ALIGN(args->pitch * args->height);
5848bb0daffSRob Clark
5858bb0daffSRob Clark gsize = (union omap_gem_size){
5868bb0daffSRob Clark .bytes = args->size,
5878bb0daffSRob Clark };
5888bb0daffSRob Clark
5898bb0daffSRob Clark return omap_gem_new_handle(dev, file, gsize,
5908bb0daffSRob Clark OMAP_BO_SCANOUT | OMAP_BO_WC, &args->handle);
5918bb0daffSRob Clark }
5928bb0daffSRob Clark
5938bb0daffSRob Clark /**
5945f994ce5STomi Valkeinen * omap_gem_dumb_map_offset - create an offset for a dumb buffer
5958bb0daffSRob Clark * @file: our drm client file
5968bb0daffSRob Clark * @dev: drm device
5978bb0daffSRob Clark * @handle: GEM handle to the object (from dumb_create)
598567cd704SLee Jones * @offset: memory map offset placeholder
5998bb0daffSRob Clark *
6008bb0daffSRob Clark * Do the necessary setup to allow the mapping of the frame buffer
6018bb0daffSRob Clark * into user memory. We don't have to do much here at the moment.
6028bb0daffSRob Clark */
omap_gem_dumb_map_offset(struct drm_file * file,struct drm_device * dev,u32 handle,u64 * offset)6038bb0daffSRob Clark int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
604dfe9cfccSLaurent Pinchart u32 handle, u64 *offset)
6058bb0daffSRob Clark {
6068bb0daffSRob Clark struct drm_gem_object *obj;
6078bb0daffSRob Clark int ret = 0;
6088bb0daffSRob Clark
6098bb0daffSRob Clark /* GEM does all our handle to object mapping */
610a8ad0bd8SChris Wilson obj = drm_gem_object_lookup(file, handle);
6118bb0daffSRob Clark if (obj == NULL) {
6128bb0daffSRob Clark ret = -ENOENT;
6138bb0daffSRob Clark goto fail;
6148bb0daffSRob Clark }
6158bb0daffSRob Clark
6168bb0daffSRob Clark *offset = omap_gem_mmap_offset(obj);
6178bb0daffSRob Clark
618d742cdd6SEmil Velikov drm_gem_object_put(obj);
6198bb0daffSRob Clark
6208bb0daffSRob Clark fail:
6218bb0daffSRob Clark return ret;
6228bb0daffSRob Clark }
6238bb0daffSRob Clark
624e1c1174fSLaurent Pinchart #ifdef CONFIG_DRM_FBDEV_EMULATION
6258bb0daffSRob Clark /* Set scrolling position. This allows us to implement fast scrolling
6268bb0daffSRob Clark * for console.
6278bb0daffSRob Clark *
6288bb0daffSRob Clark * Call only from non-atomic contexts.
6298bb0daffSRob Clark */
int omap_gem_roll(struct drm_gem_object *obj, u32 roll)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	u32 npages = obj->size >> PAGE_SHIFT;
	int ret = 0;

	/* A roll beyond the number of backing pages is meaningless. */
	if (roll > npages) {
		dev_err(obj->dev->dev, "invalid roll: %d\n", roll);
		return -EINVAL;
	}

	/* NOTE(review): roll is stored before the lock is taken, so a
	 * concurrent reader may observe the new value before the repin
	 * below completes — confirm this is acceptable. */
	omap_obj->roll = roll;

	mutex_lock(&omap_obj->lock);

	/* if we aren't mapped yet, we don't need to do anything */
	if (omap_obj->block) {
		/* Make sure pages are attached, then repin the existing
		 * tiler block at the new roll offset. */
		ret = omap_gem_attach_pages(obj);
		if (ret)
			goto fail;

		ret = tiler_pin(omap_obj->block, omap_obj->pages, npages,
				roll, true);
		if (ret)
			dev_err(obj->dev->dev, "could not repin: %d\n", ret);
	}

fail:
	mutex_unlock(&omap_obj->lock);

	return ret;
}
662e1c1174fSLaurent Pinchart #endif
6638bb0daffSRob Clark
6647ef93b0aSLaurent Pinchart /* -----------------------------------------------------------------------------
6657ef93b0aSLaurent Pinchart * Memory Management & DMA Sync
6667ef93b0aSLaurent Pinchart */
6677ef93b0aSLaurent Pinchart
66824fbaca0SLaurent Pinchart /*
66924fbaca0SLaurent Pinchart * shmem buffers that are mapped cached are not coherent.
67024fbaca0SLaurent Pinchart *
67124fbaca0SLaurent Pinchart * We keep track of dirty pages using page faulting to perform cache management.
67224fbaca0SLaurent Pinchart * When a page is mapped to the CPU in read/write mode the device can't access
67324fbaca0SLaurent Pinchart * it and omap_obj->dma_addrs[i] is NULL. When a page is mapped to the device
67424fbaca0SLaurent Pinchart * the omap_obj->dma_addrs[i] is set to the DMA address, and the page is
67524fbaca0SLaurent Pinchart * unmapped from the CPU.
6767ef93b0aSLaurent Pinchart */
omap_gem_is_cached_coherent(struct drm_gem_object * obj)677620063e1SLaurent Pinchart static inline bool omap_gem_is_cached_coherent(struct drm_gem_object *obj)
6787ef93b0aSLaurent Pinchart {
6797ef93b0aSLaurent Pinchart struct omap_gem_object *omap_obj = to_omap_bo(obj);
680cdb0381dSLaurent Pinchart
68124fbaca0SLaurent Pinchart return !((omap_obj->flags & OMAP_BO_MEM_SHMEM) &&
68224fbaca0SLaurent Pinchart ((omap_obj->flags & OMAP_BO_CACHE_MASK) == OMAP_BO_CACHED));
6837ef93b0aSLaurent Pinchart }
6848bb0daffSRob Clark
6858bb0daffSRob Clark /* Sync the buffer for CPU access.. note pages should already be
6868bb0daffSRob Clark * attached, ie. omap_gem_get_pages()
6878bb0daffSRob Clark */
omap_gem_cpu_sync_page(struct drm_gem_object * obj,int pgoff)688d61ce7daSLaurent Pinchart void omap_gem_cpu_sync_page(struct drm_gem_object *obj, int pgoff)
6898bb0daffSRob Clark {
6908bb0daffSRob Clark struct drm_device *dev = obj->dev;
6918bb0daffSRob Clark struct omap_gem_object *omap_obj = to_omap_bo(obj);
6928bb0daffSRob Clark
693620063e1SLaurent Pinchart if (omap_gem_is_cached_coherent(obj))
69424fbaca0SLaurent Pinchart return;
69524fbaca0SLaurent Pinchart
69624fbaca0SLaurent Pinchart if (omap_obj->dma_addrs[pgoff]) {
69757c22f7cSLaurent Pinchart dma_unmap_page(dev->dev, omap_obj->dma_addrs[pgoff],
69897817fd4SLaurent Pinchart PAGE_SIZE, DMA_TO_DEVICE);
69957c22f7cSLaurent Pinchart omap_obj->dma_addrs[pgoff] = 0;
7008bb0daffSRob Clark }
7018bb0daffSRob Clark }
7028bb0daffSRob Clark
7038bb0daffSRob Clark /* sync the buffer for DMA access */
/* Sync the whole buffer for DMA access: map every CPU-owned page to the
 * device and, if any ownership changed, shoot down userspace mappings so
 * subsequent CPU access faults and re-syncs. */
void omap_gem_dma_sync_buffer(struct drm_gem_object *obj,
		enum dma_data_direction dir)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int i, npages = obj->size >> PAGE_SHIFT;
	struct page **pages = omap_obj->pages;
	bool dirty = false;

	if (omap_gem_is_cached_coherent(obj))
		return;

	for (i = 0; i < npages; i++) {
		/* A zero dma_addr means the page is currently CPU-owned. */
		if (!omap_obj->dma_addrs[i]) {
			dma_addr_t addr;

			addr = dma_map_page(dev->dev, pages[i], 0,
					PAGE_SIZE, dir);
			if (dma_mapping_error(dev->dev, addr)) {
				/* Best-effort: stop at the first mapping
				 * failure, keeping what was mapped so far. */
				dev_warn(dev->dev, "%s: failed to map page\n",
					__func__);
				break;
			}

			dirty = true;
			omap_obj->dma_addrs[i] = addr;
		}
	}

	if (dirty) {
		/* Invalidate CPU-side mmaps so the fault handler can redo
		 * the CPU/device ownership handoff per page. */
		unmap_mapping_range(obj->filp->f_mapping, 0,
				omap_gem_mmap_size(obj), 1);
	}
}
7388bb0daffSRob Clark
/* Reserve a TILER/DMM region for the object (2D for tiled buffers, 1D
 * otherwise), pin its pages into it, and record the resulting contiguous
 * DMA address. Caller must hold omap_obj->lock and the object must not
 * already have a block. */
static int omap_gem_pin_tiler(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	u32 npages = obj->size >> PAGE_SHIFT;
	enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
	struct tiler_block *block;
	int ret;

	BUG_ON(omap_obj->block);

	if (omap_obj->flags & OMAP_BO_TILED_MASK) {
		/* 2D reservation: dimensions in slots, PAGE_SIZE alignment. */
		block = tiler_reserve_2d(fmt, omap_obj->width, omap_obj->height,
					 PAGE_SIZE);
	} else {
		block = tiler_reserve_1d(obj->size);
	}

	if (IS_ERR(block)) {
		ret = PTR_ERR(block);
		dev_err(obj->dev->dev, "could not remap: %d (%d)\n", ret, fmt);
		goto fail;
	}

	/* TODO: enable async refill.. */
	ret = tiler_pin(block, omap_obj->pages, npages, omap_obj->roll, true);
	if (ret) {
		/* Pin failed: give the reservation back. */
		tiler_release(block);
		dev_err(obj->dev->dev, "could not pin: %d\n", ret);
		goto fail;
	}

	omap_obj->dma_addr = tiler_ssptr(block);
	omap_obj->block = block;

	DBG("got dma address: %pad", &omap_obj->dma_addr);

fail:
	return ret;
}
77886ad0397SIvaylo Dimitrov
779bc20c85cSLaurent Pinchart /**
780bc20c85cSLaurent Pinchart * omap_gem_pin() - Pin a GEM object in memory
781bc20c85cSLaurent Pinchart * @obj: the GEM object
782bc20c85cSLaurent Pinchart * @dma_addr: the DMA address
783bc20c85cSLaurent Pinchart *
784bc20c85cSLaurent Pinchart * Pin the given GEM object in memory and fill the dma_addr pointer with the
785bc20c85cSLaurent Pinchart * object's DMA address. If the buffer is not physically contiguous it will be
786bc20c85cSLaurent Pinchart * remapped through the TILER to provide a contiguous view.
787bc20c85cSLaurent Pinchart *
788bc20c85cSLaurent Pinchart * Pins are reference-counted, calling this function multiple times is allowed
789bc20c85cSLaurent Pinchart * as long the corresponding omap_gem_unpin() calls are balanced.
790bc20c85cSLaurent Pinchart *
791bc20c85cSLaurent Pinchart * Return 0 on success or a negative error code otherwise.
7928bb0daffSRob Clark */
int omap_gem_pin(struct drm_gem_object *obj, dma_addr_t *dma_addr)
{
	struct omap_drm_private *priv = obj->dev->dev_private;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	mutex_lock(&omap_obj->lock);

	/* Physically contiguous buffers already have a usable DMA address;
	 * only non-contiguous ones need page attach / TILER remap. */
	if (!omap_gem_is_contiguous(omap_obj)) {
		if (refcount_read(&omap_obj->pin_cnt) == 0) {

			/* First pin: take the count from 0 to 1 and do the
			 * actual attach/remap work. */
			refcount_set(&omap_obj->pin_cnt, 1);

			ret = omap_gem_attach_pages(obj);
			if (ret)
				goto fail;

			/* NOTE(review): on the failure paths above/below the
			 * pin_cnt stays at 1 — confirm callers balance this
			 * with an unpin on error. */
			if (omap_obj->flags & OMAP_BO_SCANOUT) {
				if (priv->has_dmm) {
					ret = omap_gem_pin_tiler(obj);
					if (ret)
						goto fail;
				}
			}
		} else {
			/* Already pinned: just bump the reference count. */
			refcount_inc(&omap_obj->pin_cnt);
		}
	}

	if (dma_addr)
		*dma_addr = omap_obj->dma_addr;

fail:
	mutex_unlock(&omap_obj->lock);

	return ret;
}
8308bb0daffSRob Clark
831bc20c85cSLaurent Pinchart /**
832d3e4c46dSTomi Valkeinen * omap_gem_unpin_locked() - Unpin a GEM object from memory
833bc20c85cSLaurent Pinchart * @obj: the GEM object
834bc20c85cSLaurent Pinchart *
835d3e4c46dSTomi Valkeinen * omap_gem_unpin() without locking.
8368bb0daffSRob Clark */
static void omap_gem_unpin_locked(struct drm_gem_object *obj)
{
	struct omap_drm_private *priv = obj->dev->dev_private;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret;

	/* Contiguous buffers are never pinned through the TILER, so there
	 * is nothing to undo. */
	if (omap_gem_is_contiguous(omap_obj))
		return;

	/* Only the last unpin tears the mapping down. */
	if (refcount_dec_and_test(&omap_obj->pin_cnt)) {
		/* Drop the cached scatterlist built by omap_gem_get_sg(). */
		if (omap_obj->sgt) {
			sg_free_table(omap_obj->sgt);
			kfree(omap_obj->sgt);
			omap_obj->sgt = NULL;
		}
		/* Non-scanout buffers never went through the TILER. */
		if (!(omap_obj->flags & OMAP_BO_SCANOUT))
			return;
		if (priv->has_dmm) {
			/* Unpin the pages, then release the reservation;
			 * errors are logged but teardown continues. */
			ret = tiler_unpin(omap_obj->block);
			if (ret) {
				dev_err(obj->dev->dev,
					"could not unpin pages: %d\n", ret);
			}
			ret = tiler_release(omap_obj->block);
			if (ret) {
				dev_err(obj->dev->dev,
					"could not release unmap: %d\n", ret);
			}
			omap_obj->dma_addr = 0;
			omap_obj->block = NULL;
		}
	}
}
870393a949fSTomi Valkeinen
871d3e4c46dSTomi Valkeinen /**
872d3e4c46dSTomi Valkeinen * omap_gem_unpin() - Unpin a GEM object from memory
873d3e4c46dSTomi Valkeinen * @obj: the GEM object
874d3e4c46dSTomi Valkeinen *
875d3e4c46dSTomi Valkeinen * Unpin the given GEM object previously pinned with omap_gem_pin(). Pins are
876d3e4c46dSTomi Valkeinen * reference-counted, the actual unpin will only be performed when the number
877d3e4c46dSTomi Valkeinen * of calls to this function matches the number of calls to omap_gem_pin().
878d3e4c46dSTomi Valkeinen */
void omap_gem_unpin(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	/* Serialize against omap_gem_pin() and other pin-state updates. */
	mutex_lock(&omap_obj->lock);
	omap_gem_unpin_locked(obj);
	mutex_unlock(&omap_obj->lock);
}
8878bb0daffSRob Clark
8888bb0daffSRob Clark /* Get rotated scanout address (only valid if already pinned), at the
8898bb0daffSRob Clark * specified orientation and x,y offset from top-left corner of buffer
8908bb0daffSRob Clark * (only valid for tiled 2d buffers)
8918bb0daffSRob Clark */
/* Get the rotated scanout address at the given orientation and x,y offset
 * from the top-left corner. Only valid for 2D tiled buffers that are
 * currently pinned. */
int omap_gem_rotated_dma_addr(struct drm_gem_object *obj, u32 orient,
		int x, int y, dma_addr_t *dma_addr)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret;

	mutex_lock(&omap_obj->lock);

	if (refcount_read(&omap_obj->pin_cnt) > 0 && omap_obj->block &&
	    (omap_obj->flags & OMAP_BO_TILED_MASK)) {
		*dma_addr = tiler_tsptr(omap_obj->block, orient, x, y);
		ret = 0;
	} else {
		/* Not pinned, not TILER-backed, or not a 2D tiled buffer. */
		ret = -EINVAL;
	}

	mutex_unlock(&omap_obj->lock);

	return ret;
}
9108bb0daffSRob Clark
9118bb0daffSRob Clark /* Get tiler stride for the buffer (only valid for 2d tiled buffers) */
/* Get the TILER stride for the buffer (only valid for 2D tiled buffers). */
int omap_gem_tiled_stride(struct drm_gem_object *obj, u32 orient)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (!(omap_obj->flags & OMAP_BO_TILED_MASK))
		return -EINVAL;

	return tiler_stride(gem2fmt(omap_obj->flags), orient);
}
9208bb0daffSRob Clark
9218bb0daffSRob Clark /* if !remap, and we don't have pages backing, then fail, rather than
9228bb0daffSRob Clark * increasing the pin count (which we don't really do yet anyways,
9238bb0daffSRob Clark * because we don't support swapping pages back out). And 'remap'
9248bb0daffSRob Clark * might not be quite the right name, but I wanted to keep it working
925bc20c85cSLaurent Pinchart * similarly to omap_gem_pin(). Note though that mutex is not
9268bb0daffSRob Clark * aquired if !remap (because this can be called in atomic ctxt),
927bc20c85cSLaurent Pinchart * but probably omap_gem_unpin() should be changed to work in the
9288bb0daffSRob Clark * same way. If !remap, a matching omap_gem_put_pages() call is not
9298bb0daffSRob Clark * required (and should not be made).
9308bb0daffSRob Clark */
int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
		bool remap)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	mutex_lock(&omap_obj->lock);

	/* With remap, make sure backing pages exist before handing them out. */
	if (remap)
		ret = omap_gem_attach_pages(obj);

	/* Without remap (or if attach left nothing behind) we cannot hand
	 * out a page array that doesn't exist. */
	if (!ret && !omap_obj->pages)
		ret = -ENOMEM;

	if (!ret)
		*pages = omap_obj->pages;

	mutex_unlock(&omap_obj->lock);

	return ret;
}
9578bb0daffSRob Clark
9588bb0daffSRob Clark /* release pages when DMA no longer being performed */
/* Counterpart to omap_gem_get_pages(); currently a no-op placeholder. */
int omap_gem_put_pages(struct drm_gem_object *obj)
{
	/* do something here if we dynamically attach/detach pages.. at
	 * least they would no longer need to be pinned if everyone has
	 * released the pages..
	 */
	return 0;
}
9678bb0daffSRob Clark
/* Pin the object and build (or return the cached) scatterlist describing
 * it. For TILER-backed buffers the table walks the contiguous remapped
 * view; otherwise it lists the individual backing pages. Balanced by
 * omap_gem_put_sg(). */
struct sg_table *omap_gem_get_sg(struct drm_gem_object *obj,
		enum dma_data_direction dir)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	dma_addr_t addr;
	struct sg_table *sgt;
	struct scatterlist *sg;
	unsigned int count, len, stride, i;
	int ret;

	ret = omap_gem_pin(obj, &addr);
	if (ret)
		return ERR_PTR(ret);

	mutex_lock(&omap_obj->lock);

	/* Reuse a previously built table if one is cached. */
	sgt = omap_obj->sgt;
	if (sgt)
		goto out;

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		ret = -ENOMEM;
		goto err_unpin;
	}

	if (addr) {
		/* Contiguous view: one entry per row (tiled) or a single
		 * entry covering the whole buffer. */
		if (omap_obj->flags & OMAP_BO_TILED_MASK) {
			enum tiler_fmt fmt = gem2fmt(omap_obj->flags);

			len = omap_obj->width << (int)fmt;
			count = omap_obj->height;
			stride = tiler_stride(fmt, 0);
		} else {
			len = obj->size;
			count = 1;
			stride = 0;
		}
	} else {
		/* No contiguous address: one entry per backing page. */
		count = obj->size >> PAGE_SHIFT;
	}

	ret = sg_alloc_table(sgt, count, GFP_KERNEL);
	if (ret)
		goto err_free;

	/* this must be after omap_gem_pin() to ensure we have pages attached */
	omap_gem_dma_sync_buffer(obj, dir);

	if (addr) {
		for_each_sg(sgt->sgl, sg, count, i) {
			sg_set_page(sg, phys_to_page(addr), len,
				offset_in_page(addr));
			sg_dma_address(sg) = addr;
			sg_dma_len(sg) = len;

			addr += stride;
		}
	} else {
		for_each_sg(sgt->sgl, sg, count, i) {
			sg_set_page(sg, omap_obj->pages[i], PAGE_SIZE, 0);
			sg_dma_address(sg) = omap_obj->dma_addrs[i];
			sg_dma_len(sg) = PAGE_SIZE;
		}
	}

	/* Cache the table; omap_gem_unpin_locked() frees it on last unpin. */
	omap_obj->sgt = sgt;
out:
	mutex_unlock(&omap_obj->lock);
	return sgt;

err_free:
	kfree(sgt);
err_unpin:
	mutex_unlock(&omap_obj->lock);
	omap_gem_unpin(obj);
	return ERR_PTR(ret);
}
1046fe4d0b63SIvaylo Dimitrov
/* Release a scatterlist obtained from omap_gem_get_sg(). The table itself
 * stays cached on the object; only the pin reference is dropped here. */
void omap_gem_put_sg(struct drm_gem_object *obj, struct sg_table *sgt)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	/* Catch callers returning a table that doesn't belong to this BO. */
	if (WARN_ON(omap_obj->sgt != sgt))
		return;

	omap_gem_unpin(obj);
}
1056fe4d0b63SIvaylo Dimitrov
1057e1c1174fSLaurent Pinchart #ifdef CONFIG_DRM_FBDEV_EMULATION
10583cbd0c58SLaurent Pinchart /*
10593cbd0c58SLaurent Pinchart * Get kernel virtual address for CPU access.. this more or less only
10603cbd0c58SLaurent Pinchart * exists for omap_fbdev.
10618bb0daffSRob Clark */
void *omap_gem_vaddr(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	void *vaddr;
	int ret;

	mutex_lock(&omap_obj->lock);

	/* Lazily create a write-combined kernel mapping on first use. */
	if (!omap_obj->vaddr) {
		ret = omap_gem_attach_pages(obj);
		if (ret) {
			vaddr = ERR_PTR(ret);
			goto unlock;
		}

		/* NOTE(review): if vmap() fails, vaddr stays NULL and this
		 * function returns NULL rather than an ERR_PTR — callers
		 * must check for both. */
		omap_obj->vaddr = vmap(omap_obj->pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
	}

	vaddr = omap_obj->vaddr;

unlock:
	mutex_unlock(&omap_obj->lock);
	return vaddr;
}
1087e1c1174fSLaurent Pinchart #endif
10888bb0daffSRob Clark
10897ef93b0aSLaurent Pinchart /* -----------------------------------------------------------------------------
10907ef93b0aSLaurent Pinchart * Power Management
10917ef93b0aSLaurent Pinchart */
10928bb0daffSRob Clark
10938bb0daffSRob Clark #ifdef CONFIG_PM
10948bb0daffSRob Clark /* re-pin objects in DMM in resume path: */
/* Re-pin every TILER-backed object in the resume path: the DMM contents
 * are lost across suspend, so each reserved block must be refilled. */
int omap_gem_resume(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj;
	int ret = 0;

	mutex_lock(&priv->list_lock);
	list_for_each_entry(omap_obj, &priv->obj_list, mm_list) {
		/* Only objects with a live TILER block need repinning. */
		if (omap_obj->block) {
			struct drm_gem_object *obj = &omap_obj->base;
			u32 npages = obj->size >> PAGE_SHIFT;

			WARN_ON(!omap_obj->pages); /* this can't happen */
			ret = tiler_pin(omap_obj->block,
					omap_obj->pages, npages,
					omap_obj->roll, true);
			if (ret) {
				/* Abort on first failure; resume cannot
				 * proceed with a partially restored DMM. */
				dev_err(dev->dev, "could not repin: %d\n", ret);
				goto done;
			}
		}
	}

done:
	mutex_unlock(&priv->list_lock);
	return ret;
}
11228bb0daffSRob Clark #endif
11238bb0daffSRob Clark
11247ef93b0aSLaurent Pinchart /* -----------------------------------------------------------------------------
11257ef93b0aSLaurent Pinchart * DebugFS
11267ef93b0aSLaurent Pinchart */
11277ef93b0aSLaurent Pinchart
11288bb0daffSRob Clark #ifdef CONFIG_DEBUG_FS
/* Dump a one-line description of the object to debugfs: flags, name,
 * refcount, mmap offset, DMA address, pin count, vaddr, roll, and either
 * the tiled dimensions (+ TCM area when mapped) or the byte size. */
void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	u64 off;

	off = drm_vma_node_start(&obj->vma_node);

	/* Hold the lock so the printed state is self-consistent. */
	mutex_lock(&omap_obj->lock);

	seq_printf(m, "%08x: %2d (%2d) %08llx %pad (%2d) %p %4d",
			omap_obj->flags, obj->name, kref_read(&obj->refcount),
			off, &omap_obj->dma_addr,
			refcount_read(&omap_obj->pin_cnt),
			omap_obj->vaddr, omap_obj->roll);

	if (omap_obj->flags & OMAP_BO_TILED_MASK) {
		seq_printf(m, " %dx%d", omap_obj->width, omap_obj->height);
		if (omap_obj->block) {
			struct tcm_area *area = &omap_obj->block->area;
			seq_printf(m, " (%dx%d, %dx%d)",
					area->p0.x, area->p0.y,
					area->p1.x, area->p1.y);
		}
	} else {
		seq_printf(m, " %zu", obj->size);
	}

	mutex_unlock(&omap_obj->lock);

	seq_printf(m, "\n");
}
11608bb0daffSRob Clark
omap_gem_describe_objects(struct list_head * list,struct seq_file * m)11618bb0daffSRob Clark void omap_gem_describe_objects(struct list_head *list, struct seq_file *m)
11628bb0daffSRob Clark {
11638bb0daffSRob Clark struct omap_gem_object *omap_obj;
11648bb0daffSRob Clark int count = 0;
11658bb0daffSRob Clark size_t size = 0;
11668bb0daffSRob Clark
11678bb0daffSRob Clark list_for_each_entry(omap_obj, list, mm_list) {
11688bb0daffSRob Clark struct drm_gem_object *obj = &omap_obj->base;
11698bb0daffSRob Clark seq_printf(m, " ");
11708bb0daffSRob Clark omap_gem_describe(obj, m);
11718bb0daffSRob Clark count++;
11728bb0daffSRob Clark size += obj->size;
11738bb0daffSRob Clark }
11748bb0daffSRob Clark
11758bb0daffSRob Clark seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
11768bb0daffSRob Clark }
11778bb0daffSRob Clark #endif
11788bb0daffSRob Clark
11797ef93b0aSLaurent Pinchart /* -----------------------------------------------------------------------------
11807ef93b0aSLaurent Pinchart * Constructor & Destructor
11817ef93b0aSLaurent Pinchart */
11827ef93b0aSLaurent Pinchart
/* Final teardown of a GEM object once its last reference is dropped:
 * evict from usergart, unlink from the device list, release backing
 * storage per allocation type, and free the wrapper. */
static void omap_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	omap_gem_evict(obj);

	mutex_lock(&priv->list_lock);
	list_del(&omap_obj->mm_list);
	mutex_unlock(&priv->list_lock);

	/*
	 * We own the sole reference to the object at this point, but to keep
	 * lockdep happy, we must still take the omap_obj_lock to call
	 * omap_gem_detach_pages(). This should hardly make any difference as
	 * there can't be any lock contention.
	 */
	mutex_lock(&omap_obj->lock);

	/* The object should not be pinned. */
	WARN_ON(refcount_read(&omap_obj->pin_cnt) > 0);

	if (omap_obj->pages) {
		/* dmabuf imports only borrowed the page array; shmem-backed
		 * objects must detach (and release) their pages. */
		if (omap_obj->flags & OMAP_BO_MEM_DMABUF)
			kfree(omap_obj->pages);
		else
			omap_gem_detach_pages(obj);
	}

	/* Release the backing storage according to how it was obtained. */
	if (omap_obj->flags & OMAP_BO_MEM_DMA_API) {
		dma_free_wc(dev->dev, obj->size, omap_obj->vaddr,
			omap_obj->dma_addr);
	} else if (omap_obj->vaddr) {
		vunmap(omap_obj->vaddr);
	} else if (obj->import_attach) {
		drm_prime_gem_destroy(obj, omap_obj->sgt);
	}

	mutex_unlock(&omap_obj->lock);

	drm_gem_object_release(obj);

	mutex_destroy(&omap_obj->lock);

	kfree(omap_obj);
}
12308bb0daffSRob Clark
omap_gem_validate_flags(struct drm_device * dev,u32 flags)12314ecc5fbcSTomi Valkeinen static bool omap_gem_validate_flags(struct drm_device *dev, u32 flags)
12324ecc5fbcSTomi Valkeinen {
12334ecc5fbcSTomi Valkeinen struct omap_drm_private *priv = dev->dev_private;
12344ecc5fbcSTomi Valkeinen
12354ecc5fbcSTomi Valkeinen switch (flags & OMAP_BO_CACHE_MASK) {
12364ecc5fbcSTomi Valkeinen case OMAP_BO_CACHED:
12374ecc5fbcSTomi Valkeinen case OMAP_BO_WC:
12384ecc5fbcSTomi Valkeinen case OMAP_BO_CACHE_MASK:
12394ecc5fbcSTomi Valkeinen break;
12404ecc5fbcSTomi Valkeinen
12414ecc5fbcSTomi Valkeinen default:
12424ecc5fbcSTomi Valkeinen return false;
12434ecc5fbcSTomi Valkeinen }
12444ecc5fbcSTomi Valkeinen
12454ecc5fbcSTomi Valkeinen if (flags & OMAP_BO_TILED_MASK) {
12464ecc5fbcSTomi Valkeinen if (!priv->usergart)
12474ecc5fbcSTomi Valkeinen return false;
12484ecc5fbcSTomi Valkeinen
12494ecc5fbcSTomi Valkeinen switch (flags & OMAP_BO_TILED_MASK) {
12504ecc5fbcSTomi Valkeinen case OMAP_BO_TILED_8:
12514ecc5fbcSTomi Valkeinen case OMAP_BO_TILED_16:
12524ecc5fbcSTomi Valkeinen case OMAP_BO_TILED_32:
12534ecc5fbcSTomi Valkeinen break;
12544ecc5fbcSTomi Valkeinen
12554ecc5fbcSTomi Valkeinen default:
12564ecc5fbcSTomi Valkeinen return false;
12574ecc5fbcSTomi Valkeinen }
12584ecc5fbcSTomi Valkeinen }
12594ecc5fbcSTomi Valkeinen
12604ecc5fbcSTomi Valkeinen return true;
12614ecc5fbcSTomi Valkeinen }
12624ecc5fbcSTomi Valkeinen
/*
 * VM operations for userspace mmaps of omap GEM objects: page faults are
 * resolved by omap_gem_fault(), while open/close keep the standard DRM
 * per-vma GEM object reference counting.
 */
static const struct vm_operations_struct omap_gem_vm_ops = {
	.fault = omap_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};
1268c5ca5e02SThomas Zimmermann
/*
 * GEM object function table installed on every omap BO (see omap_gem_new()):
 * free/export/mmap are omap-specific, faults go through omap_gem_vm_ops.
 */
static const struct drm_gem_object_funcs omap_gem_object_funcs = {
	.free = omap_gem_free_object,
	.export = omap_gem_prime_export,
	.mmap = omap_gem_object_mmap,
	.vm_ops = &omap_gem_vm_ops,
};
1275c5ca5e02SThomas Zimmermann
1276a96bf3cbSSean Paul /* GEM buffer object constructor */
/* GEM buffer object constructor.
 *
 * Validates @flags, derives the backing-memory type (shmem, contiguous
 * DMA, or dma-buf import), allocates and initializes the omap_gem_object,
 * and links it onto the device-wide object list.
 *
 * Returns the new GEM object, or NULL on invalid flags or allocation
 * failure. The caller owns the initial GEM reference.
 */
struct drm_gem_object *omap_gem_new(struct drm_device *dev,
		union omap_gem_size gsize, u32 flags)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj;
	struct drm_gem_object *obj;
	struct address_space *mapping;
	size_t size;
	int ret;

	if (!omap_gem_validate_flags(dev, flags))
		return NULL;

	/* Validate the flags and compute the memory and cache flags. */
	if (flags & OMAP_BO_TILED_MASK) {
		/*
		 * Tiled buffers are always shmem paged backed. When they are
		 * scanned out, they are remapped into DMM/TILER.
		 */
		flags |= OMAP_BO_MEM_SHMEM;

		/*
		 * Currently don't allow cached buffers. There is some caching
		 * stuff that needs to be handled better.
		 */
		flags &= ~(OMAP_BO_CACHED|OMAP_BO_WC|OMAP_BO_UNCACHED);
		flags |= tiler_get_cpu_cache_flags();
	} else if ((flags & OMAP_BO_SCANOUT) && !priv->has_dmm) {
		/*
		 * If we don't have DMM, we must allocate scanout buffers
		 * from contiguous DMA memory.
		 */
		flags |= OMAP_BO_MEM_DMA_API;
	} else if (!(flags & OMAP_BO_MEM_DMABUF)) {
		/*
		 * All other buffers not backed by dma_buf are shmem-backed.
		 */
		flags |= OMAP_BO_MEM_SHMEM;
	}

	/* Allocate the initialize the OMAP GEM object. */
	omap_obj = kzalloc(sizeof(*omap_obj), GFP_KERNEL);
	if (!omap_obj)
		return NULL;

	obj = &omap_obj->base;
	omap_obj->flags = flags;
	/* No lock needed yet: the object is not visible to anyone else. */
	mutex_init(&omap_obj->lock);

	if (flags & OMAP_BO_TILED_MASK) {
		/*
		 * For tiled buffers align dimensions to slot boundaries and
		 * calculate size based on aligned dimensions.
		 */
		tiler_align(gem2fmt(flags), &gsize.tiled.width,
			    &gsize.tiled.height);

		size = tiler_size(gem2fmt(flags), gsize.tiled.width,
				  gsize.tiled.height);

		omap_obj->width = gsize.tiled.width;
		omap_obj->height = gsize.tiled.height;
	} else {
		size = PAGE_ALIGN(gsize.bytes);
	}

	obj->funcs = &omap_gem_object_funcs;

	/* Initialize the GEM object. */
	if (!(flags & OMAP_BO_MEM_SHMEM)) {
		/* No shmem backing (DMA or dma-buf): skip filp allocation. */
		drm_gem_private_object_init(dev, obj, size);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto err_free;

		/* Restrict shmem pages to DMA32 so devices can reach them. */
		mapping = obj->filp->f_mapping;
		mapping_set_gfp_mask(mapping, GFP_USER | __GFP_DMA32);
	}

	/* Allocate memory if needed. */
	if (flags & OMAP_BO_MEM_DMA_API) {
		omap_obj->vaddr = dma_alloc_wc(dev->dev, size,
					       &omap_obj->dma_addr,
					       GFP_KERNEL);
		if (!omap_obj->vaddr)
			goto err_release;
	}

	mutex_lock(&priv->list_lock);
	list_add(&omap_obj->mm_list, &priv->obj_list);
	mutex_unlock(&priv->list_lock);

	return obj;

err_release:
	drm_gem_object_release(obj);
err_free:
	kfree(omap_obj);
	return NULL;
}
13788bb0daffSRob Clark
/* Construct a GEM object wrapping an imported dma-buf scatterlist.
 *
 * A single-entry sgt is treated as physically contiguous and only its
 * DMA address is recorded; multi-entry sgts need the DMM to remap and
 * additionally get a pages[] array built from the scatterlist.
 *
 * Returns the new object or an ERR_PTR() on failure. The sgt remains
 * owned by the caller's dma-buf attachment; it is only referenced here.
 */
struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
		struct sg_table *sgt)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj;
	struct drm_gem_object *obj;
	union omap_gem_size gsize;

	/* Without a DMM only physically contiguous buffers can be supported. */
	if (sgt->orig_nents != 1 && !priv->has_dmm)
		return ERR_PTR(-EINVAL);

	gsize.bytes = PAGE_ALIGN(size);
	obj = omap_gem_new(dev, gsize, OMAP_BO_MEM_DMABUF | OMAP_BO_WC);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	omap_obj = to_omap_bo(obj);

	mutex_lock(&omap_obj->lock);

	omap_obj->sgt = sgt;

	if (sgt->orig_nents == 1) {
		omap_obj->dma_addr = sg_dma_address(sgt->sgl);
	} else {
		/* Create pages list from sgt */
		struct page **pages;
		unsigned int npages;
		/*
		 * Must be signed: drm_prime_sg_to_page_array() returns a
		 * negative errno on failure.
		 */
		int ret;

		npages = DIV_ROUND_UP(size, PAGE_SIZE);
		pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
		if (!pages) {
			omap_gem_free_object(obj);
			obj = ERR_PTR(-ENOMEM);
			goto done;
		}

		omap_obj->pages = pages;
		ret = drm_prime_sg_to_page_array(sgt, pages, npages);
		if (ret) {
			/* free_object() also frees the pages array set above */
			omap_gem_free_object(obj);
			obj = ERR_PTR(-ENOMEM);
			goto done;
		}
	}

done:
	mutex_unlock(&omap_obj->lock);
	return obj;
}
1431b22e6690SLaurent Pinchart
14327ef93b0aSLaurent Pinchart /* convenience method to construct a GEM buffer object, and userspace handle */
/* convenience method to construct a GEM buffer object, and userspace handle */
int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		union omap_gem_size gsize, u32 flags, u32 *handle)
{
	struct drm_gem_object *obj;
	int ret;

	obj = omap_gem_new(dev, gsize, flags);
	if (!obj)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, obj, handle);
	if (ret) {
		omap_gem_free_object(obj);
	} else {
		/* drop reference from allocate - handle holds it now */
		drm_gem_object_put(obj);
	}

	return ret;
}
14547ef93b0aSLaurent Pinchart
14557ef93b0aSLaurent Pinchart /* -----------------------------------------------------------------------------
14567ef93b0aSLaurent Pinchart * Init & Cleanup
14577ef93b0aSLaurent Pinchart */
14587ef93b0aSLaurent Pinchart
14597ef93b0aSLaurent Pinchart /* If DMM is used, we need to set some stuff up.. */
omap_gem_init(struct drm_device * dev)14608bb0daffSRob Clark void omap_gem_init(struct drm_device *dev)
14618bb0daffSRob Clark {
14628bb0daffSRob Clark struct omap_drm_private *priv = dev->dev_private;
1463f4302747SLaurent Pinchart struct omap_drm_usergart *usergart;
14648bb0daffSRob Clark const enum tiler_fmt fmts[] = {
14658bb0daffSRob Clark TILFMT_8BIT, TILFMT_16BIT, TILFMT_32BIT
14668bb0daffSRob Clark };
14678bb0daffSRob Clark int i, j;
14688bb0daffSRob Clark
14698bb0daffSRob Clark if (!dmm_is_available()) {
14708bb0daffSRob Clark /* DMM only supported on OMAP4 and later, so this isn't fatal */
14718bb0daffSRob Clark dev_warn(dev->dev, "DMM not available, disable DMM support\n");
14728bb0daffSRob Clark return;
14738bb0daffSRob Clark }
14748bb0daffSRob Clark
1475fffddfd6SLinus Torvalds usergart = kcalloc(3, sizeof(*usergart), GFP_KERNEL);
1476fffddfd6SLinus Torvalds if (!usergart)
14778bb0daffSRob Clark return;
14788bb0daffSRob Clark
14798bb0daffSRob Clark /* reserve 4k aligned/wide regions for userspace mappings: */
14808bb0daffSRob Clark for (i = 0; i < ARRAY_SIZE(fmts); i++) {
1481dfe9cfccSLaurent Pinchart u16 h = 1, w = PAGE_SIZE >> i;
1482dfe9cfccSLaurent Pinchart
14838bb0daffSRob Clark tiler_align(fmts[i], &w, &h);
14848bb0daffSRob Clark /* note: since each region is 1 4kb page wide, and minimum
14858bb0daffSRob Clark * number of rows, the height ends up being the same as the
14868bb0daffSRob Clark * # of pages in the region
14878bb0daffSRob Clark */
14888bb0daffSRob Clark usergart[i].height = h;
14898bb0daffSRob Clark usergart[i].height_shift = ilog2(h);
14908bb0daffSRob Clark usergart[i].stride_pfn = tiler_stride(fmts[i], 0) >> PAGE_SHIFT;
14918bb0daffSRob Clark usergart[i].slot_shift = ilog2((PAGE_SIZE / h) >> i);
14928bb0daffSRob Clark for (j = 0; j < NUM_USERGART_ENTRIES; j++) {
1493f4302747SLaurent Pinchart struct omap_drm_usergart_entry *entry;
1494f4302747SLaurent Pinchart struct tiler_block *block;
1495f4302747SLaurent Pinchart
1496f4302747SLaurent Pinchart entry = &usergart[i].entry[j];
1497f4302747SLaurent Pinchart block = tiler_reserve_2d(fmts[i], w, h, PAGE_SIZE);
14988bb0daffSRob Clark if (IS_ERR(block)) {
14998bb0daffSRob Clark dev_err(dev->dev,
15008bb0daffSRob Clark "reserve failed: %d, %d, %ld\n",
15018bb0daffSRob Clark i, j, PTR_ERR(block));
15028bb0daffSRob Clark return;
15038bb0daffSRob Clark }
150416869083SLaurent Pinchart entry->dma_addr = tiler_ssptr(block);
15058bb0daffSRob Clark entry->block = block;
15068bb0daffSRob Clark
150716869083SLaurent Pinchart DBG("%d:%d: %dx%d: dma_addr=%pad stride=%d", i, j, w, h,
150816869083SLaurent Pinchart &entry->dma_addr,
15098bb0daffSRob Clark usergart[i].stride_pfn << PAGE_SHIFT);
15108bb0daffSRob Clark }
15118bb0daffSRob Clark }
15128bb0daffSRob Clark
1513f4302747SLaurent Pinchart priv->usergart = usergart;
15148bb0daffSRob Clark priv->has_dmm = true;
15158bb0daffSRob Clark }
15168bb0daffSRob Clark
/* Tear down the usergart state set up by omap_gem_init().
 * kfree(NULL) is fine, so this is safe even if init bailed early.
 */
void omap_gem_deinit(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;

	/* I believe we can rely on there being no more outstanding GEM
	 * objects which could depend on usergart/dmm at this point.
	 */
	kfree(priv->usergart);
}
1526