126f56895STomasz Figa // SPDX-License-Identifier: GPL-2.0
226f56895STomasz Figa /*
326f56895STomasz Figa  * Copyright (C) 2018 Intel Corporation.
426f56895STomasz Figa  * Copyright 2018 Google LLC.
526f56895STomasz Figa  *
626f56895STomasz Figa  * Author: Tuukka Toivonen <tuukka.toivonen@intel.com>
726f56895STomasz Figa  * Author: Sakari Ailus <sakari.ailus@linux.intel.com>
826f56895STomasz Figa  * Author: Samu Onkalo <samu.onkalo@intel.com>
926f56895STomasz Figa  * Author: Tomasz Figa <tfiga@chromium.org>
1026f56895STomasz Figa  *
1126f56895STomasz Figa  */
1226f56895STomasz Figa 
1326f56895STomasz Figa #include <linux/dma-mapping.h>
1426f56895STomasz Figa #include <linux/iopoll.h>
1526f56895STomasz Figa #include <linux/pm_runtime.h>
1626f56895STomasz Figa #include <linux/slab.h>
1726f56895STomasz Figa #include <linux/vmalloc.h>
1826f56895STomasz Figa 
1926f56895STomasz Figa #include <asm/set_memory.h>
2026f56895STomasz Figa 
2126f56895STomasz Figa #include "ipu3-mmu.h"
2226f56895STomasz Figa 
2326f56895STomasz Figa #define IPU3_PT_BITS		10
2426f56895STomasz Figa #define IPU3_PT_PTES		(1UL << IPU3_PT_BITS)
2526f56895STomasz Figa #define IPU3_PT_SIZE		(IPU3_PT_PTES << 2)
2626f56895STomasz Figa #define IPU3_PT_ORDER		(IPU3_PT_SIZE >> PAGE_SHIFT)
2726f56895STomasz Figa 
2826f56895STomasz Figa #define IPU3_ADDR2PTE(addr)	((addr) >> IPU3_PAGE_SHIFT)
2926f56895STomasz Figa #define IPU3_PTE2ADDR(pte)	((phys_addr_t)(pte) << IPU3_PAGE_SHIFT)
3026f56895STomasz Figa 
3126f56895STomasz Figa #define IPU3_L2PT_SHIFT		IPU3_PT_BITS
3226f56895STomasz Figa #define IPU3_L2PT_MASK		((1UL << IPU3_L2PT_SHIFT) - 1)
3326f56895STomasz Figa 
3426f56895STomasz Figa #define IPU3_L1PT_SHIFT		IPU3_PT_BITS
3526f56895STomasz Figa #define IPU3_L1PT_MASK		((1UL << IPU3_L1PT_SHIFT) - 1)
3626f56895STomasz Figa 
3726f56895STomasz Figa #define IPU3_MMU_ADDRESS_BITS	(IPU3_PAGE_SHIFT + \
3826f56895STomasz Figa 				 IPU3_L2PT_SHIFT + \
3926f56895STomasz Figa 				 IPU3_L1PT_SHIFT)
4026f56895STomasz Figa 
4126f56895STomasz Figa #define IMGU_REG_BASE		0x4000
4226f56895STomasz Figa #define REG_TLB_INVALIDATE	(IMGU_REG_BASE + 0x300)
4326f56895STomasz Figa #define TLB_INVALIDATE		1
4426f56895STomasz Figa #define REG_L1_PHYS		(IMGU_REG_BASE + 0x304) /* 27-bit pfn */
4526f56895STomasz Figa #define REG_GP_HALT		(IMGU_REG_BASE + 0x5dc)
4626f56895STomasz Figa #define REG_GP_HALTED		(IMGU_REG_BASE + 0x5e0)
4726f56895STomasz Figa 
/* IPU3 MMU driver state: register access plus the two-level page table. */
struct imgu_mmu {
	struct device *dev;	/* used for runtime PM checks and logging */
	void __iomem *base;	/* IMGU register space (REG_* offsets) */
	/* protect access to l2pts, l1pt */
	spinlock_t lock;

	void *dummy_page;	/* backing page for all unmapped IOVAs */
	u32 dummy_page_pteval;	/* PTE value pointing at dummy_page */

	u32 *dummy_l2pt;	/* L2 table whose PTEs all map dummy_page */
	u32 dummy_l2pt_pteval;	/* L1 PTE value pointing at dummy_l2pt */

	u32 **l2pts;		/* CPU pointers to L2 tables, NULL = dummy */
	u32 *l1pt;		/* level-1 page table (hardware format) */

	struct imgu_mmu_info geometry;	/* public mappable-range description */
};
6526f56895STomasz Figa 
/* Convert the public geometry pointer back to its containing imgu_mmu. */
static inline struct imgu_mmu *to_imgu_mmu(struct imgu_mmu_info *info)
{
	return container_of(info, struct imgu_mmu, geometry);
}
7026f56895STomasz Figa 
/**
 * imgu_mmu_tlb_invalidate - invalidate translation look-aside buffer
 * @mmu: MMU to perform the invalidate operation on
 *
 * This function invalidates the whole TLB. Must be called when the hardware
 * is powered on.
 */
static void imgu_mmu_tlb_invalidate(struct imgu_mmu *mmu)
{
	writel(TLB_INVALIDATE, mmu->base + REG_TLB_INVALIDATE);
}
8226f56895STomasz Figa 
call_if_imgu_is_powered(struct imgu_mmu * mmu,void (* func)(struct imgu_mmu * mmu))8327b795adSYong Zhi static void call_if_imgu_is_powered(struct imgu_mmu *mmu,
8427b795adSYong Zhi 				    void (*func)(struct imgu_mmu *mmu))
8526f56895STomasz Figa {
8626f56895STomasz Figa 	if (!pm_runtime_get_if_in_use(mmu->dev))
8726f56895STomasz Figa 		return;
8826f56895STomasz Figa 
8926f56895STomasz Figa 	func(mmu);
9026f56895STomasz Figa 	pm_runtime_put(mmu->dev);
9126f56895STomasz Figa }
9226f56895STomasz Figa 
/**
 * imgu_mmu_set_halt - set CIO gate halt bit
 * @mmu: MMU to set the CIO gate bit in.
 * @halt: Desired state of the gate bit.
 *
 * This function sets the CIO gate bit that controls whether external memory
 * accesses are allowed. Must be called when the hardware is powered on.
 */
static void imgu_mmu_set_halt(struct imgu_mmu *mmu, bool halt)
{
	int ret;
	u32 val;

	writel(halt, mmu->base + REG_GP_HALT);
	/* Poll bit 0 of REG_GP_HALTED every ~1 ms, for up to 100 ms. */
	ret = readl_poll_timeout(mmu->base + REG_GP_HALTED,
				 val, (val & 1) == halt, 1000, 100000);

	if (ret)
		dev_err(mmu->dev, "failed to %s CIO gate halt\n",
			halt ? "set" : "clear");
}
11426f56895STomasz Figa 
11526f56895STomasz Figa /**
11627b795adSYong Zhi  * imgu_mmu_alloc_page_table - allocate a pre-filled page table
11726f56895STomasz Figa  * @pteval: Value to initialize for page table entries with.
11826f56895STomasz Figa  *
11926f56895STomasz Figa  * Return: Pointer to allocated page table or NULL on failure.
12026f56895STomasz Figa  */
imgu_mmu_alloc_page_table(u32 pteval)12127b795adSYong Zhi static u32 *imgu_mmu_alloc_page_table(u32 pteval)
12226f56895STomasz Figa {
12326f56895STomasz Figa 	u32 *pt;
12426f56895STomasz Figa 	int pte;
12526f56895STomasz Figa 
12626f56895STomasz Figa 	pt = (u32 *)__get_free_page(GFP_KERNEL);
12726f56895STomasz Figa 	if (!pt)
12826f56895STomasz Figa 		return NULL;
12926f56895STomasz Figa 
13026f56895STomasz Figa 	for (pte = 0; pte < IPU3_PT_PTES; pte++)
13126f56895STomasz Figa 		pt[pte] = pteval;
13226f56895STomasz Figa 
13331e0a455SJean-Baptiste Jouband 	set_memory_uc((unsigned long)pt, IPU3_PT_ORDER);
13426f56895STomasz Figa 
13526f56895STomasz Figa 	return pt;
13626f56895STomasz Figa }
13726f56895STomasz Figa 
/**
 * imgu_mmu_free_page_table - free page table
 * @pt: Page table to free.
 */
static void imgu_mmu_free_page_table(u32 *pt)
{
	/* Restore write-back caching before returning the page to the pool. */
	set_memory_wb((unsigned long)pt, IPU3_PT_ORDER);
	free_page((unsigned long)pt);
}
14726f56895STomasz Figa 
14826f56895STomasz Figa /**
14926f56895STomasz Figa  * address_to_pte_idx - split IOVA into L1 and L2 page table indices
15026f56895STomasz Figa  * @iova: IOVA to split.
15126f56895STomasz Figa  * @l1pt_idx: Output for the L1 page table index.
15226f56895STomasz Figa  * @l2pt_idx: Output for the L2 page index.
15326f56895STomasz Figa  */
address_to_pte_idx(unsigned long iova,u32 * l1pt_idx,u32 * l2pt_idx)15426f56895STomasz Figa static inline void address_to_pte_idx(unsigned long iova, u32 *l1pt_idx,
15526f56895STomasz Figa 				      u32 *l2pt_idx)
15626f56895STomasz Figa {
15726f56895STomasz Figa 	iova >>= IPU3_PAGE_SHIFT;
15826f56895STomasz Figa 
15926f56895STomasz Figa 	if (l2pt_idx)
16026f56895STomasz Figa 		*l2pt_idx = iova & IPU3_L2PT_MASK;
16126f56895STomasz Figa 
16226f56895STomasz Figa 	iova >>= IPU3_L2PT_SHIFT;
16326f56895STomasz Figa 
16426f56895STomasz Figa 	if (l1pt_idx)
16526f56895STomasz Figa 		*l1pt_idx = iova & IPU3_L1PT_MASK;
16626f56895STomasz Figa }
16726f56895STomasz Figa 
/**
 * imgu_mmu_get_l2pt - find or create the L2 page table for an L1 slot
 * @mmu: MMU to operate on
 * @l1pt_idx: index into the L1 page table
 *
 * Returns the CPU pointer to the L2 table backing @l1pt_idx, allocating a
 * fresh one (pre-filled with dummy-page PTEs) and installing it into the L1
 * table if none exists yet.  Returns NULL on allocation failure.
 */
static u32 *imgu_mmu_get_l2pt(struct imgu_mmu *mmu, u32 l1pt_idx)
{
	unsigned long flags;
	u32 *l2pt, *new_l2pt;
	u32 pteval;

	spin_lock_irqsave(&mmu->lock, flags);

	l2pt = mmu->l2pts[l1pt_idx];
	if (l2pt) {
		spin_unlock_irqrestore(&mmu->lock, flags);
		return l2pt;
	}

	/* Drop the lock: the allocation below may sleep. */
	spin_unlock_irqrestore(&mmu->lock, flags);

	new_l2pt = imgu_mmu_alloc_page_table(mmu->dummy_page_pteval);
	if (!new_l2pt)
		return NULL;

	spin_lock_irqsave(&mmu->lock, flags);

	dev_dbg(mmu->dev, "allocated page table %p for l1pt_idx %u\n",
		new_l2pt, l1pt_idx);

	/* Recheck: another context may have installed a table meanwhile. */
	l2pt = mmu->l2pts[l1pt_idx];
	if (l2pt) {
		spin_unlock_irqrestore(&mmu->lock, flags);
		imgu_mmu_free_page_table(new_l2pt);
		return l2pt;
	}

	l2pt = new_l2pt;
	mmu->l2pts[l1pt_idx] = new_l2pt;

	/* Hook the new table into the hardware-visible L1 table. */
	pteval = IPU3_ADDR2PTE(virt_to_phys(new_l2pt));
	mmu->l1pt[l1pt_idx] = pteval;

	spin_unlock_irqrestore(&mmu->lock, flags);
	return l2pt;
}
20926f56895STomasz Figa 
/**
 * __imgu_mmu_map - map a single IPU3 page
 * @mmu: MMU to operate on
 * @iova: IO virtual address to map
 * @paddr: physical address to map it to
 *
 * Return: 0 on success, -ENODEV without an MMU, -ENOMEM if the L2 table
 * cannot be allocated, or -EBUSY if the page is already mapped.
 */
static int __imgu_mmu_map(struct imgu_mmu *mmu, unsigned long iova,
			  phys_addr_t paddr)
{
	u32 l1pt_idx, l2pt_idx;
	unsigned long flags;
	u32 *l2pt;

	if (!mmu)
		return -ENODEV;

	address_to_pte_idx(iova, &l1pt_idx, &l2pt_idx);

	l2pt = imgu_mmu_get_l2pt(mmu, l1pt_idx);
	if (!l2pt)
		return -ENOMEM;

	spin_lock_irqsave(&mmu->lock, flags);

	/* A non-dummy PTE means the slot is already in use. */
	if (l2pt[l2pt_idx] != mmu->dummy_page_pteval) {
		spin_unlock_irqrestore(&mmu->lock, flags);
		return -EBUSY;
	}

	l2pt[l2pt_idx] = IPU3_ADDR2PTE(paddr);

	spin_unlock_irqrestore(&mmu->lock, flags);

	return 0;
}
23926f56895STomasz Figa 
/**
 * imgu_mmu_map - map a buffer to a physical address
 *
 * @info: MMU mappable range
 * @iova: the virtual address
 * @paddr: the physical address
 * @size: length of the mappable area
 *
 * The function has been adapted from iommu_map() in
 * drivers/iommu/iommu.c .
 *
 * Return: 0 on success, or a negative error code on failure; the TLB is
 * invalidated in either case (pages mapped before a failure stay mapped).
 */
int imgu_mmu_map(struct imgu_mmu_info *info, unsigned long iova,
		 phys_addr_t paddr, size_t size)
{
	struct imgu_mmu *mmu = to_imgu_mmu(info);
	int ret = 0;

	/*
	 * both the virtual address and the physical one, as well as
	 * the size of the mapping, must be aligned (at least) to the
	 * size of the smallest page supported by the hardware
	 */
	if (!IS_ALIGNED(iova | paddr | size, IPU3_PAGE_SIZE)) {
		dev_err(mmu->dev, "unaligned: iova 0x%lx pa %pa size 0x%zx\n",
			iova, &paddr, size);
		return -EINVAL;
	}

	dev_dbg(mmu->dev, "map: iova 0x%lx pa %pa size 0x%zx\n",
		iova, &paddr, size);

	/* Map one IPU3 page at a time until the whole range is covered. */
	while (size) {
		dev_dbg(mmu->dev, "mapping: iova 0x%lx pa %pa\n", iova, &paddr);

		ret = __imgu_mmu_map(mmu, iova, paddr);
		if (ret)
			break;

		iova += IPU3_PAGE_SIZE;
		paddr += IPU3_PAGE_SIZE;
		size -= IPU3_PAGE_SIZE;
	}

	call_if_imgu_is_powered(mmu, imgu_mmu_tlb_invalidate);

	return ret;
}
28726f56895STomasz Figa 
/**
 * imgu_mmu_map_sg - Map a scatterlist
 *
 * @info: MMU mappable range
 * @iova: the virtual address
 * @sg: the scatterlist to map
 * @nents: number of entries in the scatterlist
 *
 * The function has been adapted from default_iommu_map_sg() in
 * drivers/iommu/iommu.c .
 *
 * Return: number of bytes mapped, or 0 on failure (in which case any
 * mappings already established by this call are undone).
 */
size_t imgu_mmu_map_sg(struct imgu_mmu_info *info, unsigned long iova,
		       struct scatterlist *sg, unsigned int nents)
{
	struct imgu_mmu *mmu = to_imgu_mmu(info);
	struct scatterlist *s;
	size_t s_length, mapped = 0;
	unsigned int i;
	int ret;

	for_each_sg(sg, s, nents, i) {
		phys_addr_t phys = page_to_phys(sg_page(s)) + s->offset;

		s_length = s->length;

		/* Intermediate entries must be exactly page-granular. */
		if (!IS_ALIGNED(s->offset, IPU3_PAGE_SIZE))
			goto out_err;

		/* must be IPU3_PAGE_SIZE aligned to be mapped singlely */
		if (i == nents - 1 && !IS_ALIGNED(s->length, IPU3_PAGE_SIZE))
			/*
			 * NOTE(review): PAGE_ALIGN rounds to the CPU page
			 * size, not IPU3_PAGE_SIZE — presumably identical on
			 * this platform; confirm if they ever diverge.
			 */
			s_length = PAGE_ALIGN(s->length);

		ret = imgu_mmu_map(info, iova + mapped, phys, s_length);
		if (ret)
			goto out_err;

		mapped += s_length;
	}

	call_if_imgu_is_powered(mmu, imgu_mmu_tlb_invalidate);

	return mapped;

out_err:
	/* undo mappings already done */
	imgu_mmu_unmap(info, iova, mapped);

	return 0;
}
33726f56895STomasz Figa 
/**
 * __imgu_mmu_unmap - unmap a single IPU3 page
 * @mmu: MMU to operate on
 * @iova: IO virtual address to unmap
 * @size: requested size (returned unchanged on success)
 *
 * Points the PTE for @iova back at the dummy page.
 *
 * Return: @size if a mapping was removed, 0 if the address was not mapped.
 */
static size_t __imgu_mmu_unmap(struct imgu_mmu *mmu,
			       unsigned long iova, size_t size)
{
	u32 l1pt_idx, l2pt_idx;
	unsigned long flags;
	size_t unmap = size;
	u32 *l2pt;

	if (!mmu)
		return 0;

	address_to_pte_idx(iova, &l1pt_idx, &l2pt_idx);

	spin_lock_irqsave(&mmu->lock, flags);

	/* No L2 table at all means nothing was ever mapped here. */
	l2pt = mmu->l2pts[l1pt_idx];
	if (!l2pt) {
		spin_unlock_irqrestore(&mmu->lock, flags);
		return 0;
	}

	/* Already pointing at the dummy page: nothing to unmap. */
	if (l2pt[l2pt_idx] == mmu->dummy_page_pteval)
		unmap = 0;

	l2pt[l2pt_idx] = mmu->dummy_page_pteval;

	spin_unlock_irqrestore(&mmu->lock, flags);

	return unmap;
}
36826f56895STomasz Figa 
/**
 * imgu_mmu_unmap - Unmap a buffer
 *
 * @info: MMU mappable range
 * @iova: the virtual address
 * @size: the length of the buffer
 *
 * The function has been adapted from iommu_unmap() in
 * drivers/iommu/iommu.c .
 *
 * Return: number of bytes actually unmapped.
 */
size_t imgu_mmu_unmap(struct imgu_mmu_info *info, unsigned long iova,
		      size_t size)
{
	struct imgu_mmu *mmu = to_imgu_mmu(info);
	size_t unmapped_page, unmapped = 0;

	/*
	 * The virtual address, as well as the size of the mapping, must be
	 * aligned (at least) to the size of the smallest page supported
	 * by the hardware
	 */
	if (!IS_ALIGNED(iova | size, IPU3_PAGE_SIZE)) {
		dev_err(mmu->dev, "unaligned: iova 0x%lx size 0x%zx\n",
			iova, size);
		/*
		 * NOTE(review): -EINVAL converted to size_t is a huge
		 * positive value, not an error callers can distinguish —
		 * confirm callers compare the result against @size.
		 */
		return -EINVAL;
	}

	dev_dbg(mmu->dev, "unmap this: iova 0x%lx size 0x%zx\n", iova, size);

	/*
	 * Keep iterating until we either unmap 'size' bytes (or more)
	 * or we hit an area that isn't mapped.
	 */
	while (unmapped < size) {
		unmapped_page = __imgu_mmu_unmap(mmu, iova, IPU3_PAGE_SIZE);
		if (!unmapped_page)
			break;

		dev_dbg(mmu->dev, "unmapped: iova 0x%lx size 0x%zx\n",
			iova, unmapped_page);

		iova += unmapped_page;
		unmapped += unmapped_page;
	}

	call_if_imgu_is_powered(mmu, imgu_mmu_tlb_invalidate);

	return unmapped;
}
41826f56895STomasz Figa 
/**
 * imgu_mmu_init() - initialize IPU3 MMU block
 *
 * @parent:	struct device parent
 * @base:	IOMEM base of hardware registers.
 *
 * Return: Pointer to IPU3 MMU private data pointer or ERR_PTR() on error.
 */
struct imgu_mmu_info *imgu_mmu_init(struct device *parent, void __iomem *base)
{
	struct imgu_mmu *mmu;
	u32 pteval;

	mmu = kzalloc(sizeof(*mmu), GFP_KERNEL);
	if (!mmu)
		return ERR_PTR(-ENOMEM);

	mmu->dev = parent;
	mmu->base = base;
	spin_lock_init(&mmu->lock);

	/* Disallow external memory access when having no valid page tables. */
	imgu_mmu_set_halt(mmu, true);

	/*
	 * The MMU does not have a "valid" bit, so we have to use a dummy
	 * page for invalid entries.
	 */
	mmu->dummy_page = (void *)__get_free_page(GFP_KERNEL);
	if (!mmu->dummy_page)
		goto fail_group;
	pteval = IPU3_ADDR2PTE(virt_to_phys(mmu->dummy_page));
	mmu->dummy_page_pteval = pteval;

	/*
	 * Allocate a dummy L2 page table with all entries pointing to
	 * the dummy page.
	 */
	mmu->dummy_l2pt = imgu_mmu_alloc_page_table(pteval);
	if (!mmu->dummy_l2pt)
		goto fail_dummy_page;
	pteval = IPU3_ADDR2PTE(virt_to_phys(mmu->dummy_l2pt));
	mmu->dummy_l2pt_pteval = pteval;

	/*
	 * Allocate the array of L2PT CPU pointers, initialized to zero,
	 * which means the dummy L2PT allocated above.
	 */
	mmu->l2pts = vzalloc(IPU3_PT_PTES * sizeof(*mmu->l2pts));
	if (!mmu->l2pts)
		goto fail_l2pt;

	/* Allocate the L1 page table. */
	mmu->l1pt = imgu_mmu_alloc_page_table(mmu->dummy_l2pt_pteval);
	if (!mmu->l1pt)
		goto fail_l2pts;

	/* Point the hardware at the L1 table and re-enable memory access. */
	pteval = IPU3_ADDR2PTE(virt_to_phys(mmu->l1pt));
	writel(pteval, mmu->base + REG_L1_PHYS);
	imgu_mmu_tlb_invalidate(mmu);
	imgu_mmu_set_halt(mmu, false);

	mmu->geometry.aperture_start = 0;
	mmu->geometry.aperture_end = DMA_BIT_MASK(IPU3_MMU_ADDRESS_BITS);

	return &mmu->geometry;

fail_l2pts:
	vfree(mmu->l2pts);
fail_l2pt:
	imgu_mmu_free_page_table(mmu->dummy_l2pt);
fail_dummy_page:
	free_page((unsigned long)mmu->dummy_page);
fail_group:
	kfree(mmu);

	return ERR_PTR(-ENOMEM);
}
49726f56895STomasz Figa 
/**
 * imgu_mmu_exit() - clean up IPU3 MMU block
 *
 * @info: MMU mappable range
 */
void imgu_mmu_exit(struct imgu_mmu_info *info)
{
	struct imgu_mmu *mmu = to_imgu_mmu(info);

	/* We are going to free our page tables, no more memory access. */
	imgu_mmu_set_halt(mmu, true);
	imgu_mmu_tlb_invalidate(mmu);

	/* Tear down in reverse order of imgu_mmu_init(). */
	imgu_mmu_free_page_table(mmu->l1pt);
	vfree(mmu->l2pts);
	imgu_mmu_free_page_table(mmu->dummy_l2pt);
	free_page((unsigned long)mmu->dummy_page);
	kfree(mmu);
}
51726f56895STomasz Figa 
/**
 * imgu_mmu_suspend() - halt external memory access before suspend
 * @info: MMU mappable range
 */
void imgu_mmu_suspend(struct imgu_mmu_info *info)
{
	struct imgu_mmu *mmu = to_imgu_mmu(info);

	imgu_mmu_set_halt(mmu, true);
}
52426f56895STomasz Figa 
/**
 * imgu_mmu_resume() - restore MMU hardware state after resume
 * @info: MMU mappable range
 *
 * Reprograms the L1 table base (lost over suspend), flushes the TLB and
 * re-enables external memory access.
 */
void imgu_mmu_resume(struct imgu_mmu_info *info)
{
	struct imgu_mmu *mmu = to_imgu_mmu(info);
	u32 pteval;

	/* Keep memory access gated until the tables are hooked up again. */
	imgu_mmu_set_halt(mmu, true);

	pteval = IPU3_ADDR2PTE(virt_to_phys(mmu->l1pt));
	writel(pteval, mmu->base + REG_L1_PHYS);

	imgu_mmu_tlb_invalidate(mmu);
	imgu_mmu_set_halt(mmu, false);
}
538