126f56895STomasz Figa // SPDX-License-Identifier: GPL-2.0
226f56895STomasz Figa /*
326f56895STomasz Figa  * Copyright (C) 2018 Intel Corporation.
426f56895STomasz Figa  * Copyright 2018 Google LLC.
526f56895STomasz Figa  *
626f56895STomasz Figa  * Author: Tuukka Toivonen <tuukka.toivonen@intel.com>
726f56895STomasz Figa  * Author: Sakari Ailus <sakari.ailus@linux.intel.com>
826f56895STomasz Figa  * Author: Samu Onkalo <samu.onkalo@intel.com>
926f56895STomasz Figa  * Author: Tomasz Figa <tfiga@chromium.org>
1026f56895STomasz Figa  *
1126f56895STomasz Figa  */
1226f56895STomasz Figa 
1326f56895STomasz Figa #include <linux/dma-mapping.h>
1426f56895STomasz Figa #include <linux/iopoll.h>
1526f56895STomasz Figa #include <linux/pm_runtime.h>
1626f56895STomasz Figa #include <linux/slab.h>
1726f56895STomasz Figa #include <linux/vmalloc.h>
1826f56895STomasz Figa 
1926f56895STomasz Figa #include <asm/set_memory.h>
2026f56895STomasz Figa 
2126f56895STomasz Figa #include "ipu3-mmu.h"
2226f56895STomasz Figa 
2326f56895STomasz Figa #define IPU3_PT_BITS		10
2426f56895STomasz Figa #define IPU3_PT_PTES		(1UL << IPU3_PT_BITS)
2526f56895STomasz Figa #define IPU3_PT_SIZE		(IPU3_PT_PTES << 2)
2626f56895STomasz Figa #define IPU3_PT_ORDER		(IPU3_PT_SIZE >> PAGE_SHIFT)
2726f56895STomasz Figa 
2826f56895STomasz Figa #define IPU3_ADDR2PTE(addr)	((addr) >> IPU3_PAGE_SHIFT)
2926f56895STomasz Figa #define IPU3_PTE2ADDR(pte)	((phys_addr_t)(pte) << IPU3_PAGE_SHIFT)
3026f56895STomasz Figa 
3126f56895STomasz Figa #define IPU3_L2PT_SHIFT		IPU3_PT_BITS
3226f56895STomasz Figa #define IPU3_L2PT_MASK		((1UL << IPU3_L2PT_SHIFT) - 1)
3326f56895STomasz Figa 
3426f56895STomasz Figa #define IPU3_L1PT_SHIFT		IPU3_PT_BITS
3526f56895STomasz Figa #define IPU3_L1PT_MASK		((1UL << IPU3_L1PT_SHIFT) - 1)
3626f56895STomasz Figa 
3726f56895STomasz Figa #define IPU3_MMU_ADDRESS_BITS	(IPU3_PAGE_SHIFT + \
3826f56895STomasz Figa 				 IPU3_L2PT_SHIFT + \
3926f56895STomasz Figa 				 IPU3_L1PT_SHIFT)
4026f56895STomasz Figa 
4126f56895STomasz Figa #define IMGU_REG_BASE		0x4000
4226f56895STomasz Figa #define REG_TLB_INVALIDATE	(IMGU_REG_BASE + 0x300)
4326f56895STomasz Figa #define TLB_INVALIDATE		1
4426f56895STomasz Figa #define REG_L1_PHYS		(IMGU_REG_BASE + 0x304) /* 27-bit pfn */
4526f56895STomasz Figa #define REG_GP_HALT		(IMGU_REG_BASE + 0x5dc)
4626f56895STomasz Figa #define REG_GP_HALTED		(IMGU_REG_BASE + 0x5e0)
4726f56895STomasz Figa 
struct imgu_mmu {
	struct device *dev;
	void __iomem *base;	/* IMGU MMIO registers */
	/* protect access to l2pts, l1pt */
	spinlock_t lock;

	/* Backing page for unmapped PTEs; the MMU has no "valid" bit. */
	void *dummy_page;
	u32 dummy_page_pteval;

	/* L2 page table whose every entry points at dummy_page. */
	u32 *dummy_l2pt;
	u32 dummy_l2pt_pteval;

	/* CPU pointers to real L2 tables; NULL entry means dummy_l2pt. */
	u32 **l2pts;
	/* Level-1 page table (CPU pointer); its PFN is programmed in HW. */
	u32 *l1pt;

	/* Public part handed to callers: the mappable IOVA aperture. */
	struct imgu_mmu_info geometry;
};
6526f56895STomasz Figa 
/* Convert the public geometry pointer back to its containing imgu_mmu. */
static inline struct imgu_mmu *to_imgu_mmu(struct imgu_mmu_info *info)
{
	return container_of(info, struct imgu_mmu, geometry);
}
7026f56895STomasz Figa 
/**
 * imgu_mmu_tlb_invalidate - invalidate translation look-aside buffer
 * @mmu: MMU to perform the invalidate operation on
 *
 * This function invalidates the whole TLB. Must be called when the hardware
 * is powered on.
 */
static void imgu_mmu_tlb_invalidate(struct imgu_mmu *mmu)
{
	/* A single register write triggers a full TLB invalidation. */
	writel(TLB_INVALIDATE, mmu->base + REG_TLB_INVALIDATE);
}
8226f56895STomasz Figa 
8327b795adSYong Zhi static void call_if_imgu_is_powered(struct imgu_mmu *mmu,
8427b795adSYong Zhi 				    void (*func)(struct imgu_mmu *mmu))
8526f56895STomasz Figa {
8626f56895STomasz Figa 	if (!pm_runtime_get_if_in_use(mmu->dev))
8726f56895STomasz Figa 		return;
8826f56895STomasz Figa 
8926f56895STomasz Figa 	func(mmu);
9026f56895STomasz Figa 	pm_runtime_put(mmu->dev);
9126f56895STomasz Figa }
9226f56895STomasz Figa 
9326f56895STomasz Figa /**
9427b795adSYong Zhi  * imgu_mmu_set_halt - set CIO gate halt bit
9526f56895STomasz Figa  * @mmu: MMU to set the CIO gate bit in.
9626f56895STomasz Figa  * @halt: Desired state of the gate bit.
9726f56895STomasz Figa  *
9826f56895STomasz Figa  * This function sets the CIO gate bit that controls whether external memory
9926f56895STomasz Figa  * accesses are allowed. Must be called when the hardware is powered on.
10026f56895STomasz Figa  */
10127b795adSYong Zhi static void imgu_mmu_set_halt(struct imgu_mmu *mmu, bool halt)
10226f56895STomasz Figa {
10326f56895STomasz Figa 	int ret;
10426f56895STomasz Figa 	u32 val;
10526f56895STomasz Figa 
10626f56895STomasz Figa 	writel(halt, mmu->base + REG_GP_HALT);
10726f56895STomasz Figa 	ret = readl_poll_timeout(mmu->base + REG_GP_HALTED,
10826f56895STomasz Figa 				 val, (val & 1) == halt, 1000, 100000);
10926f56895STomasz Figa 
11026f56895STomasz Figa 	if (ret)
11126f56895STomasz Figa 		dev_err(mmu->dev, "failed to %s CIO gate halt\n",
11226f56895STomasz Figa 			halt ? "set" : "clear");
11326f56895STomasz Figa }
11426f56895STomasz Figa 
11526f56895STomasz Figa /**
11627b795adSYong Zhi  * imgu_mmu_alloc_page_table - allocate a pre-filled page table
11726f56895STomasz Figa  * @pteval: Value to initialize for page table entries with.
11826f56895STomasz Figa  *
11926f56895STomasz Figa  * Return: Pointer to allocated page table or NULL on failure.
12026f56895STomasz Figa  */
12127b795adSYong Zhi static u32 *imgu_mmu_alloc_page_table(u32 pteval)
12226f56895STomasz Figa {
12326f56895STomasz Figa 	u32 *pt;
12426f56895STomasz Figa 	int pte;
12526f56895STomasz Figa 
12626f56895STomasz Figa 	pt = (u32 *)__get_free_page(GFP_KERNEL);
12726f56895STomasz Figa 	if (!pt)
12826f56895STomasz Figa 		return NULL;
12926f56895STomasz Figa 
13026f56895STomasz Figa 	for (pte = 0; pte < IPU3_PT_PTES; pte++)
13126f56895STomasz Figa 		pt[pte] = pteval;
13226f56895STomasz Figa 
13331e0a455SJean-Baptiste Jouband 	set_memory_uc((unsigned long)pt, IPU3_PT_ORDER);
13426f56895STomasz Figa 
13526f56895STomasz Figa 	return pt;
13626f56895STomasz Figa }
13726f56895STomasz Figa 
/**
 * imgu_mmu_free_page_table - free page table
 * @pt: Page table to free.
 */
static void imgu_mmu_free_page_table(u32 *pt)
{
	/* Restore write-back caching before returning the page. */
	set_memory_wb((unsigned long)pt, IPU3_PT_ORDER);
	free_page((unsigned long)pt);
}
14726f56895STomasz Figa 
14826f56895STomasz Figa /**
14926f56895STomasz Figa  * address_to_pte_idx - split IOVA into L1 and L2 page table indices
15026f56895STomasz Figa  * @iova: IOVA to split.
15126f56895STomasz Figa  * @l1pt_idx: Output for the L1 page table index.
15226f56895STomasz Figa  * @l2pt_idx: Output for the L2 page index.
15326f56895STomasz Figa  */
15426f56895STomasz Figa static inline void address_to_pte_idx(unsigned long iova, u32 *l1pt_idx,
15526f56895STomasz Figa 				      u32 *l2pt_idx)
15626f56895STomasz Figa {
15726f56895STomasz Figa 	iova >>= IPU3_PAGE_SHIFT;
15826f56895STomasz Figa 
15926f56895STomasz Figa 	if (l2pt_idx)
16026f56895STomasz Figa 		*l2pt_idx = iova & IPU3_L2PT_MASK;
16126f56895STomasz Figa 
16226f56895STomasz Figa 	iova >>= IPU3_L2PT_SHIFT;
16326f56895STomasz Figa 
16426f56895STomasz Figa 	if (l1pt_idx)
16526f56895STomasz Figa 		*l1pt_idx = iova & IPU3_L1PT_MASK;
16626f56895STomasz Figa }
16726f56895STomasz Figa 
/**
 * imgu_mmu_get_l2pt - get the L2 page table for an L1 index, allocating
 *		       one on demand
 * @mmu: MMU to look the table up in
 * @l1pt_idx: index into the L1 page table
 *
 * Return: CPU pointer to the L2 page table, or NULL on allocation failure.
 */
static u32 *imgu_mmu_get_l2pt(struct imgu_mmu *mmu, u32 l1pt_idx)
{
	unsigned long flags;
	u32 *l2pt, *new_l2pt;
	u32 pteval;

	spin_lock_irqsave(&mmu->lock, flags);

	l2pt = mmu->l2pts[l1pt_idx];
	if (l2pt)
		goto done;

	/* Drop the lock: the allocation below may sleep (GFP_KERNEL). */
	spin_unlock_irqrestore(&mmu->lock, flags);

	new_l2pt = imgu_mmu_alloc_page_table(mmu->dummy_page_pteval);
	if (!new_l2pt)
		return NULL;

	spin_lock_irqsave(&mmu->lock, flags);

	dev_dbg(mmu->dev, "allocated page table %p for l1pt_idx %u\n",
		new_l2pt, l1pt_idx);

	/* Recheck: another context may have installed a table meanwhile. */
	l2pt = mmu->l2pts[l1pt_idx];
	if (l2pt) {
		imgu_mmu_free_page_table(new_l2pt);
		goto done;
	}

	l2pt = new_l2pt;
	mmu->l2pts[l1pt_idx] = new_l2pt;

	/* Point the L1 entry at the new table so HW can reach it. */
	pteval = IPU3_ADDR2PTE(virt_to_phys(new_l2pt));
	mmu->l1pt[l1pt_idx] = pteval;

done:
	spin_unlock_irqrestore(&mmu->lock, flags);
	return l2pt;
}
20726f56895STomasz Figa 
/**
 * __imgu_mmu_map - map one IPU3 page
 * @mmu: MMU to create the mapping in
 * @iova: IOVA of the page (expected IPU3_PAGE_SIZE aligned by the caller)
 * @paddr: physical address to map the page to
 *
 * Return: 0 on success, -ENODEV if @mmu is NULL, -ENOMEM if an L2 page
 * table cannot be allocated, -EBUSY if the PTE is already mapped.
 */
static int __imgu_mmu_map(struct imgu_mmu *mmu, unsigned long iova,
			  phys_addr_t paddr)
{
	u32 l1pt_idx, l2pt_idx;
	unsigned long flags;
	u32 *l2pt;

	if (!mmu)
		return -ENODEV;

	address_to_pte_idx(iova, &l1pt_idx, &l2pt_idx);

	l2pt = imgu_mmu_get_l2pt(mmu, l1pt_idx);
	if (!l2pt)
		return -ENOMEM;

	spin_lock_irqsave(&mmu->lock, flags);

	/* A PTE equal to the dummy-page value marks an unmapped slot. */
	if (l2pt[l2pt_idx] != mmu->dummy_page_pteval) {
		spin_unlock_irqrestore(&mmu->lock, flags);
		return -EBUSY;
	}

	l2pt[l2pt_idx] = IPU3_ADDR2PTE(paddr);

	spin_unlock_irqrestore(&mmu->lock, flags);

	return 0;
}
23726f56895STomasz Figa 
2383efcbe3eSSakari Ailus /**
2393efcbe3eSSakari Ailus  * imgu_mmu_map - map a buffer to a physical address
2403efcbe3eSSakari Ailus  *
2413efcbe3eSSakari Ailus  * @info: MMU mappable range
2423efcbe3eSSakari Ailus  * @iova: the virtual address
2433efcbe3eSSakari Ailus  * @paddr: the physical address
2443efcbe3eSSakari Ailus  * @size: length of the mappable area
2453efcbe3eSSakari Ailus  *
2463efcbe3eSSakari Ailus  * The function has been adapted from iommu_map() in
2473efcbe3eSSakari Ailus  * drivers/iommu/iommu.c .
2483efcbe3eSSakari Ailus  */
24927b795adSYong Zhi int imgu_mmu_map(struct imgu_mmu_info *info, unsigned long iova,
25026f56895STomasz Figa 		 phys_addr_t paddr, size_t size)
25126f56895STomasz Figa {
25227b795adSYong Zhi 	struct imgu_mmu *mmu = to_imgu_mmu(info);
25326f56895STomasz Figa 	int ret = 0;
25426f56895STomasz Figa 
25526f56895STomasz Figa 	/*
25626f56895STomasz Figa 	 * both the virtual address and the physical one, as well as
25726f56895STomasz Figa 	 * the size of the mapping, must be aligned (at least) to the
25826f56895STomasz Figa 	 * size of the smallest page supported by the hardware
25926f56895STomasz Figa 	 */
26017f61abbSSakari Ailus 	if (!IS_ALIGNED(iova | paddr | size, IPU3_PAGE_SIZE)) {
26117f61abbSSakari Ailus 		dev_err(mmu->dev, "unaligned: iova 0x%lx pa %pa size 0x%zx\n",
26217f61abbSSakari Ailus 			iova, &paddr, size);
26326f56895STomasz Figa 		return -EINVAL;
26426f56895STomasz Figa 	}
26526f56895STomasz Figa 
26626f56895STomasz Figa 	dev_dbg(mmu->dev, "map: iova 0x%lx pa %pa size 0x%zx\n",
26726f56895STomasz Figa 		iova, &paddr, size);
26826f56895STomasz Figa 
26926f56895STomasz Figa 	while (size) {
27017f61abbSSakari Ailus 		dev_dbg(mmu->dev, "mapping: iova 0x%lx pa %pa\n", iova, &paddr);
27126f56895STomasz Figa 
27227b795adSYong Zhi 		ret = __imgu_mmu_map(mmu, iova, paddr);
27326f56895STomasz Figa 		if (ret)
27426f56895STomasz Figa 			break;
27526f56895STomasz Figa 
27617f61abbSSakari Ailus 		iova += IPU3_PAGE_SIZE;
27717f61abbSSakari Ailus 		paddr += IPU3_PAGE_SIZE;
27817f61abbSSakari Ailus 		size -= IPU3_PAGE_SIZE;
27926f56895STomasz Figa 	}
28026f56895STomasz Figa 
28127b795adSYong Zhi 	call_if_imgu_is_powered(mmu, imgu_mmu_tlb_invalidate);
28226f56895STomasz Figa 
28326f56895STomasz Figa 	return ret;
28426f56895STomasz Figa }
28526f56895STomasz Figa 
2863efcbe3eSSakari Ailus /**
2873efcbe3eSSakari Ailus  * imgu_mmu_map_sg - Map a scatterlist
2883efcbe3eSSakari Ailus  *
2893efcbe3eSSakari Ailus  * @info: MMU mappable range
2903efcbe3eSSakari Ailus  * @iova: the virtual address
2913efcbe3eSSakari Ailus  * @sg: the scatterlist to map
2923efcbe3eSSakari Ailus  * @nents: number of entries in the scatterlist
2933efcbe3eSSakari Ailus  *
2943efcbe3eSSakari Ailus  * The function has been adapted from default_iommu_map_sg() in
2953efcbe3eSSakari Ailus  * drivers/iommu/iommu.c .
2963efcbe3eSSakari Ailus  */
29727b795adSYong Zhi size_t imgu_mmu_map_sg(struct imgu_mmu_info *info, unsigned long iova,
29826f56895STomasz Figa 		       struct scatterlist *sg, unsigned int nents)
29926f56895STomasz Figa {
30027b795adSYong Zhi 	struct imgu_mmu *mmu = to_imgu_mmu(info);
30126f56895STomasz Figa 	struct scatterlist *s;
30226f56895STomasz Figa 	size_t s_length, mapped = 0;
30317f61abbSSakari Ailus 	unsigned int i;
30426f56895STomasz Figa 	int ret;
30526f56895STomasz Figa 
30626f56895STomasz Figa 	for_each_sg(sg, s, nents, i) {
30726f56895STomasz Figa 		phys_addr_t phys = page_to_phys(sg_page(s)) + s->offset;
30826f56895STomasz Figa 
30926f56895STomasz Figa 		s_length = s->length;
31026f56895STomasz Figa 
31117f61abbSSakari Ailus 		if (!IS_ALIGNED(s->offset, IPU3_PAGE_SIZE))
31226f56895STomasz Figa 			goto out_err;
31326f56895STomasz Figa 
31417f61abbSSakari Ailus 		/* must be IPU3_PAGE_SIZE aligned to be mapped singlely */
31517f61abbSSakari Ailus 		if (i == nents - 1 && !IS_ALIGNED(s->length, IPU3_PAGE_SIZE))
31626f56895STomasz Figa 			s_length = PAGE_ALIGN(s->length);
31726f56895STomasz Figa 
31827b795adSYong Zhi 		ret = imgu_mmu_map(info, iova + mapped, phys, s_length);
31926f56895STomasz Figa 		if (ret)
32026f56895STomasz Figa 			goto out_err;
32126f56895STomasz Figa 
32226f56895STomasz Figa 		mapped += s_length;
32326f56895STomasz Figa 	}
32426f56895STomasz Figa 
32527b795adSYong Zhi 	call_if_imgu_is_powered(mmu, imgu_mmu_tlb_invalidate);
32626f56895STomasz Figa 
32726f56895STomasz Figa 	return mapped;
32826f56895STomasz Figa 
32926f56895STomasz Figa out_err:
33026f56895STomasz Figa 	/* undo mappings already done */
33127b795adSYong Zhi 	imgu_mmu_unmap(info, iova, mapped);
33226f56895STomasz Figa 
33326f56895STomasz Figa 	return 0;
33426f56895STomasz Figa }
33526f56895STomasz Figa 
/**
 * __imgu_mmu_unmap - unmap one IPU3 page
 * @mmu: MMU to remove the mapping from
 * @iova: IOVA of the page to unmap
 * @size: requested size; returned unchanged when the page was mapped
 *
 * Return: @size if a mapping was removed, 0 if @mmu is NULL, no L2 table
 * exists for @iova, or the PTE was already the dummy (unmapped) value.
 */
static size_t __imgu_mmu_unmap(struct imgu_mmu *mmu,
			       unsigned long iova, size_t size)
{
	u32 l1pt_idx, l2pt_idx;
	unsigned long flags;
	size_t unmap = size;
	u32 *l2pt;

	if (!mmu)
		return 0;

	address_to_pte_idx(iova, &l1pt_idx, &l2pt_idx);

	spin_lock_irqsave(&mmu->lock, flags);

	/* No L2 table at all means nothing was ever mapped here. */
	l2pt = mmu->l2pts[l1pt_idx];
	if (!l2pt) {
		spin_unlock_irqrestore(&mmu->lock, flags);
		return 0;
	}

	/* Already pointing at the dummy page: report nothing unmapped. */
	if (l2pt[l2pt_idx] == mmu->dummy_page_pteval)
		unmap = 0;

	l2pt[l2pt_idx] = mmu->dummy_page_pteval;

	spin_unlock_irqrestore(&mmu->lock, flags);

	return unmap;
}
36626f56895STomasz Figa 
/**
 * imgu_mmu_unmap - Unmap a buffer
 *
 * @info: MMU mappable range
 * @iova: the virtual address
 * @size: the length of the buffer
 *
 * The function has been adapted from iommu_unmap() in
 * drivers/iommu/iommu.c .
 */
size_t imgu_mmu_unmap(struct imgu_mmu_info *info, unsigned long iova,
		      size_t size)
{
	struct imgu_mmu *mmu = to_imgu_mmu(info);
	size_t unmapped_page, unmapped = 0;

	/*
	 * The virtual address, as well as the size of the mapping, must be
	 * aligned (at least) to the size of the smallest page supported
	 * by the hardware
	 */
	if (!IS_ALIGNED(iova | size, IPU3_PAGE_SIZE)) {
		dev_err(mmu->dev, "unaligned: iova 0x%lx size 0x%zx\n",
			iova, size);
		/*
		 * NOTE(review): returning -EINVAL from a size_t function
		 * yields a huge positive value; confirm callers never
		 * interpret this return as a byte count.
		 */
		return -EINVAL;
	}

	dev_dbg(mmu->dev, "unmap this: iova 0x%lx size 0x%zx\n", iova, size);

	/*
	 * Keep iterating until we either unmap 'size' bytes (or more)
	 * or we hit an area that isn't mapped.
	 */
	while (unmapped < size) {
		unmapped_page = __imgu_mmu_unmap(mmu, iova, IPU3_PAGE_SIZE);
		if (!unmapped_page)
			break;

		dev_dbg(mmu->dev, "unmapped: iova 0x%lx size 0x%zx\n",
			iova, unmapped_page);

		iova += unmapped_page;
		unmapped += unmapped_page;
	}

	call_if_imgu_is_powered(mmu, imgu_mmu_tlb_invalidate);

	return unmapped;
}
41626f56895STomasz Figa 
/**
 * imgu_mmu_init() - initialize IPU3 MMU block
 *
 * @parent:	struct device parent
 * @base:	IOMEM base of hardware registers.
 *
 * Return: Pointer to IPU3 MMU private data pointer or ERR_PTR() on error.
 */
struct imgu_mmu_info *imgu_mmu_init(struct device *parent, void __iomem *base)
{
	struct imgu_mmu *mmu;
	u32 pteval;

	mmu = kzalloc(sizeof(*mmu), GFP_KERNEL);
	if (!mmu)
		return ERR_PTR(-ENOMEM);

	mmu->dev = parent;
	mmu->base = base;
	spin_lock_init(&mmu->lock);

	/* Disallow external memory access when having no valid page tables. */
	imgu_mmu_set_halt(mmu, true);

	/*
	 * The MMU does not have a "valid" bit, so we have to use a dummy
	 * page for invalid entries.
	 */
	mmu->dummy_page = (void *)__get_free_page(GFP_KERNEL);
	if (!mmu->dummy_page)
		goto fail_group;
	pteval = IPU3_ADDR2PTE(virt_to_phys(mmu->dummy_page));
	mmu->dummy_page_pteval = pteval;

	/*
	 * Allocate a dummy L2 page table with all entries pointing to
	 * the dummy page.
	 */
	mmu->dummy_l2pt = imgu_mmu_alloc_page_table(pteval);
	if (!mmu->dummy_l2pt)
		goto fail_dummy_page;
	pteval = IPU3_ADDR2PTE(virt_to_phys(mmu->dummy_l2pt));
	mmu->dummy_l2pt_pteval = pteval;

	/*
	 * Allocate the array of L2PT CPU pointers, initialized to zero,
	 * which means the dummy L2PT allocated above.
	 */
	mmu->l2pts = vzalloc(IPU3_PT_PTES * sizeof(*mmu->l2pts));
	if (!mmu->l2pts)
		goto fail_l2pt;

	/* Allocate the L1 page table. */
	mmu->l1pt = imgu_mmu_alloc_page_table(mmu->dummy_l2pt_pteval);
	if (!mmu->l1pt)
		goto fail_l2pts;

	/* Program the L1 table into HW, then allow memory access again. */
	pteval = IPU3_ADDR2PTE(virt_to_phys(mmu->l1pt));
	writel(pteval, mmu->base + REG_L1_PHYS);
	imgu_mmu_tlb_invalidate(mmu);
	imgu_mmu_set_halt(mmu, false);

	mmu->geometry.aperture_start = 0;
	mmu->geometry.aperture_end = DMA_BIT_MASK(IPU3_MMU_ADDRESS_BITS);

	return &mmu->geometry;

fail_l2pts:
	vfree(mmu->l2pts);
fail_l2pt:
	imgu_mmu_free_page_table(mmu->dummy_l2pt);
fail_dummy_page:
	free_page((unsigned long)mmu->dummy_page);
fail_group:	/* NOTE(review): label name looks like a leftover; frees mmu */
	kfree(mmu);

	return ERR_PTR(-ENOMEM);
}
49526f56895STomasz Figa 
/**
 * imgu_mmu_exit() - clean up IPU3 MMU block
 *
 * @info: MMU mappable range
 */
void imgu_mmu_exit(struct imgu_mmu_info *info)
{
	struct imgu_mmu *mmu = to_imgu_mmu(info);

	/* We are going to free our page tables, no more memory access. */
	imgu_mmu_set_halt(mmu, true);
	imgu_mmu_tlb_invalidate(mmu);

	/* Free in reverse order of allocation in imgu_mmu_init(). */
	imgu_mmu_free_page_table(mmu->l1pt);
	vfree(mmu->l2pts);
	imgu_mmu_free_page_table(mmu->dummy_l2pt);
	free_page((unsigned long)mmu->dummy_page);
	kfree(mmu);
}
51526f56895STomasz Figa 
/**
 * imgu_mmu_suspend() - halt external memory access via the CIO gate
 *
 * @info: MMU mappable range
 */
void imgu_mmu_suspend(struct imgu_mmu_info *info)
{
	struct imgu_mmu *mmu = to_imgu_mmu(info);

	imgu_mmu_set_halt(mmu, true);
}
52226f56895STomasz Figa 
/**
 * imgu_mmu_resume() - restore MMU state after a power transition
 *
 * @info: MMU mappable range
 */
void imgu_mmu_resume(struct imgu_mmu_info *info)
{
	struct imgu_mmu *mmu = to_imgu_mmu(info);
	u32 pteval;

	/* Keep memory access halted while the L1 table is reprogrammed. */
	imgu_mmu_set_halt(mmu, true);

	pteval = IPU3_ADDR2PTE(virt_to_phys(mmu->l1pt));
	writel(pteval, mmu->base + REG_L1_PHYS);

	imgu_mmu_tlb_invalidate(mmu);
	imgu_mmu_set_halt(mmu, false);
}
536