// SPDX-License-Identifier: GPL-2.0
/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
#include <linux/atomic.h>
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/io-pgtable.h>
#include <linux/iommu.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/shmem_fs.h>
#include <linux/sizes.h>

#include "panfrost_device.h"
#include "panfrost_mmu.h"
#include "panfrost_gem.h"
#include "panfrost_features.h"
#include "panfrost_regs.h"

#define mmu_write(dev, reg, data) writel(data, dev->iomem + reg)
#define mmu_read(dev, reg) readl(dev->iomem + reg)

static int wait_ready(struct panfrost_device *pfdev, u32 as_nr)
{
	int ret;
	u32 val;

	/* Wait for the MMU status to indicate there is no active command, in
	 * case one is pending. */
	ret = readl_relaxed_poll_timeout_atomic(pfdev->iomem + AS_STATUS(as_nr),
		val, !(val & AS_STATUS_AS_ACTIVE), 10, 1000);

	if (ret)
		dev_err(pfdev->dev, "AS_ACTIVE bit stuck\n");

	return ret;
}

static int write_cmd(struct panfrost_device *pfdev, u32 as_nr, u32 cmd)
{
	int status;

	/* write AS_COMMAND when MMU is ready to accept another command */
	status = wait_ready(pfdev, as_nr);
	if (!status)
		mmu_write(pfdev, AS_COMMAND(as_nr), cmd);

	return status;
}

static void lock_region(struct panfrost_device *pfdev, u32 as_nr,
			u64 iova, size_t size)
{
	u8 region_width;
	u64 region = iova & PAGE_MASK;
	/*
	 * fls returns:
	 * 1 .. 32
	 *
	 * 10 + fls(num_pages)
	 * results in the range (11 .. 42)
	 */

	size = round_up(size, PAGE_SIZE);

	region_width = 10 + fls(size >> PAGE_SHIFT);
	if ((size >> PAGE_SHIFT) != (1ul << (region_width - 11))) {
		/* not pow2, so must go up to the next pow2 */
		region_width += 1;
	}
	region |= region_width;

	/* Lock the region that needs to be updated */
	mmu_write(pfdev, AS_LOCKADDR_LO(as_nr), region & 0xFFFFFFFFUL);
	mmu_write(pfdev, AS_LOCKADDR_HI(as_nr), (region >> 32) & 0xFFFFFFFFUL);
	write_cmd(pfdev, as_nr, AS_COMMAND_LOCK);
}


static int mmu_hw_do_operation(struct panfrost_device *pfdev,
			       struct panfrost_mmu *mmu,
			       u64 iova, size_t size, u32 op)
{
	int ret, as_nr;

	spin_lock(&pfdev->as_lock);
	as_nr = mmu->as;

	if (as_nr < 0) {
		spin_unlock(&pfdev->as_lock);
		return 0;
	}

	if (op != AS_COMMAND_UNLOCK)
		lock_region(pfdev, as_nr, iova, size);

	/* Run the MMU operation */
	write_cmd(pfdev, as_nr, op);

	/* Wait for the flush to complete */
	ret = wait_ready(pfdev, as_nr);

	spin_unlock(&pfdev->as_lock);

	return ret;
}

static void panfrost_mmu_enable(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
{
	int as_nr = mmu->as;
	struct io_pgtable_cfg *cfg = &mmu->pgtbl_cfg;
	u64 transtab = cfg->arm_mali_lpae_cfg.transtab;
	u64 memattr = cfg->arm_mali_lpae_cfg.memattr;

	mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), transtab & 0xffffffffUL);
	mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), transtab >> 32);

	/* Need to revisit mem attrs.
	 * NC is the default, Mali driver is inner WT.
	 */
	mmu_write(pfdev, AS_MEMATTR_LO(as_nr), memattr & 0xffffffffUL);
	mmu_write(pfdev, AS_MEMATTR_HI(as_nr), memattr >> 32);

	write_cmd(pfdev, as_nr, AS_COMMAND_UPDATE);
}

static void mmu_disable(struct panfrost_device *pfdev, u32 as_nr)
{
	mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), 0);
	mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), 0);

	mmu_write(pfdev, AS_MEMATTR_LO(as_nr), 0);
	mmu_write(pfdev, AS_MEMATTR_HI(as_nr), 0);

	write_cmd(pfdev, as_nr, AS_COMMAND_UPDATE);
}

u32 panfrost_mmu_as_get(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
{
	int as;

	spin_lock(&pfdev->as_lock);

	as = mmu->as;
	if (as >= 0) {
		int en = atomic_inc_return(&mmu->as_count);
		WARN_ON(en >= NUM_JOB_SLOTS);

		list_move(&mmu->list, &pfdev->as_lru_list);
		goto out;
	}

	/* Check for a free AS */
	as = ffz(pfdev->as_alloc_mask);
	if (!(BIT(as) & pfdev->features.as_present)) {
		struct panfrost_mmu *lru_mmu;

		list_for_each_entry_reverse(lru_mmu, &pfdev->as_lru_list, list) {
			if (!atomic_read(&lru_mmu->as_count))
				break;
		}
		WARN_ON(&lru_mmu->list == &pfdev->as_lru_list);

		list_del_init(&lru_mmu->list);
		as = lru_mmu->as;

		WARN_ON(as < 0);
		lru_mmu->as = -1;
	}

	/* Assign the free or reclaimed AS to the FD */
	mmu->as = as;
	set_bit(as, &pfdev->as_alloc_mask);
	atomic_set(&mmu->as_count, 1);
	list_add(&mmu->list, &pfdev->as_lru_list);

	dev_dbg(pfdev->dev, "Assigned AS%d to mmu %p, alloc_mask=%lx", as, mmu, pfdev->as_alloc_mask);

	panfrost_mmu_enable(pfdev, mmu);

out:
	spin_unlock(&pfdev->as_lock);
	return as;
}

void panfrost_mmu_as_put(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
{
	atomic_dec(&mmu->as_count);
	WARN_ON(atomic_read(&mmu->as_count) < 0);
}

void panfrost_mmu_reset(struct panfrost_device *pfdev)
{
	struct panfrost_mmu *mmu, *mmu_tmp;

	spin_lock(&pfdev->as_lock);

	pfdev->as_alloc_mask = 0;

	list_for_each_entry_safe(mmu, mmu_tmp, &pfdev->as_lru_list, list) {
		mmu->as = -1;
		atomic_set(&mmu->as_count, 0);
		list_del_init(&mmu->list);
	}

	spin_unlock(&pfdev->as_lock);

	mmu_write(pfdev, MMU_INT_CLEAR, ~0);
	mmu_write(pfdev, MMU_INT_MASK, ~0);
}

static size_t get_pgsize(u64 addr, size_t size)
{
	if (addr & (SZ_2M - 1) || size < SZ_2M)
		return SZ_4K;

	return SZ_2M;
}

static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
		      u64 iova, int prot, struct sg_table *sgt)
{
	unsigned int count;
	struct scatterlist *sgl;
	struct io_pgtable_ops *ops = mmu->pgtbl_ops;
	u64 start_iova = iova;

	mutex_lock(&mmu->lock);

	for_each_sg(sgt->sgl, sgl, sgt->nents, count) {
		unsigned long paddr = sg_dma_address(sgl);
		size_t len = sg_dma_len(sgl);

		dev_dbg(pfdev->dev, "map: as=%d, iova=%llx, paddr=%lx, len=%zx", mmu->as, iova, paddr, len);

		while (len) {
			size_t pgsize = get_pgsize(iova | paddr, len);

			ops->map(ops, iova, paddr, pgsize, prot);
			iova += pgsize;
			paddr += pgsize;
			len -= pgsize;
		}
	}

	mmu_hw_do_operation(pfdev, mmu, start_iova, iova - start_iova,
			    AS_COMMAND_FLUSH_PT);

	mutex_unlock(&mmu->lock);

	return 0;
}

int panfrost_mmu_map(struct panfrost_gem_object *bo)
{
	struct drm_gem_object *obj = &bo->base.base;
	struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
	struct sg_table *sgt;
	int ret;
	int prot = IOMMU_READ | IOMMU_WRITE;

	if (WARN_ON(bo->is_mapped))
		return 0;

	if (bo->noexec)
		prot |= IOMMU_NOEXEC;

	sgt = drm_gem_shmem_get_pages_sgt(obj);
	if (WARN_ON(IS_ERR(sgt)))
		return PTR_ERR(sgt);

	ret = pm_runtime_get_sync(pfdev->dev);
	if (ret < 0)
		return ret;

	mmu_map_sg(pfdev, bo->mmu, bo->node.start << PAGE_SHIFT, prot, sgt);

	pm_runtime_mark_last_busy(pfdev->dev);
	pm_runtime_put_autosuspend(pfdev->dev);
	bo->is_mapped = true;

	return 0;
}

void panfrost_mmu_unmap(struct panfrost_gem_object *bo)
{
	struct drm_gem_object *obj = &bo->base.base;
	struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
	struct io_pgtable_ops *ops = bo->mmu->pgtbl_ops;
	u64 iova = bo->node.start << PAGE_SHIFT;
	size_t len = bo->node.size << PAGE_SHIFT;
	size_t unmapped_len = 0;
	int ret;

	if (WARN_ON(!bo->is_mapped))
		return;

	dev_dbg(pfdev->dev, "unmap: as=%d, iova=%llx, len=%zx", bo->mmu->as, iova, len);

	ret = pm_runtime_get_sync(pfdev->dev);
	if (ret < 0)
		return;

	mutex_lock(&bo->mmu->lock);

	while (unmapped_len < len) {
		size_t unmapped_page;
		size_t pgsize = get_pgsize(iova, len - unmapped_len);

		if (ops->iova_to_phys(ops, iova)) {
			unmapped_page = ops->unmap(ops, iova, pgsize);
			WARN_ON(unmapped_page != pgsize);
		}
		iova += pgsize;
		unmapped_len += pgsize;
	}

	mmu_hw_do_operation(pfdev, bo->mmu, bo->node.start << PAGE_SHIFT,
			    bo->node.size << PAGE_SHIFT, AS_COMMAND_FLUSH_PT);

	mutex_unlock(&bo->mmu->lock);

	pm_runtime_mark_last_busy(pfdev->dev);
	pm_runtime_put_autosuspend(pfdev->dev);
	bo->is_mapped = false;
}

static void mmu_tlb_inv_context_s1(void *cookie)
{
	struct panfrost_file_priv *priv = cookie;

	mmu_hw_do_operation(priv->pfdev, &priv->mmu, 0, ~0UL, AS_COMMAND_FLUSH_MEM);
}

static void mmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
				     size_t granule, bool leaf, void *cookie)
{}

static void mmu_tlb_sync_context(void *cookie)
{
	//struct panfrost_device *pfdev = cookie;
	// TODO: Wait 1000 GPU cycles for HW_ISSUE_6367/T60X
}

static const struct iommu_gather_ops mmu_tlb_ops = {
	.tlb_flush_all	= mmu_tlb_inv_context_s1,
	.tlb_add_flush	= mmu_tlb_inv_range_nosync,
	.tlb_sync	= mmu_tlb_sync_context,
};

int panfrost_mmu_pgtable_alloc(struct panfrost_file_priv *priv)
{
	struct panfrost_mmu *mmu = &priv->mmu;
	struct panfrost_device *pfdev = priv->pfdev;

	mutex_init(&mmu->lock);
	INIT_LIST_HEAD(&mmu->list);
	mmu->as = -1;

	mmu->pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= SZ_4K | SZ_2M,
		.ias		= FIELD_GET(0xff, pfdev->features.mmu_features),
		.oas		= FIELD_GET(0xff00, pfdev->features.mmu_features),
		.tlb		= &mmu_tlb_ops,
		.iommu_dev	= pfdev->dev,
	};

	mmu->pgtbl_ops = alloc_io_pgtable_ops(ARM_MALI_LPAE, &mmu->pgtbl_cfg,
					      priv);
	if (!mmu->pgtbl_ops)
		return -EINVAL;

	return 0;
}

void panfrost_mmu_pgtable_free(struct panfrost_file_priv *priv)
{
	struct panfrost_device *pfdev = priv->pfdev;
	struct panfrost_mmu *mmu = &priv->mmu;

	spin_lock(&pfdev->as_lock);
	if (mmu->as >= 0) {
		clear_bit(mmu->as, &pfdev->as_alloc_mask);
		clear_bit(mmu->as, &pfdev->as_in_use_mask);
		list_del(&mmu->list);
	}
	spin_unlock(&pfdev->as_lock);

	free_io_pgtable_ops(mmu->pgtbl_ops);
}

static struct drm_mm_node *addr_to_drm_mm_node(struct panfrost_device *pfdev, int as, u64 addr)
{
	struct drm_mm_node *node = NULL;
	u64 offset = addr >> PAGE_SHIFT;
	struct panfrost_mmu *mmu;

	spin_lock(&pfdev->as_lock);
	list_for_each_entry(mmu, &pfdev->as_lru_list, list) {
		struct panfrost_file_priv *priv;
		if (as != mmu->as)
			continue;

		priv = container_of(mmu, struct panfrost_file_priv, mmu);
		drm_mm_for_each_node(node, &priv->mm) {
			if (offset >= node->start && offset < (node->start + node->size))
				goto out;
		}
	}

out:
	spin_unlock(&pfdev->as_lock);
	return node;
}

#define NUM_FAULT_PAGES (SZ_2M / PAGE_SIZE)

int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as, u64 addr)
{
	int ret, i;
	struct drm_mm_node *node;
	struct panfrost_gem_object *bo;
	struct address_space *mapping;
	pgoff_t page_offset;
	struct sg_table *sgt;
	struct page **pages;

	node = addr_to_drm_mm_node(pfdev, as, addr);
	if (!node)
		return -ENOENT;

	bo = drm_mm_node_to_panfrost_bo(node);
	if (!bo->is_heap) {
		dev_WARN(pfdev->dev, "matching BO is not heap type (GPU VA = %llx)",
			 node->start << PAGE_SHIFT);
		return -EINVAL;
	}
	WARN_ON(bo->mmu->as != as);

	/* Assume 2MB alignment and size multiple */
	addr &= ~((u64)SZ_2M - 1);
	page_offset = addr >> PAGE_SHIFT;
	page_offset -= node->start;

	mutex_lock(&bo->base.pages_lock);

	if (!bo->base.pages) {
		bo->sgts = kvmalloc_array(bo->base.base.size / SZ_2M,
				     sizeof(struct sg_table), GFP_KERNEL | __GFP_ZERO);
		if (!bo->sgts) {
			mutex_unlock(&bo->base.pages_lock);
			return -ENOMEM;
		}

		pages = kvmalloc_array(bo->base.base.size >> PAGE_SHIFT,
				       sizeof(struct page *), GFP_KERNEL | __GFP_ZERO);
		if (!pages) {
			kfree(bo->sgts);
			bo->sgts = NULL;
			mutex_unlock(&bo->base.pages_lock);
			return -ENOMEM;
		}
		bo->base.pages = pages;
		bo->base.pages_use_count = 1;
	} else
		pages = bo->base.pages;

	mapping = bo->base.base.filp->f_mapping;
	mapping_set_unevictable(mapping);

	for (i = page_offset; i < page_offset + NUM_FAULT_PAGES; i++) {
		pages[i] = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(pages[i])) {
			mutex_unlock(&bo->base.pages_lock);
			ret = PTR_ERR(pages[i]);
			goto err_pages;
		}
	}

	mutex_unlock(&bo->base.pages_lock);

	sgt = &bo->sgts[page_offset / (SZ_2M / PAGE_SIZE)];
	ret = sg_alloc_table_from_pages(sgt, pages + page_offset,
					NUM_FAULT_PAGES, 0, SZ_2M, GFP_KERNEL);
	if (ret)
		goto err_pages;

	if (!dma_map_sg(pfdev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL)) {
		ret = -EINVAL;
		goto err_map;
	}

	mmu_map_sg(pfdev, bo->mmu, addr, IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, sgt);

	bo->is_mapped = true;

	dev_dbg(pfdev->dev, "mapped page fault @ AS%d %llx", as, addr);

	return 0;

err_map:
	sg_free_table(sgt);
err_pages:
	drm_gem_shmem_put_pages(&bo->base);
	return ret;
}

static const char *access_type_name(struct panfrost_device *pfdev,
				    u32 fault_status)
{
	switch (fault_status & AS_FAULTSTATUS_ACCESS_TYPE_MASK) {
	case AS_FAULTSTATUS_ACCESS_TYPE_ATOMIC:
		if (panfrost_has_hw_feature(pfdev, HW_FEATURE_AARCH64_MMU))
			return "ATOMIC";
		else
			return "UNKNOWN";
	case AS_FAULTSTATUS_ACCESS_TYPE_READ:
		return "READ";
	case AS_FAULTSTATUS_ACCESS_TYPE_WRITE:
		return "WRITE";
	case AS_FAULTSTATUS_ACCESS_TYPE_EX:
		return "EXECUTE";
	default:
		WARN_ON(1);
		return NULL;
	}
}

static irqreturn_t panfrost_mmu_irq_handler(int irq, void *data)
{
	struct panfrost_device *pfdev = data;

	if (!mmu_read(pfdev, MMU_INT_STAT))
		return IRQ_NONE;

	mmu_write(pfdev, MMU_INT_MASK, 0);
	return IRQ_WAKE_THREAD;
}

static irqreturn_t panfrost_mmu_irq_handler_thread(int irq, void *data)
{
	struct panfrost_device *pfdev = data;
	u32 status = mmu_read(pfdev, MMU_INT_RAWSTAT);
	int i, ret;

	for (i = 0; status; i++) {
		u32 mask = BIT(i) | BIT(i + 16);
		u64 addr;
		u32 fault_status;
		u32 exception_type;
		u32 access_type;
		u32 source_id;

		if (!(status & mask))
			continue;

		fault_status = mmu_read(pfdev, AS_FAULTSTATUS(i));
		addr = mmu_read(pfdev, AS_FAULTADDRESS_LO(i));
		addr |= (u64)mmu_read(pfdev, AS_FAULTADDRESS_HI(i)) << 32;

		/* decode the fault status */
		exception_type = fault_status & 0xFF;
		access_type = (fault_status >> 8) & 0x3;
		source_id = (fault_status >> 16);

		/* Page fault only */
		if ((status & mask) == BIT(i)) {
			WARN_ON(exception_type < 0xC1 || exception_type > 0xC4);

			ret = panfrost_mmu_map_fault_addr(pfdev, i, addr);
			if (!ret) {
				mmu_write(pfdev, MMU_INT_CLEAR, BIT(i));
				status &= ~mask;
				continue;
			}
		}

		/* terminal fault, print info about the fault */
		dev_err(pfdev->dev,
			"Unhandled Page fault in AS%d at VA 0x%016llX\n"
			"Reason: %s\n"
			"raw fault status: 0x%X\n"
			"decoded fault status: %s\n"
			"exception type 0x%X: %s\n"
			"access type 0x%X: %s\n"
			"source id 0x%X\n",
			i, addr,
			"TODO",
			fault_status,
			(fault_status & (1 << 10) ? "DECODER FAULT" : "SLAVE FAULT"),
			exception_type, panfrost_exception_name(pfdev, exception_type),
			access_type, access_type_name(pfdev, fault_status),
			source_id);

		mmu_write(pfdev, MMU_INT_CLEAR, mask);

		status &= ~mask;
	}

	mmu_write(pfdev, MMU_INT_MASK, ~0);
	return IRQ_HANDLED;
};

int panfrost_mmu_init(struct panfrost_device *pfdev)
{
	int err, irq;

	irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "mmu");
	if (irq <= 0)
		return -ENODEV;

	err = devm_request_threaded_irq(pfdev->dev, irq, panfrost_mmu_irq_handler,
					panfrost_mmu_irq_handler_thread,
					IRQF_SHARED, "mmu", pfdev);

	if (err) {
		dev_err(pfdev->dev, "failed to request mmu irq");
		return err;
	}

	return 0;
}

void panfrost_mmu_fini(struct panfrost_device *pfdev)
{
	mmu_write(pfdev, MMU_INT_MASK, 0);
	mmu_disable(pfdev, 0);
}