// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-2023 Intel Corporation
 */

#include <linux/bitfield.h>
#include <linux/highmem.h>

#include "ivpu_drv.h"
#include "ivpu_hw.h"
#include "ivpu_mmu.h"
#include "ivpu_mmu_context.h"

#define IVPU_MMU_PGD_INDEX_MASK          GENMASK(47, 39)
#define IVPU_MMU_PUD_INDEX_MASK          GENMASK(38, 30)
#define IVPU_MMU_PMD_INDEX_MASK          GENMASK(29, 21)
#define IVPU_MMU_PTE_INDEX_MASK          GENMASK(20, 12)
#define IVPU_MMU_ENTRY_FLAGS_MASK        GENMASK(11, 0)
#define IVPU_MMU_ENTRY_FLAG_NG           BIT(11)
#define IVPU_MMU_ENTRY_FLAG_AF           BIT(10)
#define IVPU_MMU_ENTRY_FLAG_USER         BIT(6)
#define IVPU_MMU_ENTRY_FLAG_LLC_COHERENT BIT(2)
#define IVPU_MMU_ENTRY_FLAG_TYPE_PAGE    BIT(1)
#define IVPU_MMU_ENTRY_FLAG_VALID        BIT(0)

#define IVPU_MMU_PAGE_SIZE    SZ_4K
#define IVPU_MMU_PTE_MAP_SIZE (IVPU_MMU_PGTABLE_ENTRIES * IVPU_MMU_PAGE_SIZE)
#define IVPU_MMU_PMD_MAP_SIZE (IVPU_MMU_PGTABLE_ENTRIES * IVPU_MMU_PTE_MAP_SIZE)
#define IVPU_MMU_PUD_MAP_SIZE (IVPU_MMU_PGTABLE_ENTRIES * IVPU_MMU_PMD_MAP_SIZE)
#define IVPU_MMU_PGD_MAP_SIZE (IVPU_MMU_PGTABLE_ENTRIES * IVPU_MMU_PUD_MAP_SIZE)
#define IVPU_MMU_PGTABLE_SIZE (IVPU_MMU_PGTABLE_ENTRIES * sizeof(u64))

#define IVPU_MMU_DUMMY_ADDRESS 0xdeadb000
#define IVPU_MMU_ENTRY_VALID   (IVPU_MMU_ENTRY_FLAG_TYPE_PAGE | IVPU_MMU_ENTRY_FLAG_VALID)
#define IVPU_MMU_ENTRY_INVALID (IVPU_MMU_DUMMY_ADDRESS & ~IVPU_MMU_ENTRY_FLAGS_MASK)
#define IVPU_MMU_ENTRY_MAPPED  (IVPU_MMU_ENTRY_FLAG_AF | IVPU_MMU_ENTRY_FLAG_USER | \
				IVPU_MMU_ENTRY_FLAG_NG | IVPU_MMU_ENTRY_VALID)

static int ivpu_mmu_pgtable_init(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable)
{
	dma_addr_t pgd_dma;
	u64 *pgd;

	pgd = dma_alloc_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, &pgd_dma, GFP_KERNEL);
	if (!pgd)
		return -ENOMEM;

	pgtable->pgd = pgd;
	pgtable->pgd_dma = pgd_dma;

	return 0;
}

static void ivpu_mmu_pgtable_free(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable)
{
	int pgd_idx, pud_idx, pmd_idx;

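	/* Free every allocated PUD, PMD and PTE table before freeing the PGD itself */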
	for (pgd_idx = 0; pgd_idx < IVPU_MMU_PGTABLE_ENTRIES; ++pgd_idx) {
		u64 **pud_entries = pgtable->pgd_cpu_entries[pgd_idx];
		u64 *pud = pgtable->pgd_entries[pgd_idx];

		if (!pud_entries)
			continue;

		for (pud_idx = 0; pud_idx < IVPU_MMU_PGTABLE_ENTRIES; ++pud_idx) {
			u64 **pmd_entries = pgtable->pgd_far_entries[pgd_idx][pud_idx];
			u64 *pmd = pgtable->pgd_cpu_entries[pgd_idx][pud_idx];

			if (!pmd_entries)
				continue;

			for (pmd_idx = 0; pmd_idx < IVPU_MMU_PGTABLE_ENTRIES; ++pmd_idx) {
				if (pmd_entries[pmd_idx])
					dma_free_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE,
						    pmd_entries[pmd_idx],
						    pmd[pmd_idx] & ~IVPU_MMU_ENTRY_FLAGS_MASK);
			}

			kfree(pmd_entries);
			dma_free_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE,
				    pud_entries[pud_idx],
				    pud[pud_idx] & ~IVPU_MMU_ENTRY_FLAGS_MASK);
		}

		kfree(pud_entries);
		dma_free_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, pgtable->pgd_entries[pgd_idx],
			    pgtable->pgd[pgd_idx] & ~IVPU_MMU_ENTRY_FLAGS_MASK);
	}

	dma_free_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, pgtable->pgd,
		    pgtable->pgd_dma & ~IVPU_MMU_ENTRY_FLAGS_MASK);
}

static u64*
ivpu_mmu_ensure_pud(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable, int pgd_idx)
{
	u64 ***far_pud_entries;
	u64 **pud_entries;
	dma_addr_t pud_dma;
	u64 *pud;

	if (pgtable->pgd_entries[pgd_idx])
		return pgtable->pgd_entries[pgd_idx];

	pud = dma_alloc_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, &pud_dma, GFP_KERNEL);
	if (!pud)
		return NULL;

	pud_entries = kzalloc(IVPU_MMU_PGTABLE_SIZE, GFP_KERNEL);
	if (!pud_entries)
		goto err_free_pud;

	far_pud_entries = kzalloc(IVPU_MMU_PGTABLE_SIZE, GFP_KERNEL);
	if (!far_pud_entries)
		goto err_free_pud_entries;

	pgtable->pgd[pgd_idx] = pud_dma | IVPU_MMU_ENTRY_VALID;
	pgtable->pgd_entries[pgd_idx] = pud;
	pgtable->pgd_cpu_entries[pgd_idx] = pud_entries;
	pgtable->pgd_far_entries[pgd_idx] = far_pud_entries;

	return pud;

err_free_pud_entries:
	kfree(pud_entries);

err_free_pud:
	dma_free_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, pud, pud_dma);
	return NULL;
}

static u64*
ivpu_mmu_ensure_pmd(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable,
		    int pgd_idx, int pud_idx)
{
	u64 **pmd_entries;
	dma_addr_t pmd_dma;
	u64 *pmd;

	if (pgtable->pgd_cpu_entries[pgd_idx][pud_idx])
		return pgtable->pgd_cpu_entries[pgd_idx][pud_idx];

	pmd = dma_alloc_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, &pmd_dma, GFP_KERNEL);
	if (!pmd)
		return NULL;

	pmd_entries = kzalloc(IVPU_MMU_PGTABLE_SIZE, GFP_KERNEL);
	if (!pmd_entries)
		goto err_free_pmd;

	pgtable->pgd_entries[pgd_idx][pud_idx] = pmd_dma | IVPU_MMU_ENTRY_VALID;
	pgtable->pgd_cpu_entries[pgd_idx][pud_idx] = pmd;
	pgtable->pgd_far_entries[pgd_idx][pud_idx] = pmd_entries;

	return pmd;

err_free_pmd:
	dma_free_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, pmd, pmd_dma);
	return NULL;
}

static u64*
ivpu_mmu_ensure_pte(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable,
		    int pgd_idx, int pud_idx, int pmd_idx)
{
	dma_addr_t pte_dma;
	u64 *pte;

	if (pgtable->pgd_far_entries[pgd_idx][pud_idx][pmd_idx])
		return pgtable->pgd_far_entries[pgd_idx][pud_idx][pmd_idx];

	pte = dma_alloc_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, &pte_dma, GFP_KERNEL);
	if (!pte)
		return NULL;

	pgtable->pgd_cpu_entries[pgd_idx][pud_idx][pmd_idx] = pte_dma | IVPU_MMU_ENTRY_VALID;
	pgtable->pgd_far_entries[pgd_idx][pud_idx][pmd_idx] = pte;

	return pte;
}

static int
ivpu_mmu_context_map_page(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
			  u64 vpu_addr, dma_addr_t dma_addr, int prot)
{
	u64 *pte;
	int pgd_idx = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr);
	int pud_idx = FIELD_GET(IVPU_MMU_PUD_INDEX_MASK, vpu_addr);
	int pmd_idx = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr);
	int pte_idx = FIELD_GET(IVPU_MMU_PTE_INDEX_MASK, vpu_addr);

	/* Allocate PUD - second level page table if needed */
	if (!ivpu_mmu_ensure_pud(vdev, &ctx->pgtable, pgd_idx))
		return -ENOMEM;

	/* Allocate PMD - third level page table if needed */
	if (!ivpu_mmu_ensure_pmd(vdev, &ctx->pgtable, pgd_idx, pud_idx))
		return -ENOMEM;

	/* Allocate PTE - fourth level page table if needed */
	pte = ivpu_mmu_ensure_pte(vdev, &ctx->pgtable, pgd_idx, pud_idx, pmd_idx);
	if (!pte)
		return -ENOMEM;

	/* Update PTE - fourth level page table with DMA address */
	pte[pte_idx] = dma_addr | prot;

	return 0;
}

static void ivpu_mmu_context_unmap_page(struct ivpu_mmu_context *ctx, u64 vpu_addr)
{
	int pgd_idx = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr);
	int pud_idx = FIELD_GET(IVPU_MMU_PUD_INDEX_MASK, vpu_addr);
	int pmd_idx = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr);
	int pte_idx = FIELD_GET(IVPU_MMU_PTE_INDEX_MASK, vpu_addr);

	/* Update PTE with dummy physical address and clear flags */
	ctx->pgtable.pgd_far_entries[pgd_idx][pud_idx][pmd_idx][pte_idx] = IVPU_MMU_ENTRY_INVALID;
}

static void
ivpu_mmu_context_flush_page_tables(struct ivpu_mmu_context *ctx, u64 vpu_addr, size_t size)
{
	u64 end_addr = vpu_addr + size;
	u64 *pgd = ctx->pgtable.pgd;

	/* Align to PMD entry (2 MB) */
	vpu_addr &= ~(IVPU_MMU_PTE_MAP_SIZE - 1);
	while (vpu_addr < end_addr) {
		int pgd_idx = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr);
		u64 pud_end = (pgd_idx + 1) * (u64)IVPU_MMU_PUD_MAP_SIZE;
		u64 *pud = ctx->pgtable.pgd_entries[pgd_idx];

		while (vpu_addr < end_addr && vpu_addr < pud_end) {
			int pud_idx = FIELD_GET(IVPU_MMU_PUD_INDEX_MASK, vpu_addr);
			u64 pmd_end = (pud_idx + 1) * (u64)IVPU_MMU_PMD_MAP_SIZE;
			u64 *pmd = ctx->pgtable.pgd_cpu_entries[pgd_idx][pud_idx];

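			/* Flush the PTE table backing each PMD entry in range */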
			while (vpu_addr < end_addr && vpu_addr < pmd_end) {
				int pmd_idx = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr);
				u64 *pte = ctx->pgtable.pgd_far_entries
					   [pgd_idx][pud_idx][pmd_idx];

				clflush_cache_range(pte, IVPU_MMU_PGTABLE_SIZE);
				vpu_addr += IVPU_MMU_PTE_MAP_SIZE;
			}
			clflush_cache_range(pmd, IVPU_MMU_PGTABLE_SIZE);
		}
		clflush_cache_range(pud, IVPU_MMU_PGTABLE_SIZE);
	}
	clflush_cache_range(pgd, IVPU_MMU_PGTABLE_SIZE);
}

static int
ivpu_mmu_context_map_pages(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
			   u64 vpu_addr, dma_addr_t dma_addr, size_t size, int prot)
{
	while (size) {
		int ret = ivpu_mmu_context_map_page(vdev, ctx, vpu_addr, dma_addr, prot);

		if (ret)
			return ret;

		vpu_addr += IVPU_MMU_PAGE_SIZE;
		dma_addr += IVPU_MMU_PAGE_SIZE;
		size -= IVPU_MMU_PAGE_SIZE;
	}

	return 0;
}

static void ivpu_mmu_context_unmap_pages(struct ivpu_mmu_context *ctx, u64 vpu_addr, size_t size)
{
	while (size) {
		ivpu_mmu_context_unmap_page(ctx, vpu_addr);
		vpu_addr += IVPU_MMU_PAGE_SIZE;
		size -= IVPU_MMU_PAGE_SIZE;
	}
}

int
ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
			 u64 vpu_addr, struct sg_table *sgt, bool llc_coherent)
{
	struct scatterlist *sg;
	int prot;
	int ret;
	u64 i;

	if (!IS_ALIGNED(vpu_addr, IVPU_MMU_PAGE_SIZE))
		return -EINVAL;
	/*
	 * VPU is only 32 bit, but DMA engine is 38 bit
	 * Ranges < 2 GB are reserved for VPU internal registers
	 * Limit range to 8 GB
	 */
	if (vpu_addr < SZ_2G || vpu_addr > SZ_8G)
		return -EINVAL;

	prot = IVPU_MMU_ENTRY_MAPPED;
	if (llc_coherent)
		prot |= IVPU_MMU_ENTRY_FLAG_LLC_COHERENT;

	mutex_lock(&ctx->lock);

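	/* Map each DMA-mapped segment and flush the page table entries it touches */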
	for_each_sgtable_dma_sg(sgt, sg, i) {
		u64 dma_addr = sg_dma_address(sg) - sg->offset;
		size_t size = sg_dma_len(sg) + sg->offset;

		ret = ivpu_mmu_context_map_pages(vdev, ctx, vpu_addr, dma_addr, size, prot);
		if (ret) {
			ivpu_err(vdev, "Failed to map context pages\n");
			mutex_unlock(&ctx->lock);
			return ret;
		}
		ivpu_mmu_context_flush_page_tables(ctx, vpu_addr, size);
		vpu_addr += size;
	}

	mutex_unlock(&ctx->lock);

	ret = ivpu_mmu_invalidate_tlb(vdev, ctx->id);
	if (ret)
		ivpu_err(vdev, "Failed to invalidate TLB for ctx %u: %d\n", ctx->id, ret);
	return ret;
}

void
ivpu_mmu_context_unmap_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
			   u64 vpu_addr, struct sg_table *sgt)
{
	struct scatterlist *sg;
	int ret;
	u64 i;

	if (!IS_ALIGNED(vpu_addr, IVPU_MMU_PAGE_SIZE))
		ivpu_warn(vdev, "Unaligned vpu_addr: 0x%llx\n", vpu_addr);

	mutex_lock(&ctx->lock);

	for_each_sgtable_dma_sg(sgt, sg, i) {
		size_t size = sg_dma_len(sg) + sg->offset;

		ivpu_mmu_context_unmap_pages(ctx, vpu_addr, size);
		ivpu_mmu_context_flush_page_tables(ctx, vpu_addr, size);
		vpu_addr += size;
	}

	mutex_unlock(&ctx->lock);

	ret = ivpu_mmu_invalidate_tlb(vdev, ctx->id);
	if (ret)
		ivpu_warn(vdev, "Failed to invalidate TLB for ctx %u: %d\n", ctx->id, ret);
}

int
ivpu_mmu_context_insert_node_locked(struct ivpu_mmu_context *ctx,
				    const struct ivpu_addr_range *range,
				    u64 size, struct drm_mm_node *node)
{
	lockdep_assert_held(&ctx->lock);

	return drm_mm_insert_node_in_range(&ctx->mm, node, size, IVPU_MMU_PAGE_SIZE,
					   0, range->start, range->end, DRM_MM_INSERT_BEST);
}

void
ivpu_mmu_context_remove_node_locked(struct ivpu_mmu_context *ctx, struct drm_mm_node *node)
{
	lockdep_assert_held(&ctx->lock);

	drm_mm_remove_node(node);
}

static int
ivpu_mmu_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u32 context_id)
{
	u64 start, end;
	int ret;

	mutex_init(&ctx->lock);
	INIT_LIST_HEAD(&ctx->bo_list);

	ret = ivpu_mmu_pgtable_init(vdev, &ctx->pgtable);
	if (ret)
		return ret;

	if (!context_id) {
		start = vdev->hw->ranges.global_low.start;
		end = vdev->hw->ranges.global_high.end;
	} else {
		start = vdev->hw->ranges.user_low.start;
		end = vdev->hw->ranges.user_high.end;
	}

	drm_mm_init(&ctx->mm, start, end - start);
	ctx->id = context_id;

	return 0;
}

static void ivpu_mmu_context_fini(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx)
{
	drm_WARN_ON(&vdev->drm, !ctx->pgtable.pgd);

	mutex_destroy(&ctx->lock);
	ivpu_mmu_pgtable_free(vdev, &ctx->pgtable);
	drm_mm_takedown(&ctx->mm);
}

int ivpu_mmu_global_context_init(struct ivpu_device *vdev)
{
	return ivpu_mmu_context_init(vdev, &vdev->gctx, IVPU_GLOBAL_CONTEXT_MMU_SSID);
}

void ivpu_mmu_global_context_fini(struct ivpu_device *vdev)
{
	return ivpu_mmu_context_fini(vdev, &vdev->gctx);
}

void ivpu_mmu_user_context_mark_invalid(struct ivpu_device *vdev, u32 ssid)
{
	struct ivpu_file_priv *file_priv;

	xa_lock(&vdev->context_xa);

	file_priv = xa_load(&vdev->context_xa, ssid);
	if (file_priv)
		file_priv->has_mmu_faults = true;

	xa_unlock(&vdev->context_xa);
}

int ivpu_mmu_user_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u32 ctx_id)
{
	int ret;

	drm_WARN_ON(&vdev->drm, !ctx_id);

	ret = ivpu_mmu_context_init(vdev, ctx, ctx_id);
	if (ret) {
		ivpu_err(vdev, "Failed to initialize context: %d\n", ret);
		return ret;
	}

	ret = ivpu_mmu_set_pgtable(vdev, ctx_id, &ctx->pgtable);
	if (ret) {
		ivpu_err(vdev, "Failed to set page table: %d\n", ret);
		goto err_context_fini;
	}

	return 0;

err_context_fini:
	ivpu_mmu_context_fini(vdev, ctx);
	return ret;
}

void ivpu_mmu_user_context_fini(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx)
{
	drm_WARN_ON(&vdev->drm, !ctx->id);

	ivpu_mmu_clear_pgtable(vdev, ctx->id);
	ivpu_mmu_context_fini(vdev, ctx);
}