// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * OpenRISC Linux
 *
 * Linux architectural port borrowing liberally from similar works of
 * others. All original copyrights apply as per the original source
 * declaration.
 *
 * Modifications for the OpenRISC architecture:
 * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
 * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
 *
 * DMA mapping callbacks...
 */

#include <linux/dma-map-ops.h>
#include <linux/pagewalk.h>

#include <asm/cpuinfo.h>
#include <asm/spr_defs.h>
#include <asm/tlbflush.h>

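/*
 * Page-walk callback: mark a single kernel PTE as cache-inhibited,
 * flush the stale TLB entry and write back any dirty dcache lines so
 * the uncached mapping starts out coherent with memory.
 */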
static int
page_set_nocache(pte_t *pte, unsigned long addr,
		 unsigned long next, struct mm_walk *walk)
{
	unsigned long cl;
	struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];

	pte_val(*pte) |= _PAGE_CI;

	/*
	 * Flush the page out of the TLB so that the new page flags get
	 * picked up next time there's an access
	 */
	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);

	/* Flush page out of dcache */
	for (cl = __pa(addr); cl < __pa(next); cl += cpuinfo->dcache_block_size)
		mtspr(SPR_DCBFR, cl);

	return 0;
}

static const struct mm_walk_ops set_nocache_walk_ops = {
	.pte_entry = page_set_nocache,
};

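/*
 * Page-walk callback: the inverse of page_set_nocache. Clear the
 * cache-inhibit bit and flush the stale TLB entry; no dcache flush is
 * needed since the page was uncacheable while the bit was set.
 */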
static int
page_clear_nocache(pte_t *pte, unsigned long addr,
		   unsigned long next, struct mm_walk *walk)
{
	pte_val(*pte) &= ~_PAGE_CI;

	/*
	 * Flush the page out of the TLB so that the new page flags get
	 * picked up next time there's an access
	 */
	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);

	return 0;
}

static const struct mm_walk_ops clear_nocache_walk_ops = {
	.pte_entry = page_clear_nocache,
};

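/*
 * Make an already-allocated coherent buffer uncacheable by walking the
 * kernel page tables for the range and setting the cache-inhibit bit
 * on every PTE. Called by the generic DMA layer for consistent
 * allocations.
 */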
void *arch_dma_set_uncached(void *cpu_addr, size_t size)
{
	unsigned long va = (unsigned long)cpu_addr;
	int error;

	/*
	 * We need to iterate through the pages, clearing the dcache for
	 * them and setting the cache-inhibit bit.
	 */
	mmap_write_lock(&init_mm);
	error = walk_page_range_novma(&init_mm, va, va + size,
			&set_nocache_walk_ops, NULL, NULL);
	mmap_write_unlock(&init_mm);

	if (error)
		return ERR_PTR(error);
	return cpu_addr;
}

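/*
 * Undo arch_dma_set_uncached() before the buffer is freed: walk the
 * same range again and clear the cache-inhibit bit on every PTE.
 */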
void arch_dma_clear_uncached(void *cpu_addr, size_t size)
{
	unsigned long va = (unsigned long)cpu_addr;

	mmap_write_lock(&init_mm);
	/* walk_page_range shouldn't be able to fail here */
	WARN_ON(walk_page_range_novma(&init_mm, va, va + size,
			&clear_nocache_walk_ops, NULL, NULL));
	mmap_write_unlock(&init_mm);
}

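/*
 * Streaming DMA sync: write back (DMA_TO_DEVICE) or invalidate
 * (DMA_FROM_DEVICE) the dcache lines covering the buffer, one cache
 * block at a time via the DCBFR/DCBIR special-purpose registers.
 */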
void arch_sync_dma_for_device(phys_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	unsigned long cl;
	struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];

	switch (dir) {
	case DMA_TO_DEVICE:
		/* Flush the dcache for the requested range */
		for (cl = addr; cl < addr + size;
		     cl += cpuinfo->dcache_block_size)
			mtspr(SPR_DCBFR, cl);
		break;
	case DMA_FROM_DEVICE:
		/* Invalidate the dcache for the requested range */
		for (cl = addr; cl < addr + size;
		     cl += cpuinfo->dcache_block_size)
			mtspr(SPR_DCBIR, cl);
		break;
	default:
		/*
		 * NOTE: If dir == DMA_BIDIRECTIONAL then there's no need to
		 * flush or invalidate the cache here as the area will need
		 * to be manually synced anyway.
		 */
		break;
	}
}
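
/*
 * Illustrative call chain (not part of this file): drivers do not call
 * the hooks above directly; the generic DMA mapping layer does, e.g.:
 *
 *	vaddr = dma_alloc_coherent(dev, size, &dma_handle, GFP_KERNEL);
 *		-> dma_direct_alloc() -> arch_dma_set_uncached()
 *
 *	dma_sync_single_for_device(dev, dma_handle, len, DMA_TO_DEVICE);
 *		-> arch_sync_dma_for_device()
 */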