xref: /openbmc/linux/arch/sh/kernel/io_trapped.c (revision 002dff36)
// SPDX-License-Identifier: GPL-2.0
/*
 * Trapped io support
 *
 * Copyright (C) 2008 Magnus Damm
 *
 * Intercept io operations by trapping.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/init.h>
#include <asm/mmu_context.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/io_trapped.h>

#define TRAPPED_PAGES_MAX 16

#ifdef CONFIG_HAS_IOPORT_MAP
LIST_HEAD(trapped_io);
EXPORT_SYMBOL_GPL(trapped_io);
#endif
#ifdef CONFIG_HAS_IOMEM
LIST_HEAD(trapped_mem);
EXPORT_SYMBOL_GPL(trapped_mem);
#endif
static DEFINE_SPINLOCK(trapped_lock);

static int trapped_io_disable __read_mostly;

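/*
 * Booting with "noiotrap" on the kernel command line disables all
 * trapped I/O handling: register_trapped_io() becomes a no-op and
 * faults in trapped windows are no longer emulated.
 */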
static int __init trapped_io_setup(char *__unused)
{
	trapped_io_disable = 1;
	return 1;
}
__setup("noiotrap", trapped_io_setup);

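/*
 * Register a page-aligned trapped_io descriptor.  The resource sizes
 * are summed up (each rounded to a full page) and the total is mapped
 * with vmap() using PAGE_NONE protection, so that any access to the
 * resulting window faults.  Every page of the window is backed by the
 * page holding the descriptor itself, which is what later allows
 * lookup_tiop() to find the descriptor from a faulting address.  A
 * descriptor may carry IORESOURCE_IO or IORESOURCE_MEM resources, but
 * not a mix of both.
 */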
int register_trapped_io(struct trapped_io *tiop)
{
	struct resource *res;
	unsigned long len = 0, flags = 0;
	struct page *pages[TRAPPED_PAGES_MAX];
	int k, n;

	if (unlikely(trapped_io_disable))
		return 0;

	/* structure must be page aligned */
	if ((unsigned long)tiop & (PAGE_SIZE - 1))
		goto bad;

	for (k = 0; k < tiop->num_resources; k++) {
		res = tiop->resource + k;
		len += roundup(resource_size(res), PAGE_SIZE);
		flags |= res->flags;
	}

	/* support IORESOURCE_IO _or_ MEM, not both */
	if (hweight_long(flags) != 1)
		goto bad;

	n = len >> PAGE_SHIFT;

	if (n >= TRAPPED_PAGES_MAX)
		goto bad;

	/*
	 * Back every page of the window with the page holding the
	 * descriptor itself, so lookup_tiop() can recover the
	 * descriptor from the PTE of any faulting address inside
	 * the window.
	 */
	for (k = 0; k < n; k++)
		pages[k] = virt_to_page(tiop);

	/* PAGE_NONE: every access to the window faults and is emulated */
	tiop->virt_base = vmap(pages, n, VM_MAP, PAGE_NONE);
	if (!tiop->virt_base)
		goto bad;

	len = 0;
	for (k = 0; k < tiop->num_resources; k++) {
		res = tiop->resource + k;
		pr_info("trapped io 0x%08lx overrides %s 0x%08lx\n",
		       (unsigned long)(tiop->virt_base + len),
		       res->flags & IORESOURCE_IO ? "io" : "mmio",
		       (unsigned long)res->start);
		len += roundup(resource_size(res), PAGE_SIZE);
	}

	tiop->magic = IO_TRAPPED_MAGIC;
	INIT_LIST_HEAD(&tiop->list);
	spin_lock_irq(&trapped_lock);
#ifdef CONFIG_HAS_IOPORT_MAP
	if (flags & IORESOURCE_IO)
		list_add(&tiop->list, &trapped_io);
#endif
#ifdef CONFIG_HAS_IOMEM
	if (flags & IORESOURCE_MEM)
		list_add(&tiop->list, &trapped_mem);
#endif
	spin_unlock_irq(&trapped_lock);

	return 0;
 bad:
	pr_warn("unable to install trapped io filter\n");
	return -1;
}
EXPORT_SYMBOL_GPL(register_trapped_io);

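/*
 * Example usage: a minimal sketch, not taken from a real board file;
 * the foo_* names are hypothetical.  A board places a page-aligned
 * descriptor in .data..page_aligned and registers it early:
 *
 *	static struct resource foo_resources[] = {
 *		DEFINE_RES_MEM(0xa8000000, 0x100),
 *	};
 *
 *	static struct trapped_io foo_trapped __page_aligned_data = {
 *		.resource		= foo_resources,
 *		.num_resources		= ARRAY_SIZE(foo_resources),
 *		.minimum_bus_width	= 16,
 *	};
 *
 *	register_trapped_io(&foo_trapped);
 *
 * Accesses through foo_trapped.virt_base then fault and are replayed
 * against the device at 0xa8000000 using at least 16-bit cycles.
 */

/*
 * Given one of the trapped_io/trapped_mem lists, return the virtual
 * window address covering a resource that starts exactly at @offset.
 * Only res->start is compared; @size takes no part in the match.
 * Returns NULL when no descriptor claims the offset.
 */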
void __iomem *match_trapped_io_handler(struct list_head *list,
				       unsigned long offset,
				       unsigned long size)
{
	unsigned long voffs;
	struct trapped_io *tiop;
	struct resource *res;
	int k, len;
	unsigned long flags;

	spin_lock_irqsave(&trapped_lock, flags);
	list_for_each_entry(tiop, list, list) {
		voffs = 0;
		for (k = 0; k < tiop->num_resources; k++) {
			res = tiop->resource + k;
			if (res->start == offset) {
				spin_unlock_irqrestore(&trapped_lock, flags);
				return tiop->virt_base + voffs;
			}

			len = resource_size(res);
			voffs += roundup(len, PAGE_SIZE);
		}
	}
	spin_unlock_irqrestore(&trapped_lock, flags);
	return NULL;
}
EXPORT_SYMBOL_GPL(match_trapped_io_handler);

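/*
 * Walk the kernel page tables for a faulting address and recover the
 * trapped_io descriptor from the PTE: register_trapped_io() backed
 * the window with the page holding the descriptor, and since the
 * descriptor is page aligned, pfn_to_kaddr() on the PTE's pfn is the
 * descriptor itself.  Returns NULL if the address has no kernel
 * mapping; the access hooks WARN on a bad magic.
 */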
static struct trapped_io *lookup_tiop(unsigned long address)
{
	pgd_t *pgd_k;
	p4d_t *p4d_k;
	pud_t *pud_k;
	pmd_t *pmd_k;
	pte_t *pte_k;
	pte_t entry;

	pgd_k = swapper_pg_dir + pgd_index(address);
	if (!pgd_present(*pgd_k))
		return NULL;

	p4d_k = p4d_offset(pgd_k, address);
	if (!p4d_present(*p4d_k))
		return NULL;

	pud_k = pud_offset(p4d_k, address);
	if (!pud_present(*pud_k))
		return NULL;

	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		return NULL;

	pte_k = pte_offset_kernel(pmd_k, address);
	entry = *pte_k;

	return pfn_to_kaddr(pte_pfn(entry));
}

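/*
 * Translate a faulting virtual address within a trapped window back
 * to the matching physical resource address, walking the resources in
 * the same page-rounded layout that register_trapped_io() mapped.
 * Returns 0 if the address lies beyond the window.
 */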
static unsigned long lookup_address(struct trapped_io *tiop,
				    unsigned long address)
{
	struct resource *res;
	unsigned long vaddr = (unsigned long)tiop->virt_base;
	unsigned long len;
	int k;

	for (k = 0; k < tiop->num_resources; k++) {
		res = tiop->resource + k;
		len = roundup(resource_size(res), PAGE_SIZE);
		if (address < (vaddr + len))
			return res->start + (address - vaddr);
		vaddr += len;
	}
	return 0;
}

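/*
 * Do one emulated transfer: read src_len bytes from src_addr and
 * write the value out as dst_len bytes to dst_addr, using __raw_*
 * accessors of exactly the requested width.  The value is also
 * returned so callers can trace it.
 */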
static unsigned long long copy_word(unsigned long src_addr, int src_len,
				    unsigned long dst_addr, int dst_len)
{
	unsigned long long tmp = 0;

	switch (src_len) {
	case 1:
		tmp = __raw_readb(src_addr);
		break;
	case 2:
		tmp = __raw_readw(src_addr);
		break;
	case 4:
		tmp = __raw_readl(src_addr);
		break;
	case 8:
		tmp = __raw_readq(src_addr);
		break;
	}

	switch (dst_len) {
	case 1:
		__raw_writeb(tmp, dst_addr);
		break;
	case 2:
		__raw_writew(tmp, dst_addr);
		break;
	case 4:
		__raw_writel(tmp, dst_addr);
		break;
	case 8:
		__raw_writeq(tmp, dst_addr);
		break;
	}

	return tmp;
}

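/*
 * mem_access "from" handler: emulate a load from a trapped window.
 * The device is read with at least minimum_bus_width/8 bytes and the
 * value is stored with the instruction's own width (cnt).  Returns 0
 * on success or cnt if the address cannot be translated.
 */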
static unsigned long from_device(void *dst, const void *src, unsigned long cnt)
{
	struct trapped_io *tiop;
	unsigned long src_addr = (unsigned long)src;
	unsigned long long tmp;

	pr_debug("trapped io read 0x%08lx (%lu)\n", src_addr, cnt);
	tiop = lookup_tiop(src_addr);
	WARN_ON(!tiop || (tiop->magic != IO_TRAPPED_MAGIC));

	src_addr = lookup_address(tiop, src_addr);
	if (!src_addr)
		return cnt;

	tmp = copy_word(src_addr,
			max_t(unsigned long, cnt,
			      (tiop->minimum_bus_width / 8)),
			(unsigned long)dst, cnt);

	pr_debug("trapped io read 0x%08lx -> 0x%08llx\n", src_addr, tmp);
	return 0;
}

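/*
 * mem_access "to" handler: emulate a store to a trapped window.  The
 * value is fetched with the instruction's width (cnt) and written to
 * the device with at least minimum_bus_width/8 bytes.  Returns 0 on
 * success or cnt if the address cannot be translated.
 */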
static unsigned long to_device(void *dst, const void *src, unsigned long cnt)
{
	struct trapped_io *tiop;
	unsigned long dst_addr = (unsigned long)dst;
	unsigned long long tmp;

	pr_debug("trapped io write 0x%08lx (%lu)\n", dst_addr, cnt);
	tiop = lookup_tiop(dst_addr);
	WARN_ON(!tiop || (tiop->magic != IO_TRAPPED_MAGIC));

	dst_addr = lookup_address(tiop, dst_addr);
	if (!dst_addr)
		return cnt;

	tmp = copy_word((unsigned long)src, cnt,
			dst_addr, max_t(unsigned long, cnt,
					(tiop->minimum_bus_width / 8)));

	pr_debug("trapped io write 0x%08lx -> 0x%08llx\n", dst_addr, tmp);
	return 0;
}

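/*
 * Handed to handle_unaligned_access() so the faulting instruction is
 * re-executed with its memory operands redirected to the device.
 */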
static struct mem_access trapped_io_access = {
	.from	= from_device,
	.to	= to_device,
};

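/*
 * Entry point from the page fault path.  If the faulting address lies
 * in a trapped window, fetch the faulting instruction and let the
 * unaligned access fixup code replay it through trapped_io_access.
 * Returns nonzero when the fault has been handled.
 */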
int handle_trapped_io(struct pt_regs *regs, unsigned long address)
{
	mm_segment_t oldfs;
	insn_size_t instruction;
	int tmp;

	if (trapped_io_disable)
		return 0;
	if (!lookup_tiop(address))
		return 0;

	WARN_ON(user_mode(regs));

	/*
	 * The faulting instruction lives in kernel space, so lift the
	 * user-space limit while fetching it with copy_from_user().
	 */
	oldfs = get_fs();
	set_fs(KERNEL_DS);
	if (copy_from_user(&instruction, (void __user *)(regs->pc),
			   sizeof(instruction))) {
		set_fs(oldfs);
		return 0;
	}

	tmp = handle_unaligned_access(instruction, regs,
				      &trapped_io_access, 1, address);
	set_fs(oldfs);
	return tmp == 0;
}
299