/*
 * arch/sh/kernel/cpu/sh4/sq.c
 *
 * General management API for SH-4 integrated Store Queues
 *
 * Copyright (C) 2001 - 2006  Paul Mundt
 * Copyright (C) 2001, 2002  M. R. Brown
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/bitmap.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/prefetch.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <cpu/sq.h>

struct sq_mapping;

struct sq_mapping {
	const char *name;

	unsigned long sq_addr;
	unsigned long addr;
	unsigned int size;

	struct sq_mapping *next;
};

static struct sq_mapping *sq_mapping_list;
static DEFINE_SPINLOCK(sq_mapping_lock);
static struct kmem_cache *sq_cache;
static unsigned long *sq_bitmap;

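/*
 * Completion barrier used after flushing the queues: the dummy read and the
 * writes to both queues touch the SQ area itself, ensuring any outstanding
 * store queue transfer has finished before execution continues (see
 * sq_flush_range()).
 */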
#define store_queue_barrier()			\
do {						\
	(void)__raw_readl(P4SEG_STORE_QUE);	\
	__raw_writel(0, P4SEG_STORE_QUE + 0);	\
	__raw_writel(0, P4SEG_STORE_QUE + 8);	\
} while (0)

/**
 * sq_flush_range - Flush (prefetch) a specific SQ range
 * @start: the store queue address to start flushing from
 * @len: the length to flush
 *
 * Flushes the store queue cache from @start to @start + @len in a
 * linear fashion. Note that @len is truncated down to a multiple of the
 * 32-byte SQ transfer size; any partial trailing line is not flushed.
 */
void sq_flush_range(unsigned long start, unsigned int len)
{
	unsigned long *sq = (unsigned long *)start;

	/* Flush the queues */
	for (len >>= 5; len--; sq += 8)
		prefetchw(sq);

	/* Wait for completion */
	store_queue_barrier();
}
EXPORT_SYMBOL(sq_flush_range);
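
/*
 * Illustrative sketch (not compiled as part of this file): pushing a buffer
 * out through an existing store queue mapping. "sq_va", "src_buf" and "len"
 * are placeholders; sq_va is assumed to have come from a prior sq_remap()
 * call, and len is assumed to be a multiple of the 32-byte SQ transfer size.
 *
 *	memcpy((void *)sq_va, src_buf, len);
 *	sq_flush_range(sq_va, len);
 */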

static inline void sq_mapping_list_add(struct sq_mapping *map)
{
	struct sq_mapping **p, *tmp;

	spin_lock_irq(&sq_mapping_lock);

	p = &sq_mapping_list;
	while ((tmp = *p) != NULL)
		p = &tmp->next;

	map->next = tmp;
	*p = map;

	spin_unlock_irq(&sq_mapping_lock);
}

static inline void sq_mapping_list_del(struct sq_mapping *map)
{
	struct sq_mapping **p, *tmp;

	spin_lock_irq(&sq_mapping_lock);

	for (p = &sq_mapping_list; (tmp = *p); p = &tmp->next)
		if (tmp == map) {
			*p = tmp->next;
			break;
		}

	spin_unlock_irq(&sq_mapping_lock);
}

static int __sq_remap(struct sq_mapping *map, pgprot_t prot)
{
#if defined(CONFIG_MMU)
	struct vm_struct *vma;

	vma = __get_vm_area(map->size, VM_ALLOC, map->sq_addr, SQ_ADDRMAX);
	if (!vma)
		return -ENOMEM;

	vma->phys_addr = map->addr;

	if (ioremap_page_range((unsigned long)vma->addr,
			       (unsigned long)vma->addr + map->size,
			       vma->phys_addr, prot)) {
		vunmap(vma->addr);
		return -EAGAIN;
	}
#else
	/*
	 * Without an MMU (or with it turned off), this is much more
	 * straightforward, as we can just load up each queue's QACR with
	 * the physical address appropriately masked.
	 */
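	/*
	 * QACR0/QACR1 bits [4:2] select the external memory area, i.e.
	 * physical address bits [28:26], hence the shift and the 0x1c mask.
	 */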
	__raw_writel(((map->addr >> 26) << 2) & 0x1c, SQ_QACR0);
	__raw_writel(((map->addr >> 26) << 2) & 0x1c, SQ_QACR1);
#endif

	return 0;
}

/**
 * sq_remap - Map a physical address through the Store Queues
 * @phys: Physical address of mapping.
 * @size: Length of mapping.
 * @name: Name of the caller requesting the mapping.
 * @prot: Protection bits.
 *
 * Remaps the physical address @phys through the next available store queue
 * address of @size length. @name is logged when the mapping is created and
 * is also exposed through the sysfs interface.
 *
 * Returns the store queue address of the mapping on success, or a negative
 * errno (encoded in the unsigned long return value) on failure.
 */
unsigned long sq_remap(unsigned long phys, unsigned int size,
		       const char *name, pgprot_t prot)
{
	struct sq_mapping *map;
	unsigned long end;
	unsigned int psz;
	int ret, page;

	/* Don't allow wraparound or zero size */
	end = phys + size - 1;
	if (unlikely(!size || end < phys))
		return -EINVAL;
	/* Don't allow anyone to remap normal memory. */
	if (unlikely(phys < virt_to_phys(high_memory)))
		return -EINVAL;

	phys &= PAGE_MASK;
	size = PAGE_ALIGN(end + 1) - phys;

	map = kmem_cache_alloc(sq_cache, GFP_KERNEL);
	if (unlikely(!map))
		return -ENOMEM;

	map->addr = phys;
	map->size = size;
	map->name = name;

	page = bitmap_find_free_region(sq_bitmap, 0x04000000 >> PAGE_SHIFT,
				       get_order(map->size));
	if (unlikely(page < 0)) {
		ret = -ENOSPC;
		goto out;
	}

	map->sq_addr = P4SEG_STORE_QUE + (page << PAGE_SHIFT);

	ret = __sq_remap(map, prot);
	if (unlikely(ret != 0)) {
		bitmap_release_region(sq_bitmap, page, get_order(map->size));
		goto out;
	}

	psz = (size + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	pr_info("sqremap: %15s  [%4d page%s]  va 0x%08lx   pa 0x%08lx\n",
		likely(map->name) ? map->name : "???",
		psz, psz == 1 ? " " : "s",
		map->sq_addr, map->addr);

	sq_mapping_list_add(map);

	return map->sq_addr;

out:
	kmem_cache_free(sq_cache, map);
	return ret;
}
EXPORT_SYMBOL(sq_remap);

/**
 * sq_unmap - Unmap a Store Queue allocation
 * @vaddr: Store queue mapping address previously returned by sq_remap().
 *
 * Unmaps the store queue allocation at @vaddr that was previously created
 * by sq_remap(). Also frees up the pte that was previously inserted into
 * the kernel page table and discards the UTLB translation.
 */
void sq_unmap(unsigned long vaddr)
{
	struct sq_mapping **p, *map;
	int page;

	for (p = &sq_mapping_list; (map = *p); p = &map->next)
		if (map->sq_addr == vaddr)
			break;

	if (unlikely(!map)) {
		printk(KERN_ERR "%s: bad store queue address 0x%08lx\n",
		       __func__, vaddr);
		return;
	}

	page = (map->sq_addr - P4SEG_STORE_QUE) >> PAGE_SHIFT;
	bitmap_release_region(sq_bitmap, page, get_order(map->size));

#ifdef CONFIG_MMU
	{
		/*
		 * Tear down the VMA in the MMU case.
		 */
		struct vm_struct *vma;

		vma = remove_vm_area((void *)(map->sq_addr & PAGE_MASK));
		if (!vma) {
			printk(KERN_ERR "%s: bad address 0x%08lx\n",
			       __func__, map->sq_addr);
			return;
		}
	}
#endif

	sq_mapping_list_del(map);

	kmem_cache_free(sq_cache, map);
}
EXPORT_SYMBOL(sq_unmap);
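
/*
 * Typical use of the API above, as an illustrative sketch only; the device
 * address, size and buffer (MY_DEV_PHYS, MY_DEV_SIZE, buf) are hypothetical
 * placeholders, not names defined anywhere in this file:
 *
 *	unsigned long sq_va;
 *
 *	sq_va = sq_remap(MY_DEV_PHYS, MY_DEV_SIZE, "mydev", PAGE_SHARED);
 *	if (IS_ERR_VALUE(sq_va))
 *		return (int)sq_va;
 *
 *	memcpy((void *)sq_va, buf, MY_DEV_SIZE);
 *	sq_flush_range(sq_va, MY_DEV_SIZE);
 *
 *	sq_unmap(sq_va);
 */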

/*
 * Needlessly complex sysfs interface. Unfortunately it doesn't seem like
 * there is any other easy way to add things on a per-cpu basis without
 * putting the directory entries somewhere stupid and having to create
 * links in sysfs by hand back in to the per-cpu directories.
 *
 * Some day we may want to have an additional abstraction per store
 * queue, but considering the kobject hell we already have to deal with,
 * it's simply not worth the trouble.
 */
static struct kobject *sq_kobject[NR_CPUS];

struct sq_sysfs_attr {
	struct attribute attr;
	ssize_t (*show)(char *buf);
	ssize_t (*store)(const char *buf, size_t count);
};

#define to_sq_sysfs_attr(a)	container_of(a, struct sq_sysfs_attr, attr)

static ssize_t sq_sysfs_show(struct kobject *kobj, struct attribute *attr,
			     char *buf)
{
	struct sq_sysfs_attr *sattr = to_sq_sysfs_attr(attr);

	if (likely(sattr->show))
		return sattr->show(buf);

	return -EIO;
}

static ssize_t sq_sysfs_store(struct kobject *kobj, struct attribute *attr,
			      const char *buf, size_t count)
{
	struct sq_sysfs_attr *sattr = to_sq_sysfs_attr(attr);

	if (likely(sattr->store))
		return sattr->store(buf, count);

	return -EIO;
}

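/*
 * The "mapping" attribute below is the per-cpu user interface, e.g.
 * /sys/devices/system/cpu/cpu0/sq/mapping. Reading it lists the active
 * mappings; writing "<phys> <size>" (both in hex) creates a new mapping,
 * and writing "<sqaddr> 0" tears an existing one down.
 */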
static ssize_t mapping_show(char *buf)
{
	struct sq_mapping **list, *entry;
	char *p = buf;

	for (list = &sq_mapping_list; (entry = *list); list = &entry->next)
		p += sprintf(p, "%08lx-%08lx [%08lx]: %s\n",
			     entry->sq_addr, entry->sq_addr + entry->size,
			     entry->addr, entry->name);

	return p - buf;
}

static ssize_t mapping_store(const char *buf, size_t count)
{
	unsigned long base = 0, len = 0;

	sscanf(buf, "%lx %lx", &base, &len);
	if (!base)
		return -EIO;

	if (likely(len)) {
		int ret = sq_remap(base, len, "Userspace", PAGE_SHARED);
		if (ret < 0)
			return ret;
	} else
		sq_unmap(base);

	return count;
}

static struct sq_sysfs_attr mapping_attr =
	__ATTR(mapping, 0644, mapping_show, mapping_store);

static struct attribute *sq_sysfs_attrs[] = {
	&mapping_attr.attr,
	NULL,
};

static const struct sysfs_ops sq_sysfs_ops = {
	.show	= sq_sysfs_show,
	.store	= sq_sysfs_store,
};

static struct kobj_type ktype_percpu_entry = {
	.sysfs_ops	= &sq_sysfs_ops,
	.default_attrs	= sq_sysfs_attrs,
};

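/*
 * These callbacks run once per CPU device when the subsys interface below
 * is registered/unregistered, creating and destroying the per-cpu "sq"
 * kobject that hosts the mapping attribute.
 */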
static int sq_dev_add(struct device *dev, struct subsys_interface *sif)
{
	unsigned int cpu = dev->id;
	struct kobject *kobj;
	int error;

	sq_kobject[cpu] = kzalloc(sizeof(struct kobject), GFP_KERNEL);
	if (unlikely(!sq_kobject[cpu]))
		return -ENOMEM;

	kobj = sq_kobject[cpu];
	error = kobject_init_and_add(kobj, &ktype_percpu_entry, &dev->kobj,
				     "%s", "sq");
	if (!error)
		kobject_uevent(kobj, KOBJ_ADD);
	return error;
}

static void sq_dev_remove(struct device *dev, struct subsys_interface *sif)
{
	unsigned int cpu = dev->id;
	struct kobject *kobj = sq_kobject[cpu];

	kobject_put(kobj);
}

static struct subsys_interface sq_interface = {
	.name		= "sq",
	.subsys		= &cpu_subsys,
	.add_dev	= sq_dev_add,
	.remove_dev	= sq_dev_remove,
};

static int __init sq_api_init(void)
{
	unsigned int nr_pages = 0x04000000 >> PAGE_SHIFT;
	unsigned int size = BITS_TO_LONGS(nr_pages) * sizeof(unsigned long);
	int ret = -ENOMEM;

	printk(KERN_NOTICE "sq: Registering store queue API.\n");

	sq_cache = kmem_cache_create("store_queue_cache",
				sizeof(struct sq_mapping), 0, 0, NULL);
	if (unlikely(!sq_cache))
		return ret;

	sq_bitmap = kzalloc(size, GFP_KERNEL);
	if (unlikely(!sq_bitmap))
		goto out;

	ret = subsys_interface_register(&sq_interface);
	if (unlikely(ret != 0))
		goto out;

	return 0;

out:
	kfree(sq_bitmap);
	kmem_cache_destroy(sq_cache);

	return ret;
}

static void __exit sq_api_exit(void)
{
	subsys_interface_unregister(&sq_interface);
	kfree(sq_bitmap);
	kmem_cache_destroy(sq_cache);
}

module_init(sq_api_init);
module_exit(sq_api_exit);

MODULE_AUTHOR("Paul Mundt <lethal@linux-sh.org>, M. R. Brown <mrbrown@0xd6.org>");
MODULE_DESCRIPTION("Simple API for SH-4 integrated Store Queues");
MODULE_LICENSE("GPL");