// SPDX-License-Identifier: GPL-2.0
/*
 * arch/sh/kernel/cpu/sh4/sq.c
 *
 * General management API for SH-4 integrated Store Queues
 *
 * Copyright (C) 2001 - 2006  Paul Mundt
 * Copyright (C) 2001, 2002  M. R. Brown
 */
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/bitmap.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/prefetch.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <cpu/sq.h>

struct sq_mapping;

struct sq_mapping {
	const char *name;

	unsigned long sq_addr;
	unsigned long addr;
	unsigned int size;

	struct sq_mapping *next;
};

static struct sq_mapping *sq_mapping_list;
static DEFINE_SPINLOCK(sq_mapping_lock);
static struct kmem_cache *sq_cache;
static unsigned long *sq_bitmap;

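/*
 * Completion barrier for SQ writeback: a dummy read from the SQ area
 * followed by a pair of writes back into it. The flush helpers below
 * rely on this sequence to ensure any outstanding store queue transfer
 * has drained before they return.
 */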
#define store_queue_barrier()			\
do {						\
	(void)__raw_readl(P4SEG_STORE_QUE);	\
	__raw_writel(0, P4SEG_STORE_QUE + 0);	\
	__raw_writel(0, P4SEG_STORE_QUE + 8);	\
} while (0)

/**
 * sq_flush_range - Flush (prefetch) a specific SQ range
 * @start: the store queue address to start flushing from
 * @len: the length to flush
 *
 * Flushes the store queue cache from @start to @start + @len in a
 * linear fashion.
 */
void sq_flush_range(unsigned long start, unsigned int len)
{
	unsigned long *sq = (unsigned long *)start;

	/* Flush the queues */
	for (len >>= 5; len--; sq += 8)
		prefetchw(sq);

	/* Wait for completion */
	store_queue_barrier();
}
EXPORT_SYMBOL(sq_flush_range);
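
/*
 * Usage sketch (illustrative only; "phys" and "data" are placeholders):
 *
 *	unsigned long sq = sq_remap(phys, PAGE_SIZE, "example", PAGE_SHARED);
 *	u32 *p = (u32 *)sq;
 *	int i;
 *
 *	for (i = 0; i < 8; i++)		// fill one 32-byte queue
 *		p[i] = data[i];
 *	sq_flush_range(sq, 32);		// push it out to memory
 */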

static inline void sq_mapping_list_add(struct sq_mapping *map)
{
	struct sq_mapping **p, *tmp;

	spin_lock_irq(&sq_mapping_lock);

	p = &sq_mapping_list;
	while ((tmp = *p) != NULL)
		p = &tmp->next;

	map->next = tmp;
	*p = map;

	spin_unlock_irq(&sq_mapping_lock);
}

static inline void sq_mapping_list_del(struct sq_mapping *map)
{
	struct sq_mapping **p, *tmp;

	spin_lock_irq(&sq_mapping_lock);

	for (p = &sq_mapping_list; (tmp = *p); p = &tmp->next)
		if (tmp == map) {
			*p = tmp->next;
			break;
		}

	spin_unlock_irq(&sq_mapping_lock);
}

static int __sq_remap(struct sq_mapping *map, pgprot_t prot)
{
#if defined(CONFIG_MMU)
	struct vm_struct *vma;

	vma = __get_vm_area_caller(map->size, VM_IOREMAP, map->sq_addr,
			SQ_ADDRMAX, __builtin_return_address(0));
	if (!vma)
		return -ENOMEM;

	vma->phys_addr = map->addr;

	if (ioremap_page_range((unsigned long)vma->addr,
			       (unsigned long)vma->addr + map->size,
			       vma->phys_addr, prot)) {
		vunmap(vma->addr);
		return -EAGAIN;
	}
#else
	/*
	 * Without an MMU (or with it turned off), this is much more
	 * straightforward, as we can just load up each queue's QACR with
	 * the physical address appropriately masked.
	 */
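	/*
	 * QACR0/QACR1 bits [4:2] select the target external memory
	 * area, so the masking below lifts physical address bits
	 * [28:26] into place (a reading of the SH-4 QACR encoding).
	 */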
	__raw_writel(((map->addr >> 26) << 2) & 0x1c, SQ_QACR0);
	__raw_writel(((map->addr >> 26) << 2) & 0x1c, SQ_QACR1);
#endif

	return 0;
}

/**
 * sq_remap - Map a physical address through the Store Queues
 * @phys: Physical address of mapping.
 * @size: Length of mapping.
 * @name: Name of the user invoking the mapping.
 * @prot: Protection bits.
 *
 * Remaps the physical address @phys through the next available store queue
 * address of @size length. @name is logged at mapping time as well as
 * through the sysfs interface.
 */
unsigned long sq_remap(unsigned long phys, unsigned int size,
		       const char *name, pgprot_t prot)
{
	struct sq_mapping *map;
	unsigned long end;
	unsigned int psz;
	int ret, page;

	/* Don't allow wraparound or zero size */
	end = phys + size - 1;
	if (unlikely(!size || end < phys))
		return -EINVAL;
	/* Don't allow anyone to remap normal memory.. */
	if (unlikely(phys < virt_to_phys(high_memory)))
		return -EINVAL;

	phys &= PAGE_MASK;
	size = PAGE_ALIGN(end + 1) - phys;

	map = kmem_cache_alloc(sq_cache, GFP_KERNEL);
	if (unlikely(!map))
		return -ENOMEM;

	map->addr = phys;
	map->size = size;
	map->name = name;

	page = bitmap_find_free_region(sq_bitmap, 0x04000000 >> PAGE_SHIFT,
				       get_order(map->size));
	if (unlikely(page < 0)) {
		ret = -ENOSPC;
		goto out;
	}

	map->sq_addr = P4SEG_STORE_QUE + (page << PAGE_SHIFT);

	ret = __sq_remap(map, prot);
	if (unlikely(ret != 0))
		goto out;

	psz = (size + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	pr_info("sqremap: %15s  [%4d page%s]  va 0x%08lx   pa 0x%08lx\n",
		likely(map->name) ? map->name : "???",
		psz, psz == 1 ? " " : "s",
		map->sq_addr, map->addr);

	sq_mapping_list_add(map);

	return map->sq_addr;

out:
	kmem_cache_free(sq_cache, map);
	return ret;
}
EXPORT_SYMBOL(sq_remap);
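
/*
 * Typical call sequence (a sketch; MY_DEV_PHYS is a made-up address):
 *
 *	unsigned long sq;
 *
 *	sq = sq_remap(MY_DEV_PHYS, 0x1000, "mydev", PAGE_SHARED);
 *	if (IS_ERR_VALUE(sq))
 *		return (int)sq;
 *
 *	// ... burst writes through sq, sq_flush_range() as needed ...
 *
 *	sq_unmap(sq);
 */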

/**
 * sq_unmap - Unmap a Store Queue allocation
 * @vaddr: Pre-allocated Store Queue mapping.
 *
 * Unmaps the store queue allocation at @vaddr that was previously created
 * by sq_remap(). Also frees up the pte that was previously inserted into
 * the kernel page table and discards the UTLB translation.
 */
void sq_unmap(unsigned long vaddr)
{
	struct sq_mapping **p, *map;
	int page;

	for (p = &sq_mapping_list; (map = *p); p = &map->next)
		if (map->sq_addr == vaddr)
			break;

	if (unlikely(!map)) {
		printk(KERN_ERR "%s: bad store queue address 0x%08lx\n",
		       __func__, vaddr);
		return;
	}

	page = (map->sq_addr - P4SEG_STORE_QUE) >> PAGE_SHIFT;
	bitmap_release_region(sq_bitmap, page, get_order(map->size));

#ifdef CONFIG_MMU
	{
		/*
		 * Tear down the VMA in the MMU case.
		 */
		struct vm_struct *vma;

		vma = remove_vm_area((void *)(map->sq_addr & PAGE_MASK));
		if (!vma) {
			printk(KERN_ERR "%s: bad address 0x%08lx\n",
			       __func__, map->sq_addr);
			return;
		}
	}
#endif

	sq_mapping_list_del(map);

	kmem_cache_free(sq_cache, map);
}
EXPORT_SYMBOL(sq_unmap);

/*
 * Needlessly complex sysfs interface. Unfortunately it doesn't seem like
 * there is any other easy way to add things on a per-cpu basis without
 * putting the directory entries somewhere stupid and having to create
 * links in sysfs by hand back in to the per-cpu directories.
 *
 * Some day we may want to have an additional abstraction per store
 * queue, but considering the kobject hell we already have to deal with,
 * it's simply not worth the trouble.
 */
static struct kobject *sq_kobject[NR_CPUS];

struct sq_sysfs_attr {
	struct attribute attr;
	ssize_t (*show)(char *buf);
	ssize_t (*store)(const char *buf, size_t count);
};

#define to_sq_sysfs_attr(a)	container_of(a, struct sq_sysfs_attr, attr)

static ssize_t sq_sysfs_show(struct kobject *kobj, struct attribute *attr,
			     char *buf)
{
	struct sq_sysfs_attr *sattr = to_sq_sysfs_attr(attr);

	if (likely(sattr->show))
		return sattr->show(buf);

	return -EIO;
}

static ssize_t sq_sysfs_store(struct kobject *kobj, struct attribute *attr,
			      const char *buf, size_t count)
{
	struct sq_sysfs_attr *sattr = to_sq_sysfs_attr(attr);

	if (likely(sattr->store))
		return sattr->store(buf, count);

	return -EIO;
}

static ssize_t mapping_show(char *buf)
{
	struct sq_mapping **list, *entry;
	char *p = buf;

	for (list = &sq_mapping_list; (entry = *list); list = &entry->next)
		p += sprintf(p, "%08lx-%08lx [%08lx]: %s\n",
			     entry->sq_addr, entry->sq_addr + entry->size,
			     entry->addr, entry->name);

	return p - buf;
}

static ssize_t mapping_store(const char *buf, size_t count)
{
	unsigned long base = 0, len = 0;

	sscanf(buf, "%lx %lx", &base, &len);
	if (!base)
		return -EIO;

	if (likely(len)) {
		int ret = sq_remap(base, len, "Userspace", PAGE_SHARED);
		if (ret < 0)
			return ret;
	} else
		sq_unmap(base);

	return count;
}
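
/*
 * Userspace view (addresses illustrative; paths assume the standard
 * cpu subsys layout this file registers against):
 *
 *	# map: "<phys base> <len>", both in hex
 *	echo "a4000000 1000" > /sys/devices/system/cpu/cpu0/sq/mapping
 *	# unmap: pass the returned SQ address with no length
 *	echo "e0000000" > /sys/devices/system/cpu/cpu0/sq/mapping
 *	# list active mappings
 *	cat /sys/devices/system/cpu/cpu0/sq/mapping
 */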

static struct sq_sysfs_attr mapping_attr =
	__ATTR(mapping, 0644, mapping_show, mapping_store);

static struct attribute *sq_sysfs_attrs[] = {
	&mapping_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(sq_sysfs);

static const struct sysfs_ops sq_sysfs_ops = {
	.show	= sq_sysfs_show,
	.store	= sq_sysfs_store,
};

static struct kobj_type ktype_percpu_entry = {
	.sysfs_ops	= &sq_sysfs_ops,
	.default_groups	= sq_sysfs_groups,
};

static int sq_dev_add(struct device *dev, struct subsys_interface *sif)
{
	unsigned int cpu = dev->id;
	struct kobject *kobj;
	int error;

	sq_kobject[cpu] = kzalloc(sizeof(struct kobject), GFP_KERNEL);
	if (unlikely(!sq_kobject[cpu]))
		return -ENOMEM;

	kobj = sq_kobject[cpu];
	error = kobject_init_and_add(kobj, &ktype_percpu_entry, &dev->kobj,
				     "%s", "sq");
	if (!error)
		kobject_uevent(kobj, KOBJ_ADD);
	return error;
}

static void sq_dev_remove(struct device *dev, struct subsys_interface *sif)
{
	unsigned int cpu = dev->id;
	struct kobject *kobj = sq_kobject[cpu];

	kobject_put(kobj);
}

static struct subsys_interface sq_interface = {
	.name		= "sq",
	.subsys		= &cpu_subsys,
	.add_dev	= sq_dev_add,
	.remove_dev	= sq_dev_remove,
};

static int __init sq_api_init(void)
{
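	/* The SQ area spans 64MB (0x04000000 bytes) of P4 space. */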
	unsigned int nr_pages = 0x04000000 >> PAGE_SHIFT;
	int ret = -ENOMEM;

	printk(KERN_NOTICE "sq: Registering store queue API.\n");

	sq_cache = kmem_cache_create("store_queue_cache",
				sizeof(struct sq_mapping), 0, 0, NULL);
	if (unlikely(!sq_cache))
		return ret;

	sq_bitmap = bitmap_zalloc(nr_pages, GFP_KERNEL);
	if (unlikely(!sq_bitmap))
		goto out;

	ret = subsys_interface_register(&sq_interface);
	if (unlikely(ret != 0))
		goto out;

	return 0;

out:
	bitmap_free(sq_bitmap);
	kmem_cache_destroy(sq_cache);

	return ret;
}

static void __exit sq_api_exit(void)
{
	subsys_interface_unregister(&sq_interface);
	bitmap_free(sq_bitmap);
	kmem_cache_destroy(sq_cache);
}

module_init(sq_api_init);
module_exit(sq_api_exit);

MODULE_AUTHOR("Paul Mundt <lethal@linux-sh.org>, M. R. Brown <mrbrown@0xd6.org>");
MODULE_DESCRIPTION("Simple API for SH-4 integrated Store Queues");
MODULE_LICENSE("GPL");