// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2017 HiSilicon Limited, All Rights Reserved.
 * Author: Gabriele Paoloni <gabriele.paoloni@huawei.com>
 * Author: Zhichang Yuan <yuanzhichang@hisilicon.com>
 */

#define pr_fmt(fmt)	"LOGIC PIO: " fmt

#include <linux/of.h>
#include <linux/io.h>
#include <linux/logic_pio.h>
#include <linux/mm.h>
#include <linux/rculist.h>
#include <linux/sizes.h>
#include <linux/slab.h>

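/*
 * Logical PIO tokens are handed out from two sub-ranges:
 *  - tokens below MMIO_UPPER_LIMIT belong to LOGIC_PIO_CPU_MMIO ranges and
 *    are accessed through the MMIO window at PCI_IOBASE;
 *  - tokens from MMIO_UPPER_LIMIT up to IO_SPACE_LIMIT belong to
 *    LOGIC_PIO_INDIRECT ranges and are accessed through the ops of the
 *    matching range.
 */
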
/* The unique hardware address list */
static LIST_HEAD(io_range_list);
static DEFINE_MUTEX(io_range_mutex);

/* Consider a kernel general helper for this */
#define in_range(b, first, len)        ((b) >= (first) && (b) < (first) + (len))

/**
 * logic_pio_register_range - register logical PIO range for a host
 * @new_range: pointer to the IO range to be registered.
 *
 * Returns 0 on success, or a negative error code on failure.
 *
 * Register a new IO range node in the IO range list.
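 *
 * Illustrative sketch only (not taken from this file): a host driver is
 * expected to fill in a struct logic_pio_hwaddr and register it, e.g.:
 *
 *	range->fwnode   = fwnode;
 *	range->flags    = LOGIC_PIO_INDIRECT;
 *	range->hw_start = hw_start;
 *	range->size     = size;
 *	ret = logic_pio_register_range(range);
 *
 * On success, range->io_start holds the base logical PIO token assigned to
 * the range.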
 */
int logic_pio_register_range(struct logic_pio_hwaddr *new_range)
{
	struct logic_pio_hwaddr *range;
	resource_size_t start;
	resource_size_t end;
	resource_size_t mmio_end = 0;
	resource_size_t iio_sz = MMIO_UPPER_LIMIT;
	int ret = 0;

	if (!new_range || !new_range->fwnode || !new_range->size)
		return -EINVAL;

	start = new_range->hw_start;
	end = new_range->hw_start + new_range->size;

	mutex_lock(&io_range_mutex);
	list_for_each_entry(range, &io_range_list, list) {
		if (range->fwnode == new_range->fwnode) {
			/* range already there */
			goto end_register;
		}
		if (range->flags == LOGIC_PIO_CPU_MMIO &&
		    new_range->flags == LOGIC_PIO_CPU_MMIO) {
			/* for MMIO ranges we need to check for overlap */
			if (start >= range->hw_start + range->size ||
			    end < range->hw_start) {
				mmio_end = range->io_start + range->size;
			} else {
				ret = -EFAULT;
				goto end_register;
			}
		} else if (range->flags == LOGIC_PIO_INDIRECT &&
			   new_range->flags == LOGIC_PIO_INDIRECT) {
			iio_sz += range->size;
		}
	}

	/* range not registered yet, check for available space */
	if (new_range->flags == LOGIC_PIO_CPU_MMIO) {
		if (mmio_end + new_range->size - 1 > MMIO_UPPER_LIMIT) {
			/* if it's too big check if 64K space can be reserved */
			if (mmio_end + SZ_64K - 1 > MMIO_UPPER_LIMIT) {
				ret = -E2BIG;
				goto end_register;
			}
			new_range->size = SZ_64K;
			pr_warn("Requested IO range too big, new size set to 64K\n");
		}
		new_range->io_start = mmio_end;
	} else if (new_range->flags == LOGIC_PIO_INDIRECT) {
		if (iio_sz + new_range->size - 1 > IO_SPACE_LIMIT) {
			ret = -E2BIG;
			goto end_register;
		}
		new_range->io_start = iio_sz;
	} else {
		/* invalid flag */
		ret = -EINVAL;
		goto end_register;
	}

	list_add_tail_rcu(&new_range->list, &io_range_list);

end_register:
	mutex_unlock(&io_range_mutex);
	return ret;
}

/**
 * logic_pio_unregister_range - unregister a logical PIO range for a host
 * @range: pointer to the IO range which has already been registered.
 *
 * Unregister a previously-registered IO range node.
 */
void logic_pio_unregister_range(struct logic_pio_hwaddr *range)
{
	mutex_lock(&io_range_mutex);
	list_del_rcu(&range->list);
	mutex_unlock(&io_range_mutex);
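	/*
	 * Wait for any in-flight RCU readers walking io_range_list so that
	 * the caller may safely free @range afterwards.
	 */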
	synchronize_rcu();
}

/**
 * find_io_range_by_fwnode - find logical PIO range for given FW node
 * @fwnode: FW node handle associated with logical PIO range
 *
 * Returns pointer to node on success, NULL otherwise.
 *
 * Traverse the io_range_list to find the registered node for @fwnode.
 */
struct logic_pio_hwaddr *find_io_range_by_fwnode(struct fwnode_handle *fwnode)
{
	struct logic_pio_hwaddr *range, *found_range = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(range, &io_range_list, list) {
		if (range->fwnode == fwnode) {
			found_range = range;
			break;
		}
	}
	rcu_read_unlock();

	return found_range;
}

/* Return a registered range given an input PIO token */
static struct logic_pio_hwaddr *find_io_range(unsigned long pio)
{
	struct logic_pio_hwaddr *range, *found_range = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(range, &io_range_list, list) {
		if (in_range(pio, range->io_start, range->size)) {
			found_range = range;
			break;
		}
	}
	rcu_read_unlock();

	if (!found_range)
		pr_err("PIO entry token 0x%lx invalid\n", pio);

	return found_range;
}

/**
 * logic_pio_to_hwaddr - translate logical PIO to HW address
 * @pio: logical PIO value
 *
 * Returns HW address if valid, ~0 otherwise.
 *
 * Translate the input logical PIO to the corresponding hardware address.
 * The input PIO should be unique in the whole logical PIO space.
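 *
 * For example (hypothetical values): a range registered with
 * io_start 0x4000 and hw_start 0xe0100000 translates pio 0x4020 to
 * hardware address 0xe0100020.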
 */
resource_size_t logic_pio_to_hwaddr(unsigned long pio)
{
	struct logic_pio_hwaddr *range;

	range = find_io_range(pio);
	if (range)
		return range->hw_start + pio - range->io_start;

	return (resource_size_t)~0;
}

/**
 * logic_pio_trans_hwaddr - translate HW address to logical PIO
 * @fwnode: FW node reference for the host
 * @addr: Host-relative HW address
 * @size: size to translate
 *
 * Returns the logical PIO value if successful, ~0UL otherwise.
 */
unsigned long logic_pio_trans_hwaddr(struct fwnode_handle *fwnode,
				     resource_size_t addr, resource_size_t size)
{
	struct logic_pio_hwaddr *range;

	range = find_io_range_by_fwnode(fwnode);
	if (!range || range->flags == LOGIC_PIO_CPU_MMIO) {
		pr_err("IO range not found or invalid\n");
		return ~0UL;
	}
	if (range->size < size) {
		pr_err("resource size %pa cannot fit in IO range size %pa\n",
		       &size, &range->size);
		return ~0UL;
	}
	return addr - range->hw_start + range->io_start;
}

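/**
 * logic_pio_trans_cpuaddr - translate CPU MMIO address to logical PIO
 * @addr: CPU address within a registered LOGIC_PIO_CPU_MMIO range
 *
 * Returns the logical PIO token for @addr if the address falls inside a
 * registered LOGIC_PIO_CPU_MMIO range, ~0UL otherwise.
 */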
unsigned long logic_pio_trans_cpuaddr(resource_size_t addr)
{
	struct logic_pio_hwaddr *range;

	rcu_read_lock();
	list_for_each_entry_rcu(range, &io_range_list, list) {
		if (range->flags != LOGIC_PIO_CPU_MMIO)
			continue;
		if (in_range(addr, range->hw_start, range->size)) {
			unsigned long cpuaddr;

			cpuaddr = addr - range->hw_start + range->io_start;

			rcu_read_unlock();
			return cpuaddr;
		}
	}
	rcu_read_unlock();

	pr_err("addr %pa not registered in io_range_list\n", &addr);

	return ~0UL;
}

#if defined(CONFIG_INDIRECT_PIO) && defined(PCI_IOBASE)
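/*
 * BUILD_LOGIC_IO() expands to the logic_in/out/ins/outs accessors for one
 * access width: tokens below MMIO_UPPER_LIMIT are serviced via readX/writeX
 * on PCI_IOBASE, while tokens in [MMIO_UPPER_LIMIT, IO_SPACE_LIMIT) are
 * dispatched to the ops of the matching LOGIC_PIO_INDIRECT range.
 */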
#define BUILD_LOGIC_IO(bw, type)					\
type logic_in##bw(unsigned long addr)					\
{									\
	type ret = (type)~0;						\
									\
	if (addr < MMIO_UPPER_LIMIT) {					\
		ret = read##bw(PCI_IOBASE + addr);			\
	} else if (addr >= MMIO_UPPER_LIMIT && addr < IO_SPACE_LIMIT) {	\
		struct logic_pio_hwaddr *entry = find_io_range(addr);	\
									\
		if (entry && entry->ops)				\
			ret = entry->ops->in(entry->hostdata,		\
					addr, sizeof(type));		\
		else							\
			WARN_ON_ONCE(1);				\
	}								\
	return ret;							\
}									\
									\
void logic_out##bw(type value, unsigned long addr)			\
{									\
	if (addr < MMIO_UPPER_LIMIT) {					\
		write##bw(value, PCI_IOBASE + addr);			\
	} else if (addr >= MMIO_UPPER_LIMIT && addr < IO_SPACE_LIMIT) {	\
		struct logic_pio_hwaddr *entry = find_io_range(addr);	\
									\
		if (entry && entry->ops)				\
			entry->ops->out(entry->hostdata,		\
					addr, value, sizeof(type));	\
		else							\
			WARN_ON_ONCE(1);				\
	}								\
}									\
									\
void logic_ins##bw(unsigned long addr, void *buffer,			\
		   unsigned int count)					\
{									\
	if (addr < MMIO_UPPER_LIMIT) {					\
		reads##bw(PCI_IOBASE + addr, buffer, count);		\
	} else if (addr >= MMIO_UPPER_LIMIT && addr < IO_SPACE_LIMIT) {	\
		struct logic_pio_hwaddr *entry = find_io_range(addr);	\
									\
		if (entry && entry->ops)				\
			entry->ops->ins(entry->hostdata,		\
				addr, buffer, sizeof(type), count);	\
		else							\
			WARN_ON_ONCE(1);				\
	}								\
									\
}									\
									\
void logic_outs##bw(unsigned long addr, const void *buffer,		\
		    unsigned int count)					\
{									\
	if (addr < MMIO_UPPER_LIMIT) {					\
		writes##bw(PCI_IOBASE + addr, buffer, count);		\
	} else if (addr >= MMIO_UPPER_LIMIT && addr < IO_SPACE_LIMIT) {	\
		struct logic_pio_hwaddr *entry = find_io_range(addr);	\
									\
		if (entry && entry->ops)				\
			entry->ops->outs(entry->hostdata,		\
				addr, buffer, sizeof(type), count);	\
		else							\
			WARN_ON_ONCE(1);				\
	}								\
}

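/* Instantiate and export the 8-, 16-, and 32-bit logical PIO accessors. */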
BUILD_LOGIC_IO(b, u8)
EXPORT_SYMBOL(logic_inb);
EXPORT_SYMBOL(logic_insb);
EXPORT_SYMBOL(logic_outb);
EXPORT_SYMBOL(logic_outsb);

BUILD_LOGIC_IO(w, u16)
EXPORT_SYMBOL(logic_inw);
EXPORT_SYMBOL(logic_insw);
EXPORT_SYMBOL(logic_outw);
EXPORT_SYMBOL(logic_outsw);

BUILD_LOGIC_IO(l, u32)
EXPORT_SYMBOL(logic_inl);
EXPORT_SYMBOL(logic_insl);
EXPORT_SYMBOL(logic_outl);
EXPORT_SYMBOL(logic_outsl);

#endif /* CONFIG_INDIRECT_PIO && PCI_IOBASE */