xref: /openbmc/linux/lib/logic_pio.c (revision bbb774d9)
// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2017 HiSilicon Limited, All Rights Reserved.
 * Author: Gabriele Paoloni <gabriele.paoloni@huawei.com>
 * Author: Zhichang Yuan <yuanzhichang@hisilicon.com>
 * Author: John Garry <john.garry@huawei.com>
 */

#define pr_fmt(fmt)	"LOGIC PIO: " fmt

#include <linux/of.h>
#include <linux/io.h>
#include <linux/logic_pio.h>
#include <linux/mm.h>
#include <linux/rculist.h>
#include <linux/sizes.h>
#include <linux/slab.h>

/* The unique hardware address list */
static LIST_HEAD(io_range_list);
static DEFINE_MUTEX(io_range_mutex);

/* Consider a kernel general helper for this */
#define in_range(b, first, len)        ((b) >= (first) && (b) < (first) + (len))
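/*
 * The interval checked is half-open: for example, in_range(0x100, 0x100, 0x10)
 * is true, while in_range(0x110, 0x100, 0x10) is false.
 */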

/**
 * logic_pio_register_range - register logical PIO range for a host
 * @new_range: pointer to the IO range to be registered.
 *
 * Returns 0 on success, or the error code in case of failure.
 * If the range already exists, -EEXIST will be returned, which should be
 * considered a success.
 *
 * Register a new IO range node in the IO range list.
 */
int logic_pio_register_range(struct logic_pio_hwaddr *new_range)
{
	struct logic_pio_hwaddr *range;
	resource_size_t start;
	resource_size_t end;
	resource_size_t mmio_end = 0;
	resource_size_t iio_sz = MMIO_UPPER_LIMIT;
	int ret = 0;

	if (!new_range || !new_range->fwnode || !new_range->size ||
	    (new_range->flags == LOGIC_PIO_INDIRECT && !new_range->ops))
		return -EINVAL;

	start = new_range->hw_start;
	end = new_range->hw_start + new_range->size;

	mutex_lock(&io_range_mutex);
	list_for_each_entry(range, &io_range_list, list) {
		if (range->fwnode == new_range->fwnode) {
			/* range already there */
			ret = -EEXIST;
			goto end_register;
		}
		if (range->flags == LOGIC_PIO_CPU_MMIO &&
		    new_range->flags == LOGIC_PIO_CPU_MMIO) {
			/* for MMIO ranges we need to check for overlap */
			if (start >= range->hw_start + range->size ||
			    end < range->hw_start) {
				mmio_end = range->io_start + range->size;
			} else {
				ret = -EFAULT;
				goto end_register;
			}
		} else if (range->flags == LOGIC_PIO_INDIRECT &&
			   new_range->flags == LOGIC_PIO_INDIRECT) {
			iio_sz += range->size;
		}
	}

	/* range not registered yet, check for available space */
	if (new_range->flags == LOGIC_PIO_CPU_MMIO) {
		if (mmio_end + new_range->size - 1 > MMIO_UPPER_LIMIT) {
			/* if it's too big, check whether a 64K space can be reserved */
			if (mmio_end + SZ_64K - 1 > MMIO_UPPER_LIMIT) {
				ret = -E2BIG;
				goto end_register;
			}
			new_range->size = SZ_64K;
			pr_warn("Requested IO range too big, new size set to 64K\n");
		}
		new_range->io_start = mmio_end;
	} else if (new_range->flags == LOGIC_PIO_INDIRECT) {
		if (iio_sz + new_range->size - 1 > IO_SPACE_LIMIT) {
			ret = -E2BIG;
			goto end_register;
		}
		new_range->io_start = iio_sz;
	} else {
		/* invalid flag */
		ret = -EINVAL;
		goto end_register;
	}

	list_add_tail_rcu(&new_range->list, &io_range_list);

end_register:
	mutex_unlock(&io_range_mutex);
	return ret;
}
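/*
 * Example usage (illustrative only, not part of this file): a minimal sketch
 * of how an indirect-IO host driver might register its window. The names
 * my_host_ops, host, and dev are hypothetical; the field names match
 * struct logic_pio_hwaddr as used above.
 *
 *	struct logic_pio_hwaddr *range;
 *	int ret;
 *
 *	range = devm_kzalloc(dev, sizeof(*range), GFP_KERNEL);
 *	if (!range)
 *		return -ENOMEM;
 *	range->fwnode   = dev_fwnode(dev);	// firmware node of the host
 *	range->hw_start = 0xe4;			// bus-local start address
 *	range->size     = 0x20;			// length of the window
 *	range->flags    = LOGIC_PIO_INDIRECT;	// accessed via ops callbacks
 *	range->ops      = &my_host_ops;		// host's in/out/ins/outs accessors
 *	range->hostdata = host;			// passed back to the accessors
 *	ret = logic_pio_register_range(range);
 *	if (ret && ret != -EEXIST)		// -EEXIST counts as success
 *		return ret;
 *	// range->io_start now holds the allocated logical PIO base
 */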

/**
 * logic_pio_unregister_range - unregister a logical PIO range for a host
 * @range: pointer to the IO range that has already been registered.
 *
 * Unregister a previously-registered IO range node.
 */
void logic_pio_unregister_range(struct logic_pio_hwaddr *range)
{
	mutex_lock(&io_range_mutex);
	list_del_rcu(&range->list);
	mutex_unlock(&io_range_mutex);
	synchronize_rcu();
}
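/*
 * Example (illustrative): the matching teardown for the registration sketch
 * above. Because readers traverse the list under RCU and this function
 * already waits for a grace period, the node may be freed once it returns.
 *
 *	logic_pio_unregister_range(range);
 *	// 'range' may now be freed (devm-managed in the sketch above)
 */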

/**
 * find_io_range_by_fwnode - find logical PIO range for given FW node
 * @fwnode: FW node handle associated with logical PIO range
 *
 * Returns pointer to node on success, NULL otherwise.
 *
 * Traverse the io_range_list to find the registered node for @fwnode.
 */
struct logic_pio_hwaddr *find_io_range_by_fwnode(struct fwnode_handle *fwnode)
{
	struct logic_pio_hwaddr *range, *found_range = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(range, &io_range_list, list) {
		if (range->fwnode == fwnode) {
			found_range = range;
			break;
		}
	}
	rcu_read_unlock();

	return found_range;
}
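/*
 * Example (illustrative): a caller can use this lookup to check whether a
 * range is already registered for a firmware node before registering one.
 *
 *	struct logic_pio_hwaddr *range = find_io_range_by_fwnode(fwnode);
 *
 *	if (range)
 *		pr_debug("fwnode already mapped at PIO %pa\n", &range->io_start);
 */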

/* Return a registered range given an input PIO token */
static struct logic_pio_hwaddr *find_io_range(unsigned long pio)
{
	struct logic_pio_hwaddr *range, *found_range = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(range, &io_range_list, list) {
		if (in_range(pio, range->io_start, range->size)) {
			found_range = range;
			break;
		}
	}
	rcu_read_unlock();

	if (!found_range)
		pr_err("PIO entry token 0x%lx invalid\n", pio);

	return found_range;
}

/**
 * logic_pio_to_hwaddr - translate logical PIO to HW address
 * @pio: logical PIO value
 *
 * Returns HW address if valid, ~0 otherwise.
 *
 * Translate the input logical PIO to the corresponding hardware address.
 * The input PIO should be unique in the whole logical PIO space.
 */
resource_size_t logic_pio_to_hwaddr(unsigned long pio)
{
	struct logic_pio_hwaddr *range;

	range = find_io_range(pio);
	if (range)
		return range->hw_start + pio - range->io_start;

	return (resource_size_t)~0;
}
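/*
 * Example (illustrative): translating a logical PIO token back to the host's
 * hardware address, e.g. when programming the host controller.
 *
 *	resource_size_t hwaddr = logic_pio_to_hwaddr(pio);
 *
 *	if (hwaddr == (resource_size_t)~0)
 *		return -ENXIO;	// token not inside any registered range
 */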

/**
 * logic_pio_trans_hwaddr - translate HW address to logical PIO
 * @fwnode: FW node reference for the host
 * @addr: Host-relative HW address
 * @size: size to translate
 *
 * Returns the logical PIO value if successful, ~0UL otherwise.
 */
unsigned long logic_pio_trans_hwaddr(struct fwnode_handle *fwnode,
				     resource_size_t addr, resource_size_t size)
{
	struct logic_pio_hwaddr *range;

	range = find_io_range_by_fwnode(fwnode);
	if (!range || range->flags == LOGIC_PIO_CPU_MMIO) {
		pr_err("IO range not found or invalid\n");
		return ~0UL;
	}
	if (range->size < size) {
		pr_err("resource size %pa cannot fit in IO range size %pa\n",
		       &size, &range->size);
		return ~0UL;
	}
	return addr - range->hw_start + range->io_start;
}
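/*
 * Example (illustrative): a host driver translating a child device's
 * bus-local IO resource into logical PIO space before handing it to the
 * child. 'hostdev' and 'res' are hypothetical.
 *
 *	resource_size_t len = resource_size(res);
 *	unsigned long pio;
 *
 *	pio = logic_pio_trans_hwaddr(dev_fwnode(hostdev), res->start, len);
 *	if (pio == ~0UL)
 *		return -EFAULT;
 *	res->start = pio;
 *	res->end = pio + len - 1;
 */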

unsigned long logic_pio_trans_cpuaddr(resource_size_t addr)
{
	struct logic_pio_hwaddr *range;

	rcu_read_lock();
	list_for_each_entry_rcu(range, &io_range_list, list) {
		if (range->flags != LOGIC_PIO_CPU_MMIO)
			continue;
		if (in_range(addr, range->hw_start, range->size)) {
			unsigned long cpuaddr;

			cpuaddr = addr - range->hw_start + range->io_start;

			rcu_read_unlock();
			return cpuaddr;
		}
	}
	rcu_read_unlock();

	pr_err("addr %pa not registered in io_range_list\n", &addr);

	return ~0UL;
}
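/*
 * Example (illustrative): converting a CPU (MMIO) address inside a host
 * bridge's IO window into the logical port number that drivers then pass to
 * inb()/outb() and friends.
 *
 *	unsigned long port = logic_pio_trans_cpuaddr(cpu_addr);
 *
 *	if (port == ~0UL)
 *		return -ENXIO;	// address not inside a registered MMIO range
 */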

#if defined(CONFIG_INDIRECT_PIO) && defined(PCI_IOBASE)
#define BUILD_LOGIC_IO(bwl, type)					\
type logic_in##bwl(unsigned long addr)					\
{									\
	type ret = (type)~0;						\
									\
	if (addr < MMIO_UPPER_LIMIT) {					\
		ret = _in##bwl(addr);					\
	} else if (addr >= MMIO_UPPER_LIMIT && addr < IO_SPACE_LIMIT) {	\
		struct logic_pio_hwaddr *entry = find_io_range(addr);	\
									\
		if (entry)						\
			ret = entry->ops->in(entry->hostdata,		\
					addr, sizeof(type));		\
		else							\
			WARN_ON_ONCE(1);				\
	}								\
	return ret;							\
}									\
									\
void logic_out##bwl(type value, unsigned long addr)			\
{									\
	if (addr < MMIO_UPPER_LIMIT) {					\
		_out##bwl(value, addr);					\
	} else if (addr >= MMIO_UPPER_LIMIT && addr < IO_SPACE_LIMIT) {	\
		struct logic_pio_hwaddr *entry = find_io_range(addr);	\
									\
		if (entry)						\
			entry->ops->out(entry->hostdata,		\
					addr, value, sizeof(type));	\
		else							\
			WARN_ON_ONCE(1);				\
	}								\
}									\
									\
void logic_ins##bwl(unsigned long addr, void *buffer,			\
		    unsigned int count)					\
{									\
	if (addr < MMIO_UPPER_LIMIT) {					\
		reads##bwl(PCI_IOBASE + addr, buffer, count);		\
	} else if (addr >= MMIO_UPPER_LIMIT && addr < IO_SPACE_LIMIT) {	\
		struct logic_pio_hwaddr *entry = find_io_range(addr);	\
									\
		if (entry)						\
			entry->ops->ins(entry->hostdata,		\
				addr, buffer, sizeof(type), count);	\
		else							\
			WARN_ON_ONCE(1);				\
	}								\
									\
}									\
									\
void logic_outs##bwl(unsigned long addr, const void *buffer,		\
		     unsigned int count)				\
{									\
	if (addr < MMIO_UPPER_LIMIT) {					\
		writes##bwl(PCI_IOBASE + addr, buffer, count);		\
	} else if (addr >= MMIO_UPPER_LIMIT && addr < IO_SPACE_LIMIT) {	\
		struct logic_pio_hwaddr *entry = find_io_range(addr);	\
									\
		if (entry)						\
			entry->ops->outs(entry->hostdata,		\
				addr, buffer, sizeof(type), count);	\
		else							\
			WARN_ON_ONCE(1);				\
	}								\
}
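/*
 * Each instantiation below generates one access width of the family. For
 * example, BUILD_LOGIC_IO(b, u8) expands (roughly) to definitions of:
 *
 *	u8 logic_inb(unsigned long addr);
 *	void logic_outb(u8 value, unsigned long addr);
 *	void logic_insb(unsigned long addr, void *buffer, unsigned int count);
 *	void logic_outsb(unsigned long addr, const void *buffer,
 *			 unsigned int count);
 *
 * Each helper routes addresses below MMIO_UPPER_LIMIT to the MMIO-backed
 * accessors, and addresses between MMIO_UPPER_LIMIT and IO_SPACE_LIMIT to
 * the registered host's indirect ops.
 */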

BUILD_LOGIC_IO(b, u8)
EXPORT_SYMBOL(logic_inb);
EXPORT_SYMBOL(logic_insb);
EXPORT_SYMBOL(logic_outb);
EXPORT_SYMBOL(logic_outsb);

BUILD_LOGIC_IO(w, u16)
EXPORT_SYMBOL(logic_inw);
EXPORT_SYMBOL(logic_insw);
EXPORT_SYMBOL(logic_outw);
EXPORT_SYMBOL(logic_outsw);

BUILD_LOGIC_IO(l, u32)
EXPORT_SYMBOL(logic_inl);
EXPORT_SYMBOL(logic_insl);
EXPORT_SYMBOL(logic_outl);
EXPORT_SYMBOL(logic_outsl);

#endif /* CONFIG_INDIRECT_PIO && PCI_IOBASE */