/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Intel Corporation. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: cache.c 1349 2004-12-16 21:09:43Z roland $
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

#include <rdma/ib_cache.h>

#include "core_priv.h"

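/*
 * Per-port caches of the P_Key and GID tables.  Each is a variable-length
 * structure: table_len gives the number of valid entries in the table[]
 * flexible array that follows.
 */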
struct ib_pkey_cache {
	int             table_len;
	u16             table[];
};

struct ib_gid_cache {
	int             table_len;
	union ib_gid    table[];
};

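/*
 * Work item used to refresh one port's cache from process context; the
 * event handler that requests the refresh may run in atomic context.
 */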
struct ib_update_work {
	struct work_struct work;
	struct ib_device  *device;
	u8                 port_num;
};

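/*
 * Switches use a single management port numbered 0; all other node
 * types number their physical ports starting at 1.
 */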
static inline int start_port(struct ib_device *device)
{
	return (device->node_type == RDMA_NODE_IB_SWITCH) ? 0 : 1;
}

static inline int end_port(struct ib_device *device)
{
	return (device->node_type == RDMA_NODE_IB_SWITCH) ?
		0 : device->phys_port_cnt;
}

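/* Read one GID table entry for a port from the cache. */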
int ib_get_cached_gid(struct ib_device *device,
		      u8                port_num,
		      int               index,
		      union ib_gid     *gid)
{
	struct ib_gid_cache *cache;
	unsigned long flags;
	int ret = 0;

	if (port_num < start_port(device) || port_num > end_port(device))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);

	cache = device->cache.gid_cache[port_num - start_port(device)];

	if (index < 0 || index >= cache->table_len)
		ret = -EINVAL;
	else
		*gid = cache->table[index];

	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_get_cached_gid);

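/*
 * Search every port's cached GID table for a match and return the port
 * number (and optionally the table index) where it was found.
 */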
int ib_find_cached_gid(struct ib_device *device,
		       union ib_gid	*gid,
		       u8               *port_num,
		       u16              *index)
{
	struct ib_gid_cache *cache;
	unsigned long flags;
	int p, i;
	int ret = -ENOENT;

	*port_num = -1;
	if (index)
		*index = -1;

	read_lock_irqsave(&device->cache.lock, flags);

	for (p = 0; p <= end_port(device) - start_port(device); ++p) {
		cache = device->cache.gid_cache[p];
		for (i = 0; i < cache->table_len; ++i) {
			if (!memcmp(gid, &cache->table[i], sizeof *gid)) {
				*port_num = p + start_port(device);
				if (index)
					*index = i;
				ret = 0;
				goto found;
			}
		}
	}
found:
	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_find_cached_gid);

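/* Read one P_Key table entry for a port from the cache. */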
int ib_get_cached_pkey(struct ib_device *device,
		       u8                port_num,
		       int               index,
		       u16              *pkey)
{
	struct ib_pkey_cache *cache;
	unsigned long flags;
	int ret = 0;

	if (port_num < start_port(device) || port_num > end_port(device))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);

	cache = device->cache.pkey_cache[port_num - start_port(device)];

	if (index < 0 || index >= cache->table_len)
		ret = -EINVAL;
	else
		*pkey = cache->table[index];

	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_get_cached_pkey);

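/*
 * Find the index of a P_Key in a port's cached table.  The top bit of a
 * P_Key is the membership bit, so only the low 15 bits are compared.
 */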
int ib_find_cached_pkey(struct ib_device *device,
			u8                port_num,
			u16               pkey,
			u16              *index)
{
	struct ib_pkey_cache *cache;
	unsigned long flags;
	int i;
	int ret = -ENOENT;

	if (port_num < start_port(device) || port_num > end_port(device))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);

	cache = device->cache.pkey_cache[port_num - start_port(device)];

	*index = -1;

	for (i = 0; i < cache->table_len; ++i)
		if ((cache->table[i] & 0x7fff) == (pkey & 0x7fff)) {
			*index = i;
			ret = 0;
			break;
		}

	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_find_cached_pkey);

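/* Return the cached LMC (LID mask control) value for a port. */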
int ib_get_cached_lmc(struct ib_device *device,
		      u8                port_num,
		      u8                *lmc)
{
	unsigned long flags;
	int ret = 0;

	if (port_num < start_port(device) || port_num > end_port(device))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);
	*lmc = device->cache.lmc_cache[port_num - start_port(device)];
	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_get_cached_lmc);

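/*
 * Rebuild the cache for one port: query the port attributes and the
 * full P_Key and GID tables into freshly allocated copies, then swap
 * the new tables in under the write lock and free the old ones, so
 * readers always see either a complete old table or a complete new one.
 */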
static void ib_cache_update(struct ib_device *device,
			    u8                port)
{
	struct ib_port_attr       *tprops = NULL;
	struct ib_pkey_cache      *pkey_cache = NULL, *old_pkey_cache;
	struct ib_gid_cache       *gid_cache = NULL, *old_gid_cache;
	int                        i;
	int                        ret;

	tprops = kmalloc(sizeof *tprops, GFP_KERNEL);
	if (!tprops)
		return;

	ret = ib_query_port(device, port, tprops);
	if (ret) {
		printk(KERN_WARNING "ib_query_port failed (%d) for %s\n",
		       ret, device->name);
		goto err;
	}

	pkey_cache = kmalloc(sizeof *pkey_cache + tprops->pkey_tbl_len *
			     sizeof *pkey_cache->table, GFP_KERNEL);
	if (!pkey_cache)
		goto err;

	pkey_cache->table_len = tprops->pkey_tbl_len;

	gid_cache = kmalloc(sizeof *gid_cache + tprops->gid_tbl_len *
			    sizeof *gid_cache->table, GFP_KERNEL);
	if (!gid_cache)
		goto err;

	gid_cache->table_len = tprops->gid_tbl_len;

	for (i = 0; i < pkey_cache->table_len; ++i) {
		ret = ib_query_pkey(device, port, i, pkey_cache->table + i);
		if (ret) {
			printk(KERN_WARNING "ib_query_pkey failed (%d) for %s (index %d)\n",
			       ret, device->name, i);
			goto err;
		}
	}

	for (i = 0; i < gid_cache->table_len; ++i) {
		ret = ib_query_gid(device, port, i, gid_cache->table + i);
		if (ret) {
			printk(KERN_WARNING "ib_query_gid failed (%d) for %s (index %d)\n",
			       ret, device->name, i);
			goto err;
		}
	}

	write_lock_irq(&device->cache.lock);

	old_pkey_cache = device->cache.pkey_cache[port - start_port(device)];
	old_gid_cache  = device->cache.gid_cache [port - start_port(device)];

	device->cache.pkey_cache[port - start_port(device)] = pkey_cache;
	device->cache.gid_cache [port - start_port(device)] = gid_cache;

	device->cache.lmc_cache[port - start_port(device)] = tprops->lmc;

	write_unlock_irq(&device->cache.lock);

	kfree(old_pkey_cache);
	kfree(old_gid_cache);
	kfree(tprops);
	return;

err:
	kfree(pkey_cache);
	kfree(gid_cache);
	kfree(tprops);
}

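/* Workqueue callback: run the deferred update and free the work item. */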
static void ib_cache_task(struct work_struct *_work)
{
	struct ib_update_work *work =
		container_of(_work, struct ib_update_work, work);

	ib_cache_update(work->device, work->port_num);
	kfree(work);
}

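/*
 * Event handler: any event that can change a port's P_Key or GID
 * tables, LID, or SM schedules a deferred refresh of that port's cache.
 * The handler may be called from atomic context, hence the GFP_ATOMIC
 * allocation and the deferral to a workqueue.
 */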
static void ib_cache_event(struct ib_event_handler *handler,
			   struct ib_event *event)
{
	struct ib_update_work *work;

	if (event->event == IB_EVENT_PORT_ERR    ||
	    event->event == IB_EVENT_PORT_ACTIVE ||
	    event->event == IB_EVENT_LID_CHANGE  ||
	    event->event == IB_EVENT_PKEY_CHANGE ||
	    event->event == IB_EVENT_SM_CHANGE   ||
	    event->event == IB_EVENT_CLIENT_REREGISTER) {
		work = kmalloc(sizeof *work, GFP_ATOMIC);
		if (work) {
			INIT_WORK(&work->work, ib_cache_task);
			work->device   = event->device;
			work->port_num = event->element.port_num;
			schedule_work(&work->work);
		}
	}
}

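/*
 * Allocate the per-port cache arrays for a new device, populate them
 * with an initial update of every port, and register for the events
 * that require a refresh.
 */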
static void ib_cache_setup_one(struct ib_device *device)
{
	int p;

	rwlock_init(&device->cache.lock);

	device->cache.pkey_cache =
		kmalloc(sizeof *device->cache.pkey_cache *
			(end_port(device) - start_port(device) + 1), GFP_KERNEL);
	device->cache.gid_cache =
		kmalloc(sizeof *device->cache.gid_cache *
			(end_port(device) - start_port(device) + 1), GFP_KERNEL);

	device->cache.lmc_cache = kmalloc(sizeof *device->cache.lmc_cache *
					  (end_port(device) -
					   start_port(device) + 1),
					  GFP_KERNEL);

	if (!device->cache.pkey_cache || !device->cache.gid_cache ||
	    !device->cache.lmc_cache) {
		printk(KERN_WARNING "Couldn't allocate cache for %s\n",
		       device->name);
		goto err;
	}

	for (p = 0; p <= end_port(device) - start_port(device); ++p) {
		device->cache.pkey_cache[p] = NULL;
		device->cache.gid_cache [p] = NULL;
		ib_cache_update(device, p + start_port(device));
	}

	INIT_IB_EVENT_HANDLER(&device->cache.event_handler,
			      device, ib_cache_event);
	if (ib_register_event_handler(&device->cache.event_handler))
		goto err_cache;

	return;

err_cache:
	for (p = 0; p <= end_port(device) - start_port(device); ++p) {
		kfree(device->cache.pkey_cache[p]);
		kfree(device->cache.gid_cache[p]);
	}

err:
	kfree(device->cache.pkey_cache);
	kfree(device->cache.gid_cache);
	kfree(device->cache.lmc_cache);
}

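/*
 * Unregister the event handler, wait for any in-flight updates to
 * finish, then free all per-port tables and the cache arrays themselves.
 */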
static void ib_cache_cleanup_one(struct ib_device *device)
{
	int p;

	ib_unregister_event_handler(&device->cache.event_handler);
	flush_scheduled_work();

	for (p = 0; p <= end_port(device) - start_port(device); ++p) {
		kfree(device->cache.pkey_cache[p]);
		kfree(device->cache.gid_cache[p]);
	}

	kfree(device->cache.pkey_cache);
	kfree(device->cache.gid_cache);
	kfree(device->cache.lmc_cache);
}

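/* Register as an IB client so the cache is set up for every device. */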
static struct ib_client cache_client = {
	.name   = "cache",
	.add    = ib_cache_setup_one,
	.remove = ib_cache_cleanup_one
};

int __init ib_cache_setup(void)
{
	return ib_register_client(&cache_client);
}

void __exit ib_cache_cleanup(void)
{
	ib_unregister_client(&cache_client);
}