/*
 * Copyright (c) 2004 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Intel Corporation. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

#include <rdma/ib_cache.h>

#include "core_priv.h"

struct ib_pkey_cache {
        int          table_len;
        u16          table[0];
};

struct ib_gid_cache {
        int          table_len;
        union ib_gid table[0];
};

struct ib_update_work {
        struct work_struct work;
        struct ib_device  *device;
        u8                 port_num;
};

int ib_get_cached_gid(struct ib_device *device,
                      u8                port_num,
                      int               index,
                      union ib_gid     *gid)
{
        struct ib_gid_cache *cache;
        unsigned long flags;
        int ret = 0;

        if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
                return -EINVAL;

        read_lock_irqsave(&device->cache.lock, flags);

        cache = device->cache.gid_cache[port_num - rdma_start_port(device)];

        if (index < 0 || index >= cache->table_len)
                ret = -EINVAL;
        else
                *gid = cache->table[index];

        read_unlock_irqrestore(&device->cache.lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_get_cached_gid);

int ib_find_cached_gid(struct ib_device   *device,
                       const union ib_gid *gid,
                       u8                 *port_num,
                       u16                *index)
{
        struct ib_gid_cache *cache;
        unsigned long flags;
        int p, i;
        int ret = -ENOENT;

        *port_num = -1;
        if (index)
                *index = -1;

        read_lock_irqsave(&device->cache.lock, flags);

        for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p) {
                cache = device->cache.gid_cache[p];
                for (i = 0; i < cache->table_len; ++i) {
                        if (!memcmp(gid, &cache->table[i], sizeof *gid)) {
                                *port_num = p + rdma_start_port(device);
                                if (index)
                                        *index = i;
                                ret = 0;
                                goto found;
                        }
                }
        }
found:
        read_unlock_irqrestore(&device->cache.lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_find_cached_gid);

int ib_get_cached_pkey(struct ib_device *device,
                       u8                port_num,
                       int               index,
                       u16              *pkey)
{
        struct ib_pkey_cache *cache;
        unsigned long flags;
        int ret = 0;

        if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
                return -EINVAL;

        read_lock_irqsave(&device->cache.lock, flags);

        cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];

        if (index < 0 || index >= cache->table_len)
                ret = -EINVAL;
        else
                *pkey = cache->table[index];

        read_unlock_irqrestore(&device->cache.lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_get_cached_pkey);

int ib_find_cached_pkey(struct ib_device *device,
                        u8                port_num,
                        u16               pkey,
                        u16              *index)
{
        struct ib_pkey_cache *cache;
        unsigned long flags;
        int i;
        int ret = -ENOENT;
        int partial_ix = -1;

        if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
                return -EINVAL;

        read_lock_irqsave(&device->cache.lock, flags);

        cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];

        *index = -1;

        for (i = 0; i < cache->table_len; ++i)
                if ((cache->table[i] & 0x7fff) == (pkey & 0x7fff)) {
                        if (cache->table[i] & 0x8000) {
                                *index = i;
                                ret = 0;
                                break;
                        } else
                                partial_ix = i;
                }

        if (ret && partial_ix >= 0) {
                *index = partial_ix;
                ret = 0;
        }

        read_unlock_irqrestore(&device->cache.lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_find_cached_pkey);

int ib_find_exact_cached_pkey(struct ib_device *device,
                              u8                port_num,
                              u16               pkey,
                              u16              *index)
{
        struct ib_pkey_cache *cache;
        unsigned long flags;
        int i;
        int ret = -ENOENT;

        if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
                return -EINVAL;

        read_lock_irqsave(&device->cache.lock, flags);

        cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];

        *index = -1;

        for (i = 0; i < cache->table_len; ++i)
                if (cache->table[i] == pkey) {
                        *index = i;
                        ret = 0;
                        break;
                }

        read_unlock_irqrestore(&device->cache.lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_find_exact_cached_pkey);

int ib_get_cached_lmc(struct ib_device *device,
                      u8                port_num,
                      u8               *lmc)
{
        unsigned long flags;
        int ret = 0;

        if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
                return -EINVAL;

        read_lock_irqsave(&device->cache.lock, flags);
        *lmc = device->cache.lmc_cache[port_num - rdma_start_port(device)];
        read_unlock_irqrestore(&device->cache.lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_get_cached_lmc);

static void ib_cache_update(struct ib_device *device,
                            u8                port)
{
        struct ib_port_attr  *tprops = NULL;
        struct ib_pkey_cache *pkey_cache = NULL, *old_pkey_cache;
        struct ib_gid_cache  *gid_cache = NULL, *old_gid_cache;
        int                   i;
        int                   ret;

        tprops = kmalloc(sizeof *tprops, GFP_KERNEL);
        if (!tprops)
                return;

        ret = ib_query_port(device, port, tprops);
        if (ret) {
                printk(KERN_WARNING "ib_query_port failed (%d) for %s\n",
                       ret, device->name);
                goto err;
        }

        pkey_cache = kmalloc(sizeof *pkey_cache + tprops->pkey_tbl_len *
                             sizeof *pkey_cache->table, GFP_KERNEL);
        if (!pkey_cache)
                goto err;

        pkey_cache->table_len = tprops->pkey_tbl_len;

        gid_cache = kmalloc(sizeof *gid_cache + tprops->gid_tbl_len *
                            sizeof *gid_cache->table, GFP_KERNEL);
        if (!gid_cache)
                goto err;

        gid_cache->table_len = tprops->gid_tbl_len;

        for (i = 0; i < pkey_cache->table_len; ++i) {
                ret = ib_query_pkey(device, port, i, pkey_cache->table + i);
                if (ret) {
                        printk(KERN_WARNING "ib_query_pkey failed (%d) for %s (index %d)\n",
                               ret, device->name, i);
                        goto err;
                }
        }

        for (i = 0; i < gid_cache->table_len; ++i) {
                ret = ib_query_gid(device, port, i, gid_cache->table + i);
                if (ret) {
                        printk(KERN_WARNING "ib_query_gid failed (%d) for %s (index %d)\n",
                               ret, device->name, i);
                        goto err;
                }
        }

        write_lock_irq(&device->cache.lock);

        old_pkey_cache = device->cache.pkey_cache[port - rdma_start_port(device)];
        old_gid_cache  = device->cache.gid_cache [port - rdma_start_port(device)];

        device->cache.pkey_cache[port - rdma_start_port(device)] = pkey_cache;
        device->cache.gid_cache [port - rdma_start_port(device)] = gid_cache;

        device->cache.lmc_cache[port - rdma_start_port(device)] = tprops->lmc;

        write_unlock_irq(&device->cache.lock);

        kfree(old_pkey_cache);
        kfree(old_gid_cache);
        kfree(tprops);
        return;

err:
        kfree(pkey_cache);
        kfree(gid_cache);
        kfree(tprops);
}

static void ib_cache_task(struct work_struct *_work)
{
        struct ib_update_work *work =
                container_of(_work, struct ib_update_work, work);

        ib_cache_update(work->device, work->port_num);
        kfree(work);
}

static void ib_cache_event(struct ib_event_handler *handler,
                           struct ib_event *event)
{
        struct ib_update_work *work;

        if (event->event == IB_EVENT_PORT_ERR    ||
            event->event == IB_EVENT_PORT_ACTIVE ||
            event->event == IB_EVENT_LID_CHANGE  ||
            event->event == IB_EVENT_PKEY_CHANGE ||
            event->event == IB_EVENT_SM_CHANGE   ||
            event->event == IB_EVENT_CLIENT_REREGISTER ||
            event->event == IB_EVENT_GID_CHANGE) {
                work = kmalloc(sizeof *work, GFP_ATOMIC);
                if (work) {
                        INIT_WORK(&work->work, ib_cache_task);
                        work->device   = event->device;
                        work->port_num = event->element.port_num;
                        queue_work(ib_wq, &work->work);
                }
        }
}

static void ib_cache_setup_one(struct ib_device *device)
{
        int p;

        rwlock_init(&device->cache.lock);

        device->cache.pkey_cache =
                kmalloc(sizeof *device->cache.pkey_cache *
                        (rdma_end_port(device) - rdma_start_port(device) + 1), GFP_KERNEL);
        device->cache.gid_cache =
                kmalloc(sizeof *device->cache.gid_cache *
                        (rdma_end_port(device) - rdma_start_port(device) + 1), GFP_KERNEL);

        device->cache.lmc_cache = kmalloc(sizeof *device->cache.lmc_cache *
                                          (rdma_end_port(device) -
                                           rdma_start_port(device) + 1),
                                          GFP_KERNEL);

        if (!device->cache.pkey_cache || !device->cache.gid_cache ||
            !device->cache.lmc_cache) {
                printk(KERN_WARNING "Couldn't allocate cache "
                       "for %s\n", device->name);
                goto err;
        }

        for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p) {
                device->cache.pkey_cache[p] = NULL;
                device->cache.gid_cache [p] = NULL;
                ib_cache_update(device, p + rdma_start_port(device));
        }

        INIT_IB_EVENT_HANDLER(&device->cache.event_handler,
                              device, ib_cache_event);
        if (ib_register_event_handler(&device->cache.event_handler))
                goto err_cache;

        return;

err_cache:
        for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p) {
                kfree(device->cache.pkey_cache[p]);
                kfree(device->cache.gid_cache[p]);
        }

err:
        kfree(device->cache.pkey_cache);
        kfree(device->cache.gid_cache);
        kfree(device->cache.lmc_cache);
}

static void ib_cache_cleanup_one(struct ib_device *device)
{
        int p;

        ib_unregister_event_handler(&device->cache.event_handler);
        flush_workqueue(ib_wq);

        for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p) {
                kfree(device->cache.pkey_cache[p]);
                kfree(device->cache.gid_cache[p]);
        }

        kfree(device->cache.pkey_cache);
        kfree(device->cache.gid_cache);
        kfree(device->cache.lmc_cache);
}

static struct ib_client cache_client = {
        .name   = "cache",
        .add    = ib_cache_setup_one,
        .remove = ib_cache_cleanup_one
};

int __init ib_cache_setup(void)
{
        return ib_register_client(&cache_client);
}

void __exit ib_cache_cleanup(void)
{
        ib_unregister_client(&cache_client);
}