/*
 * Copyright (c) 2014 Chelsio, Inc. All rights reserved.
 * Copyright (c) 2014 Intel Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "iwpm_util.h"

#define IWPM_HASH_BUCKET_SIZE	512
#define IWPM_HASH_BUCKET_MASK	(IWPM_HASH_BUCKET_SIZE - 1)

static LIST_HEAD(iwpm_nlmsg_req_list);
static DEFINE_SPINLOCK(iwpm_nlmsg_req_lock);

static struct hlist_head *iwpm_hash_bucket;
static DEFINE_SPINLOCK(iwpm_mapinfo_lock);

static DEFINE_MUTEX(iwpm_admin_lock);
static struct iwpm_admin_data iwpm_admin;

/*
 * iwpm_init - Mark the nl_client as valid and, on first use,
 *	       allocate the mapinfo hash table.
 */
int iwpm_init(u8 nl_client)
{
	if (iwpm_valid_client(nl_client))
		return -EINVAL;
	mutex_lock(&iwpm_admin_lock);
	if (atomic_read(&iwpm_admin.refcount) == 0) {
		iwpm_hash_bucket = kzalloc(IWPM_HASH_BUCKET_SIZE *
					sizeof(struct hlist_head), GFP_KERNEL);
		if (!iwpm_hash_bucket) {
			mutex_unlock(&iwpm_admin_lock);
			pr_err("%s Unable to create mapinfo hash table\n", __func__);
			return -ENOMEM;
		}
	}
	atomic_inc(&iwpm_admin.refcount);
	mutex_unlock(&iwpm_admin_lock);
	iwpm_set_valid(nl_client, 1);
	return 0;
}
EXPORT_SYMBOL(iwpm_init);

static void free_hash_bucket(void);

/*
 * iwpm_exit - Mark the nl_client as invalid; the mapinfo hash table
 *	       is freed when the last registered client exits.
 */
int iwpm_exit(u8 nl_client)
{
	if (!iwpm_valid_client(nl_client))
		return -EINVAL;
	mutex_lock(&iwpm_admin_lock);
	if (atomic_read(&iwpm_admin.refcount) == 0) {
		mutex_unlock(&iwpm_admin_lock);
		pr_err("%s Incorrect usage - negative refcount\n", __func__);
		return -EINVAL;
	}
	if (atomic_dec_and_test(&iwpm_admin.refcount)) {
		free_hash_bucket();
		pr_debug("%s: Mapinfo hash table is destroyed\n", __func__);
	}
	mutex_unlock(&iwpm_admin_lock);
	iwpm_set_valid(nl_client, 0);
	return 0;
}
EXPORT_SYMBOL(iwpm_exit);

static struct hlist_head *get_hash_bucket_head(struct sockaddr_storage *,
					       struct sockaddr_storage *);

/*
 * iwpm_create_mapinfo - Store the local and mapped sockaddr pair of a
 *			 client in the mapinfo hash table.
 */
int iwpm_create_mapinfo(struct sockaddr_storage *local_sockaddr,
			struct sockaddr_storage *mapped_sockaddr,
			u8 nl_client)
{
	struct hlist_head *hash_bucket_head = NULL;
	struct iwpm_mapping_info *map_info;
	unsigned long flags;
	int ret = -EINVAL;

	if (!iwpm_valid_client(nl_client))
		return -EINVAL;
	map_info = kzalloc(sizeof(struct iwpm_mapping_info),
			GFP_KERNEL);
	if (!map_info) {
		pr_err("%s: Unable to allocate a mapping info\n", __func__);
		return -ENOMEM;
	}
	memcpy(&map_info->local_sockaddr, local_sockaddr,
	       sizeof(struct sockaddr_storage));
	memcpy(&map_info->mapped_sockaddr, mapped_sockaddr,
	       sizeof(struct sockaddr_storage));
	map_info->nl_client = nl_client;

	spin_lock_irqsave(&iwpm_mapinfo_lock, flags);
	if (iwpm_hash_bucket) {
		hash_bucket_head = get_hash_bucket_head(
					&map_info->local_sockaddr,
					&map_info->mapped_sockaddr);
		if (hash_bucket_head) {
			hlist_add_head(&map_info->hlist_node, hash_bucket_head);
			ret = 0;
		}
	}
	spin_unlock_irqrestore(&iwpm_mapinfo_lock, flags);

	/* don't leak the entry if it couldn't be added to the hash table */
	if (!hash_bucket_head)
		kfree(map_info);
	return ret;
}
EXPORT_SYMBOL(iwpm_create_mapinfo);

/*
 * iwpm_remove_mapinfo - Remove a local/mapped sockaddr pair from the
 *			 mapinfo hash table.
 */
int iwpm_remove_mapinfo(struct sockaddr_storage *local_sockaddr,
			struct sockaddr_storage *mapped_local_addr)
{
	struct hlist_node *tmp_hlist_node;
	struct hlist_head *hash_bucket_head;
	struct iwpm_mapping_info *map_info = NULL;
	unsigned long flags;
	int ret = -EINVAL;

	spin_lock_irqsave(&iwpm_mapinfo_lock, flags);
	if (iwpm_hash_bucket) {
		hash_bucket_head = get_hash_bucket_head(
					local_sockaddr,
					mapped_local_addr);
		if (!hash_bucket_head)
			goto remove_mapinfo_exit;

		hlist_for_each_entry_safe(map_info, tmp_hlist_node,
					hash_bucket_head, hlist_node) {

			if (!iwpm_compare_sockaddr(&map_info->mapped_sockaddr,
						mapped_local_addr)) {

				hlist_del_init(&map_info->hlist_node);
				kfree(map_info);
				ret = 0;
				break;
			}
		}
	}
remove_mapinfo_exit:
	spin_unlock_irqrestore(&iwpm_mapinfo_lock, flags);
	return ret;
}
EXPORT_SYMBOL(iwpm_remove_mapinfo);

static void free_hash_bucket(void)
{
	struct hlist_node *tmp_hlist_node;
	struct iwpm_mapping_info *map_info;
	unsigned long flags;
	int i;

	/* remove all the mapinfo data from the list */
	spin_lock_irqsave(&iwpm_mapinfo_lock, flags);
	for (i = 0; i < IWPM_HASH_BUCKET_SIZE; i++) {
		hlist_for_each_entry_safe(map_info, tmp_hlist_node,
					  &iwpm_hash_bucket[i], hlist_node) {

			hlist_del_init(&map_info->hlist_node);
			kfree(map_info);
		}
	}
	/* free the hash list */
	kfree(iwpm_hash_bucket);
	iwpm_hash_bucket = NULL;
	spin_unlock_irqrestore(&iwpm_mapinfo_lock, flags);
}

/*
 * iwpm_get_nlmsg_request - Allocate and track a netlink request to the
 *			    userspace port mapper.  Two references are
 *			    held: one for the pending-request list and
 *			    one for the caller.
 */
struct iwpm_nlmsg_request *iwpm_get_nlmsg_request(__u32 nlmsg_seq,
						  u8 nl_client, gfp_t gfp)
{
	struct iwpm_nlmsg_request *nlmsg_request = NULL;
	unsigned long flags;

	nlmsg_request = kzalloc(sizeof(struct iwpm_nlmsg_request), gfp);
	if (!nlmsg_request) {
		pr_err("%s Unable to allocate a nlmsg_request\n", __func__);
		return NULL;
	}
	/* initialize the request before it becomes findable on the list */
	kref_init(&nlmsg_request->kref);
	kref_get(&nlmsg_request->kref);
	nlmsg_request->nlmsg_seq = nlmsg_seq;
	nlmsg_request->nl_client = nl_client;
	nlmsg_request->request_done = 0;
	nlmsg_request->err_code = 0;

	spin_lock_irqsave(&iwpm_nlmsg_req_lock, flags);
	list_add_tail(&nlmsg_request->inprocess_list, &iwpm_nlmsg_req_list);
	spin_unlock_irqrestore(&iwpm_nlmsg_req_lock, flags);
	return nlmsg_request;
}

/* kref release callback: unlink the request from the list and free it */
void iwpm_free_nlmsg_request(struct kref *kref)
{
	struct iwpm_nlmsg_request *nlmsg_request;
	unsigned long flags;

	nlmsg_request = container_of(kref, struct iwpm_nlmsg_request, kref);

	spin_lock_irqsave(&iwpm_nlmsg_req_lock, flags);
	list_del_init(&nlmsg_request->inprocess_list);
	spin_unlock_irqrestore(&iwpm_nlmsg_req_lock, flags);

	if (!nlmsg_request->request_done)
		pr_debug("%s Freeing incomplete nlmsg request (seq = %u).\n",
			__func__, nlmsg_request->nlmsg_seq);
	kfree(nlmsg_request);
}

/*
 * iwpm_find_nlmsg_request - Find a pending request by its netlink
 *			     sequence number and take a reference on it.
 */
struct iwpm_nlmsg_request *iwpm_find_nlmsg_request(__u32 echo_seq)
{
	struct iwpm_nlmsg_request *nlmsg_request;
	struct iwpm_nlmsg_request *found_request = NULL;
	unsigned long flags;

	spin_lock_irqsave(&iwpm_nlmsg_req_lock, flags);
	list_for_each_entry(nlmsg_request, &iwpm_nlmsg_req_list,
			    inprocess_list) {
		if (nlmsg_request->nlmsg_seq == echo_seq) {
			found_request = nlmsg_request;
			kref_get(&nlmsg_request->kref);
			break;
		}
	}
	spin_unlock_irqrestore(&iwpm_nlmsg_req_lock, flags);
	return found_request;
}

/*
 * iwpm_wait_complete_req - Block until the port mapper completes the
 *			    request or the timeout expires, then return
 *			    the request error code.
 */
int iwpm_wait_complete_req(struct iwpm_nlmsg_request *nlmsg_request)
{
	int ret;

	init_waitqueue_head(&nlmsg_request->waitq);

	ret = wait_event_timeout(nlmsg_request->waitq,
			(nlmsg_request->request_done != 0), IWPM_NL_TIMEOUT);
	if (!ret) {
		ret = -EINVAL;
		pr_info("%s: Timeout %d sec for netlink request (seq = %u)\n",
			__func__, (IWPM_NL_TIMEOUT/HZ), nlmsg_request->nlmsg_seq);
	} else {
		ret = nlmsg_request->err_code;
	}
	kref_put(&nlmsg_request->kref, iwpm_free_nlmsg_request);
	return ret;
}

int iwpm_get_nlmsg_seq(void)
{
	return atomic_inc_return(&iwpm_admin.nlmsg_seq);
}

int iwpm_valid_client(u8 nl_client)
{
	if (nl_client >= RDMA_NL_NUM_CLIENTS)
		return 0;
	return iwpm_admin.client_list[nl_client];
}

void iwpm_set_valid(u8 nl_client, int valid)
{
	if (nl_client >= RDMA_NL_NUM_CLIENTS)
		return;
	iwpm_admin.client_list[nl_client] = valid;
}

/* the caller must pass a valid nl_client (see iwpm_valid_client) */
int iwpm_registered_client(u8 nl_client)
{
	return iwpm_admin.reg_list[nl_client];
}

/* the caller must pass a valid nl_client (see iwpm_valid_client) */
void iwpm_set_registered(u8 nl_client, int reg)
{
	iwpm_admin.reg_list[nl_client] = reg;
}

/* Return 0 if the two addresses match (family, address and port), 1 otherwise */
int iwpm_compare_sockaddr(struct sockaddr_storage *a_sockaddr,
			  struct sockaddr_storage *b_sockaddr)
{
	if (a_sockaddr->ss_family != b_sockaddr->ss_family)
		return 1;
	if (a_sockaddr->ss_family == AF_INET) {
		struct sockaddr_in *a4_sockaddr =
			(struct sockaddr_in *)a_sockaddr;
		struct sockaddr_in *b4_sockaddr =
			(struct sockaddr_in *)b_sockaddr;
		if (!memcmp(&a4_sockaddr->sin_addr,
			    &b4_sockaddr->sin_addr, sizeof(struct in_addr))
		    && a4_sockaddr->sin_port == b4_sockaddr->sin_port)
			return 0;

	} else if (a_sockaddr->ss_family == AF_INET6) {
		struct sockaddr_in6 *a6_sockaddr =
			(struct sockaddr_in6 *)a_sockaddr;
		struct sockaddr_in6 *b6_sockaddr =
			(struct sockaddr_in6 *)b_sockaddr;
		if (!memcmp(&a6_sockaddr->sin6_addr,
			    &b6_sockaddr->sin6_addr, sizeof(struct in6_addr))
		    && a6_sockaddr->sin6_port == b6_sockaddr->sin6_port)
			return 0;

	} else {
		pr_err("%s: Invalid sockaddr family\n", __func__);
	}
	return 1;
}

/*
 * iwpm_create_nlmsg - Allocate an skb and fill in the netlink message
 *		       header for the given operation and client.
 */
struct sk_buff *iwpm_create_nlmsg(u32 nl_op, struct nlmsghdr **nlh,
				  int nl_client)
{
	struct sk_buff *skb = NULL;

	skb = dev_alloc_skb(NLMSG_GOODSIZE);
	if (!skb) {
		pr_err("%s Unable to allocate skb\n", __func__);
		goto create_nlmsg_exit;
	}
	if (!(ibnl_put_msg(skb, nlh, 0, 0, nl_client, nl_op,
			   NLM_F_REQUEST))) {
		pr_warn("%s: Unable to put the nlmsg header\n", __func__);
		dev_kfree_skb(skb);
		skb = NULL;
	}
create_nlmsg_exit:
	return skb;
}

int iwpm_parse_nlmsg(struct netlink_callback *cb, int policy_max,
		     const struct nla_policy *nlmsg_policy,
		     struct nlattr *nltb[], const char *msg_type)
{
	int nlh_len = 0;
	int ret;
	const char *err_str = "";

	ret = nlmsg_validate(cb->nlh, nlh_len, policy_max-1, nlmsg_policy);
	if (ret) {
		err_str = "Invalid attribute";
		goto parse_nlmsg_error;
	}
	ret = nlmsg_parse(cb->nlh, nlh_len, nltb, policy_max-1, nlmsg_policy);
	if (ret) {
		err_str = "Unable to parse the nlmsg";
		goto parse_nlmsg_error;
	}
	ret = iwpm_validate_nlmsg_attr(nltb, policy_max);
	if (ret) {
		err_str = "Invalid NULL attribute";
		goto parse_nlmsg_error;
	}
	return 0;
parse_nlmsg_error:
	pr_warn("%s: %s (msg type %s ret = %d)\n",
			__func__, err_str, msg_type, ret);
	return ret;
}

void iwpm_print_sockaddr(struct sockaddr_storage *sockaddr, char *msg)
{
	struct sockaddr_in6 *sockaddr_v6;
	struct sockaddr_in *sockaddr_v4;

	switch (sockaddr->ss_family) {
	case AF_INET:
		sockaddr_v4 = (struct sockaddr_in *)sockaddr;
		pr_debug("%s IPV4 %pI4: %u(0x%04X)\n",
			 msg, &sockaddr_v4->sin_addr,
			 ntohs(sockaddr_v4->sin_port),
			 ntohs(sockaddr_v4->sin_port));
		break;
	case AF_INET6:
		sockaddr_v6 = (struct sockaddr_in6 *)sockaddr;
		pr_debug("%s IPV6 %pI6: %u(0x%04X)\n",
			 msg, &sockaddr_v6->sin6_addr,
			 ntohs(sockaddr_v6->sin6_port),
			 ntohs(sockaddr_v6->sin6_port));
		break;
	default:
		break;
	}
}

static u32 iwpm_ipv6_jhash(struct sockaddr_in6 *ipv6_sockaddr)
{
	u32 ipv6_hash = jhash(&ipv6_sockaddr->sin6_addr, sizeof(struct in6_addr), 0);
	u32 hash = jhash_2words(ipv6_hash, (__force u32) ipv6_sockaddr->sin6_port, 0);
	return hash;
}

static u32 iwpm_ipv4_jhash(struct sockaddr_in *ipv4_sockaddr)
{
	u32 ipv4_hash = jhash(&ipv4_sockaddr->sin_addr, sizeof(struct in_addr), 0);
	u32 hash = jhash_2words(ipv4_hash, (__force u32) ipv4_sockaddr->sin_port, 0);
	return hash;
}

static struct hlist_head *get_hash_bucket_head(struct sockaddr_storage
					       *local_sockaddr,
					       struct sockaddr_storage
					       *mapped_sockaddr)
{
	u32 local_hash, mapped_hash, hash;

	if (local_sockaddr->ss_family == AF_INET) {
		local_hash = iwpm_ipv4_jhash((struct sockaddr_in *) local_sockaddr);
		mapped_hash = iwpm_ipv4_jhash((struct sockaddr_in *) mapped_sockaddr);

	} else if (local_sockaddr->ss_family == AF_INET6) {
		local_hash = iwpm_ipv6_jhash((struct sockaddr_in6 *) local_sockaddr);
		mapped_hash = iwpm_ipv6_jhash((struct sockaddr_in6 *) mapped_sockaddr);
	} else {
		pr_err("%s: Invalid sockaddr family\n", __func__);
		return NULL;
	}

	if (local_hash == mapped_hash) /* if port mapper isn't available */
		hash = local_hash;
	else
		hash = jhash_2words(local_hash, mapped_hash, 0);

	return &iwpm_hash_bucket[hash & IWPM_HASH_BUCKET_MASK];
}

static int send_mapinfo_num(u32 mapping_num, u8 nl_client, int iwpm_pid)
{
	struct sk_buff *skb = NULL;
	struct nlmsghdr *nlh;
	u32 msg_seq;
	const char *err_str = "";
	int ret = -EINVAL;

	skb = iwpm_create_nlmsg(RDMA_NL_IWPM_MAPINFO_NUM, &nlh, nl_client);
	if (!skb) {
		err_str = "Unable to create a nlmsg";
		goto mapinfo_num_error;
	}
	nlh->nlmsg_seq = iwpm_get_nlmsg_seq();
	msg_seq = 0;
	err_str = "Unable to put attribute of mapinfo number nlmsg";
	ret = ibnl_put_attr(skb, nlh, sizeof(u32), &msg_seq, IWPM_NLA_MAPINFO_SEQ);
	if (ret)
		goto mapinfo_num_error;
	ret = ibnl_put_attr(skb, nlh, sizeof(u32),
				&mapping_num, IWPM_NLA_MAPINFO_SEND_NUM);
	if (ret)
		goto mapinfo_num_error;
	ret = ibnl_unicast(skb, nlh, iwpm_pid);
	if (ret) {
		skb = NULL;
		err_str = "Unable to send a nlmsg";
		goto mapinfo_num_error;
	}
	pr_debug("%s: Sent mapping number = %u\n", __func__, mapping_num);
	return 0;
mapinfo_num_error:
	pr_info("%s: %s\n", __func__, err_str);
	if (skb)
		dev_kfree_skb(skb);
	return ret;
}

static int send_nlmsg_done(struct sk_buff *skb, u8 nl_client, int iwpm_pid)
{
	struct nlmsghdr *nlh = NULL;
	int ret = 0;

	if (!skb)
		return ret;
	if (!(ibnl_put_msg(skb, &nlh, 0, 0, nl_client,
			   RDMA_NL_IWPM_MAPINFO, NLM_F_MULTI))) {
		pr_warn("%s Unable to put NLMSG_DONE\n", __func__);
		return -ENOMEM;
	}
	nlh->nlmsg_type = NLMSG_DONE;
	ret = ibnl_unicast(skb, (struct nlmsghdr *)skb->data, iwpm_pid);
	if (ret)
		pr_warn("%s Unable to send a nlmsg\n", __func__);
	return ret;
}

/*
 * iwpm_send_mapinfo - Send all stored mapinfo records of the nl_client
 *		       to the userspace port mapper, followed by the
 *		       total number of mappings sent.
 */
int iwpm_send_mapinfo(u8 nl_client, int iwpm_pid)
{
	struct iwpm_mapping_info *map_info;
	struct sk_buff *skb = NULL;
	struct nlmsghdr *nlh;
	int skb_num = 0, mapping_num = 0;
	int i = 0, nlmsg_bytes = 0;
	unsigned long flags;
	const char *err_str = "";
	int ret = 0;	/* stays 0 if there are no mappings to send */

	skb = dev_alloc_skb(NLMSG_GOODSIZE);
	if (!skb) {
		ret = -ENOMEM;
		err_str = "Unable to allocate skb";
		goto send_mapping_info_exit;
	}
	skb_num++;
	spin_lock_irqsave(&iwpm_mapinfo_lock, flags);
	for (i = 0; i < IWPM_HASH_BUCKET_SIZE; i++) {
		hlist_for_each_entry(map_info, &iwpm_hash_bucket[i],
				     hlist_node) {
			if (map_info->nl_client != nl_client)
				continue;
			nlh = NULL;
			if (!(ibnl_put_msg(skb, &nlh, 0, 0, nl_client,
					RDMA_NL_IWPM_MAPINFO, NLM_F_MULTI))) {
				ret = -ENOMEM;
				err_str = "Unable to put the nlmsg header";
				goto send_mapping_info_unlock;
			}
			err_str = "Unable to put attribute of the nlmsg";
			ret = ibnl_put_attr(skb, nlh,
					sizeof(struct sockaddr_storage),
					&map_info->local_sockaddr,
					IWPM_NLA_MAPINFO_LOCAL_ADDR);
			if (ret)
				goto send_mapping_info_unlock;

			ret = ibnl_put_attr(skb, nlh,
					sizeof(struct sockaddr_storage),
					&map_info->mapped_sockaddr,
					IWPM_NLA_MAPINFO_MAPPED_ADDR);
			if (ret)
				goto send_mapping_info_unlock;

			iwpm_print_sockaddr(&map_info->local_sockaddr,
					"send_mapping_info: Local sockaddr:");
			iwpm_print_sockaddr(&map_info->mapped_sockaddr,
					"send_mapping_info: Mapped local sockaddr:");
			mapping_num++;
			nlmsg_bytes += nlh->nlmsg_len;

			/* check if all mappings can fit in one skb */
			if (NLMSG_GOODSIZE - nlmsg_bytes < nlh->nlmsg_len * 2) {
				/* and leave room for NLMSG_DONE */
				nlmsg_bytes = 0;
				skb_num++;
				spin_unlock_irqrestore(&iwpm_mapinfo_lock,
						       flags);
				/* send the skb */
				ret = send_nlmsg_done(skb, nl_client, iwpm_pid);
				skb = NULL;
				if (ret) {
					err_str = "Unable to send map info";
					goto send_mapping_info_exit;
				}
				if (skb_num == IWPM_MAPINFO_SKB_COUNT) {
					ret = -ENOMEM;
					err_str = "Insufficient skbs for map info";
					goto send_mapping_info_exit;
				}
				skb = dev_alloc_skb(NLMSG_GOODSIZE);
				if (!skb) {
					ret = -ENOMEM;
					err_str = "Unable to allocate skb";
					goto send_mapping_info_exit;
				}
				spin_lock_irqsave(&iwpm_mapinfo_lock, flags);
			}
		}
	}
send_mapping_info_unlock:
	spin_unlock_irqrestore(&iwpm_mapinfo_lock, flags);
send_mapping_info_exit:
	if (ret) {
		pr_warn("%s: %s (ret = %d)\n", __func__, err_str, ret);
		if (skb)
			dev_kfree_skb(skb);
		return ret;
	}
	send_nlmsg_done(skb, nl_client, iwpm_pid);
	return send_mapinfo_num(mapping_num, nl_client, iwpm_pid);
}

int iwpm_mapinfo_available(void)
{
	unsigned long flags;
	int full_bucket = 0, i = 0;

	spin_lock_irqsave(&iwpm_mapinfo_lock, flags);
	if (iwpm_hash_bucket) {
		for (i = 0; i < IWPM_HASH_BUCKET_SIZE; i++) {
			if (!hlist_empty(&iwpm_hash_bucket[i])) {
				full_bucket = 1;
				break;
			}
		}
	}
	spin_unlock_irqrestore(&iwpm_mapinfo_lock, flags);
	return full_bucket;
}
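
/*
 * Illustrative sketch (not part of the original file): how a client
 * driver would typically use the mapinfo helpers above.  The client id
 * (RDMA_NL_C4IW) and the loopback addresses/ports are placeholder
 * values chosen for the example only; a real driver obtains the local
 * and mapped sockaddrs from its connection setup and port mapper
 * exchange.
 */
static void __maybe_unused iwpm_mapinfo_usage_example(void)
{
	struct sockaddr_in local = {
		.sin_family = AF_INET,
		.sin_port   = htons(5001),
		.sin_addr   = { .s_addr = htonl(INADDR_LOOPBACK) },
	};
	struct sockaddr_in mapped = {
		.sin_family = AF_INET,
		.sin_port   = htons(61234),
		.sin_addr   = { .s_addr = htonl(INADDR_LOOPBACK) },
	};

	/* mark the netlink client valid before storing any mappings */
	if (iwpm_init(RDMA_NL_C4IW))
		return;

	/* record the local/mapped pair; remove it when the mapping goes away */
	if (!iwpm_create_mapinfo((struct sockaddr_storage *)&local,
				 (struct sockaddr_storage *)&mapped,
				 RDMA_NL_C4IW))
		iwpm_remove_mapinfo((struct sockaddr_storage *)&local,
				    (struct sockaddr_storage *)&mapped);

	iwpm_exit(RDMA_NL_C4IW);
}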