/*
 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: ipoib_ib.c 1386 2004-12-27 16:23:17Z roland $
 */

#include <linux/delay.h>
#include <linux/dma-mapping.h>

#include <ib_cache.h>

#include "ipoib.h"

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA
static int data_debug_level;

module_param(data_debug_level, int, 0644);
MODULE_PARM_DESC(data_debug_level,
                 "Enable data path debug tracing if > 0");
#endif

#define IPOIB_OP_RECV (1ul << 31)

static DECLARE_MUTEX(pkey_sem);

struct ipoib_ah *ipoib_create_ah(struct net_device *dev,
                                 struct ib_pd *pd, struct ib_ah_attr *attr)
{
        struct ipoib_ah *ah;

        ah = kmalloc(sizeof *ah, GFP_KERNEL);
        if (!ah)
                return NULL;

        ah->dev = dev;
        ah->last_send = 0;
        kref_init(&ah->ref);

        ah->ah = ib_create_ah(pd, attr);
        if (IS_ERR(ah->ah)) {
                kfree(ah);
                ah = NULL;
        } else
                ipoib_dbg(netdev_priv(dev), "Created ah %p\n", ah->ah);

        return ah;
}

void ipoib_free_ah(struct kref *kref)
{
        struct ipoib_ah *ah = container_of(kref, struct ipoib_ah, ref);
        struct ipoib_dev_priv *priv = netdev_priv(ah->dev);

        unsigned long flags;

        if (ah->last_send <= priv->tx_tail) {
                ipoib_dbg(priv, "Freeing ah %p\n", ah->ah);
                ib_destroy_ah(ah->ah);
                kfree(ah);
        } else {
                spin_lock_irqsave(&priv->lock, flags);
                list_add_tail(&ah->list, &priv->dead_ahs);
                spin_unlock_irqrestore(&priv->lock, flags);
        }
}

static inline int ipoib_ib_receive(struct ipoib_dev_priv *priv,
                                   unsigned int wr_id,
                                   dma_addr_t addr)
{
        struct ib_sge list = {
                .addr = addr,
                .length = IPOIB_BUF_SIZE,
                .lkey = priv->mr->lkey,
        };
        struct ib_recv_wr param = {
                .wr_id = wr_id | IPOIB_OP_RECV,
                .sg_list = &list,
                .num_sge = 1,
        };
        struct ib_recv_wr *bad_wr;

        return ib_post_recv(priv->qp, &param, &bad_wr);
}

static int ipoib_ib_post_receive(struct net_device *dev, int id)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct sk_buff *skb;
        dma_addr_t addr;
        int ret;

        skb = dev_alloc_skb(IPOIB_BUF_SIZE + 4);
        if (!skb) {
                ipoib_warn(priv, "failed to allocate receive buffer\n");

                priv->rx_ring[id].skb = NULL;
                return -ENOMEM;
        }
        skb_reserve(skb, 4);    /* 16 byte align IP header */
        priv->rx_ring[id].skb = skb;
        addr = dma_map_single(priv->ca->dma_device,
                              skb->data, IPOIB_BUF_SIZE,
                              DMA_FROM_DEVICE);
        pci_unmap_addr_set(&priv->rx_ring[id], mapping, addr);

        ret = ipoib_ib_receive(priv, id, addr);
        if (ret) {
                ipoib_warn(priv, "ipoib_ib_receive failed for buf %d (%d)\n",
                           id, ret);
                dma_unmap_single(priv->ca->dma_device, addr,
                                 IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
                dev_kfree_skb_any(skb);
                priv->rx_ring[id].skb = NULL;
        }

        return ret;
}

static int ipoib_ib_post_receives(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        int i;

        for (i = 0; i < IPOIB_RX_RING_SIZE; ++i) {
                if (ipoib_ib_post_receive(dev, i)) {
                        ipoib_warn(priv, "ipoib_ib_post_receive failed for buf %d\n", i);
                        return -EIO;
                }
        }

        return 0;
}

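/*
 * Handle a single work completion. Receive completions are tagged with
 * IPOIB_OP_RECV in the wr_id: the skb is unmapped, handed to the network
 * stack (unless it was looped back from our own QP) and the ring slot is
 * reposted. Send completions unmap and free the skb, advance tx_tail and
 * wake the netdev queue once the TX ring has drained to at most half full.
 */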
static void ipoib_ib_handle_wc(struct net_device *dev,
                               struct ib_wc *wc)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        unsigned int wr_id = wc->wr_id;

        ipoib_dbg_data(priv, "called: id %d, op %d, status: %d\n",
                       wr_id, wc->opcode, wc->status);

        if (wr_id & IPOIB_OP_RECV) {
                wr_id &= ~IPOIB_OP_RECV;

                if (wr_id < IPOIB_RX_RING_SIZE) {
                        struct sk_buff *skb = priv->rx_ring[wr_id].skb;

                        priv->rx_ring[wr_id].skb = NULL;

                        dma_unmap_single(priv->ca->dma_device,
                                         pci_unmap_addr(&priv->rx_ring[wr_id],
                                                        mapping),
                                         IPOIB_BUF_SIZE,
                                         DMA_FROM_DEVICE);

                        if (wc->status != IB_WC_SUCCESS) {
                                if (wc->status != IB_WC_WR_FLUSH_ERR)
                                        ipoib_warn(priv, "failed recv event "
                                                   "(status=%d, wrid=%d vend_err %x)\n",
                                                   wc->status, wr_id, wc->vendor_err);
                                dev_kfree_skb_any(skb);
                                return;
                        }

                        ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
                                       wc->byte_len, wc->slid);

                        skb_put(skb, wc->byte_len);
                        skb_pull(skb, IB_GRH_BYTES);

                        if (wc->slid != priv->local_lid ||
                            wc->src_qp != priv->qp->qp_num) {
                                skb->protocol = ((struct ipoib_header *) skb->data)->proto;

                                skb_pull(skb, IPOIB_ENCAP_LEN);

                                dev->last_rx = jiffies;
                                ++priv->stats.rx_packets;
                                priv->stats.rx_bytes += skb->len;

                                skb->dev = dev;
                                /* XXX get correct PACKET_ type here */
                                skb->pkt_type = PACKET_HOST;
                                netif_rx_ni(skb);
                        } else {
                                ipoib_dbg_data(priv, "dropping loopback packet\n");
                                dev_kfree_skb_any(skb);
                        }

                        /* repost receive */
                        if (ipoib_ib_post_receive(dev, wr_id))
                                ipoib_warn(priv, "ipoib_ib_post_receive failed "
                                           "for buf %d\n", wr_id);
                } else
                        ipoib_warn(priv, "completion event with wrid %d\n",
                                   wr_id);

        } else {
                struct ipoib_buf *tx_req;
                unsigned long flags;

                if (wr_id >= IPOIB_TX_RING_SIZE) {
                        ipoib_warn(priv, "completion event with wrid %d (> %d)\n",
                                   wr_id, IPOIB_TX_RING_SIZE);
                        return;
                }

                ipoib_dbg_data(priv, "send complete, wrid %d\n", wr_id);

                tx_req = &priv->tx_ring[wr_id];

                dma_unmap_single(priv->ca->dma_device,
                                 pci_unmap_addr(tx_req, mapping),
                                 tx_req->skb->len,
                                 DMA_TO_DEVICE);

                ++priv->stats.tx_packets;
                priv->stats.tx_bytes += tx_req->skb->len;

                dev_kfree_skb_any(tx_req->skb);

                spin_lock_irqsave(&priv->tx_lock, flags);
                ++priv->tx_tail;
                if (netif_queue_stopped(dev) &&
                    priv->tx_head - priv->tx_tail <= IPOIB_TX_RING_SIZE / 2)
                        netif_wake_queue(dev);
                spin_unlock_irqrestore(&priv->tx_lock, flags);

                if (wc->status != IB_WC_SUCCESS &&
                    wc->status != IB_WC_WR_FLUSH_ERR)
                        ipoib_warn(priv, "failed send event "
                                   "(status=%d, wrid=%d vend_err %x)\n",
                                   wc->status, wr_id, wc->vendor_err);
        }
}

void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr)
{
        struct net_device *dev = (struct net_device *) dev_ptr;
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        int n, i;

        ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
        do {
                n = ib_poll_cq(cq, IPOIB_NUM_WC, priv->ibwc);
                for (i = 0; i < n; ++i)
                        ipoib_ib_handle_wc(dev, priv->ibwc + i);
        } while (n == IPOIB_NUM_WC);
}

static inline int post_send(struct ipoib_dev_priv *priv,
                            unsigned int wr_id,
                            struct ib_ah *address, u32 qpn,
                            dma_addr_t addr, int len)
{
        struct ib_send_wr *bad_wr;

        priv->tx_sge.addr = addr;
        priv->tx_sge.length = len;

        priv->tx_wr.wr_id = wr_id;
        priv->tx_wr.wr.ud.remote_qpn = qpn;
        priv->tx_wr.wr.ud.ah = address;

        return ib_post_send(priv->qp, &priv->tx_wr, &bad_wr);
}

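/*
 * Queue a single skb for transmission to the given address handle and
 * remote QPN. The skb is DMA-mapped and recorded in the TX ring before
 * the send is posted (the completion may run before post_send() returns),
 * and the netdev queue is stopped when the ring fills up.
 */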
void ipoib_send(struct net_device *dev, struct sk_buff *skb,
                struct ipoib_ah *address, u32 qpn)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_buf *tx_req;
        dma_addr_t addr;

        if (skb->len > dev->mtu + INFINIBAND_ALEN) {
                ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
                           skb->len, dev->mtu + INFINIBAND_ALEN);
                ++priv->stats.tx_dropped;
                ++priv->stats.tx_errors;
                dev_kfree_skb_any(skb);
                return;
        }

        ipoib_dbg_data(priv, "sending packet, length=%d address=%p qpn=0x%06x\n",
                       skb->len, address, qpn);

        /*
         * We put the skb into the tx_ring _before_ we call post_send()
         * because it's entirely possible that the completion handler will
         * run before we execute anything after the post_send(). That
         * means we have to make sure everything is properly recorded and
         * our state is consistent before we call post_send().
         */
        tx_req = &priv->tx_ring[priv->tx_head & (IPOIB_TX_RING_SIZE - 1)];
        tx_req->skb = skb;
        addr = dma_map_single(priv->ca->dma_device, skb->data, skb->len,
                              DMA_TO_DEVICE);
        pci_unmap_addr_set(tx_req, mapping, addr);

        if (unlikely(post_send(priv, priv->tx_head & (IPOIB_TX_RING_SIZE - 1),
                               address->ah, qpn, addr, skb->len))) {
                ipoib_warn(priv, "post_send failed\n");
                ++priv->stats.tx_errors;
                dma_unmap_single(priv->ca->dma_device, addr, skb->len,
                                 DMA_TO_DEVICE);
                dev_kfree_skb_any(skb);
        } else {
                dev->trans_start = jiffies;

                address->last_send = priv->tx_head;
                ++priv->tx_head;

                if (priv->tx_head - priv->tx_tail == IPOIB_TX_RING_SIZE) {
                        ipoib_dbg(priv, "TX ring full, stopping kernel net queue\n");
                        netif_stop_queue(dev);
                }
        }
}

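/*
 * Address handles cannot be destroyed while a send referencing them may
 * still be outstanding, so ipoib_free_ah() parks them on dead_ahs. The
 * reaper walks that list and destroys any AH whose last send has
 * completed (last_send <= tx_tail); ipoib_reap_ah() reschedules itself
 * once a second until IPOIB_STOP_REAPER is set.
 */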
static void __ipoib_reap_ah(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_ah *ah, *tah;
        LIST_HEAD(remove_list);

        spin_lock_irq(&priv->lock);
        list_for_each_entry_safe(ah, tah, &priv->dead_ahs, list)
                if (ah->last_send <= priv->tx_tail) {
                        list_del(&ah->list);
                        list_add_tail(&ah->list, &remove_list);
                }
        spin_unlock_irq(&priv->lock);

        list_for_each_entry_safe(ah, tah, &remove_list, list) {
                ipoib_dbg(priv, "Reaping ah %p\n", ah->ah);
                ib_destroy_ah(ah->ah);
                kfree(ah);
        }
}

void ipoib_reap_ah(void *dev_ptr)
{
        struct net_device *dev = dev_ptr;
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        __ipoib_reap_ah(dev);

        if (!test_bit(IPOIB_STOP_REAPER, &priv->flags))
                queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task, HZ);
}

int ipoib_ib_dev_open(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        int ret;

        ret = ipoib_qp_create(dev);
        if (ret) {
                ipoib_warn(priv, "ipoib_qp_create returned %d\n", ret);
                return -1;
        }

        ret = ipoib_ib_post_receives(dev);
        if (ret) {
                ipoib_warn(priv, "ipoib_ib_post_receives returned %d\n", ret);
                return -1;
        }

        clear_bit(IPOIB_STOP_REAPER, &priv->flags);
        queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task, HZ);

        return 0;
}

int ipoib_ib_dev_up(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        set_bit(IPOIB_FLAG_OPER_UP, &priv->flags);

        return ipoib_mcast_start_thread(dev);
}

int ipoib_ib_dev_down(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        ipoib_dbg(priv, "downing ib_dev\n");

        clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
        netif_carrier_off(dev);

        /* Shutdown the P_Key thread if still active */
        if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
                down(&pkey_sem);
                set_bit(IPOIB_PKEY_STOP, &priv->flags);
                cancel_delayed_work(&priv->pkey_task);
                up(&pkey_sem);
                flush_workqueue(ipoib_workqueue);
        }

        ipoib_mcast_stop_thread(dev);

        /*
         * Flush the multicast groups first so we stop any multicast joins. The
         * completion thread may have already died and we may deadlock waiting
         * for the completion thread to finish some multicast joins.
         */
        ipoib_mcast_dev_flush(dev);

        /* Delete broadcast and local addresses since they will be recreated */
        ipoib_mcast_dev_down(dev);

        ipoib_flush_paths(dev);

        return 0;
}

static int recvs_pending(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        int pending = 0;
        int i;

        for (i = 0; i < IPOIB_RX_RING_SIZE; ++i)
                if (priv->rx_ring[i].skb)
                        ++pending;

        return pending;
}

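/*
 * Stop the IB side of the interface: move the QP to the ERROR state so
 * outstanding work requests are flushed, wait (up to five seconds) for all
 * sends and receives to complete, reclaim anything still pending if the
 * hardware appears wedged, then reset the QP, stop the AH reaper and reap
 * any leftover address handles.
 */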
int ipoib_ib_dev_stop(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ib_qp_attr qp_attr;
        int attr_mask;
        unsigned long begin;
        struct ipoib_buf *tx_req;
        int i;

        /* Kill the existing QP and allocate a new one */
        qp_attr.qp_state = IB_QPS_ERR;
        attr_mask = IB_QP_STATE;
        if (ib_modify_qp(priv->qp, &qp_attr, attr_mask))
                ipoib_warn(priv, "Failed to modify QP to ERROR state\n");

        /* Wait for all sends and receives to complete */
        begin = jiffies;

        while (priv->tx_head != priv->tx_tail || recvs_pending(dev)) {
                if (time_after(jiffies, begin + 5 * HZ)) {
                        ipoib_warn(priv, "timing out; %d sends %d receives not completed\n",
                                   priv->tx_head - priv->tx_tail, recvs_pending(dev));

                        /*
                         * assume the HW is wedged and just free up
                         * all our pending work requests.
                         */
                        while (priv->tx_tail < priv->tx_head) {
                                tx_req = &priv->tx_ring[priv->tx_tail &
                                                        (IPOIB_TX_RING_SIZE - 1)];
                                dma_unmap_single(priv->ca->dma_device,
                                                 pci_unmap_addr(tx_req, mapping),
                                                 tx_req->skb->len,
                                                 DMA_TO_DEVICE);
                                dev_kfree_skb_any(tx_req->skb);
                                ++priv->tx_tail;
                        }

                        for (i = 0; i < IPOIB_RX_RING_SIZE; ++i)
                                if (priv->rx_ring[i].skb) {
                                        dma_unmap_single(priv->ca->dma_device,
                                                         pci_unmap_addr(&priv->rx_ring[i],
                                                                        mapping),
                                                         IPOIB_BUF_SIZE,
                                                         DMA_FROM_DEVICE);
                                        dev_kfree_skb_any(priv->rx_ring[i].skb);
                                        priv->rx_ring[i].skb = NULL;
                                }

                        goto timeout;
                }

                msleep(1);
        }

        ipoib_dbg(priv, "All sends and receives done.\n");

timeout:
        qp_attr.qp_state = IB_QPS_RESET;
        attr_mask = IB_QP_STATE;
        if (ib_modify_qp(priv->qp, &qp_attr, attr_mask))
                ipoib_warn(priv, "Failed to modify QP to RESET state\n");

        /* Wait for all AHs to be reaped */
        set_bit(IPOIB_STOP_REAPER, &priv->flags);
        cancel_delayed_work(&priv->ah_reap_task);
        flush_workqueue(ipoib_workqueue);

        begin = jiffies;

        while (!list_empty(&priv->dead_ahs)) {
                __ipoib_reap_ah(dev);

                if (time_after(jiffies, begin + HZ)) {
                        ipoib_warn(priv, "timing out; will leak address handles\n");
                        break;
                }

                msleep(1);
        }

        return 0;
}

int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        priv->ca = ca;
        priv->port = port;
        priv->qp = NULL;

        if (ipoib_transport_dev_init(dev, ca)) {
                printk(KERN_WARNING "%s: ipoib_transport_dev_init failed\n", ca->name);
                return -ENODEV;
        }

        if (dev->flags & IFF_UP) {
                if (ipoib_ib_dev_open(dev)) {
                        ipoib_transport_dev_cleanup(dev);
                        return -ENODEV;
                }
        }

        return 0;
}

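/*
 * Flush the interface's IB state: if the interface is administratively up,
 * take it down and bring it straight back up, then flush any child
 * interfaces the same way.
 */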
void ipoib_ib_dev_flush(void *_dev)
{
        struct net_device *dev = (struct net_device *)_dev;
        struct ipoib_dev_priv *priv = netdev_priv(dev), *cpriv;

        if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
                return;

        ipoib_dbg(priv, "flushing\n");

        ipoib_ib_dev_down(dev);

        /*
         * The device could have been brought down between the start and when
         * we get here; don't bring it back up if it's not configured up.
         */
        if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
                ipoib_ib_dev_up(dev);

        /* Flush any child interfaces too */
        list_for_each_entry(cpriv, &priv->child_intfs, list)
                ipoib_ib_dev_flush(&cpriv->dev);
}

void ipoib_ib_dev_cleanup(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        ipoib_dbg(priv, "cleaning up ib_dev\n");

        ipoib_mcast_stop_thread(dev);

        /* Delete the broadcast address and the local address */
        ipoib_mcast_dev_down(dev);

        ipoib_transport_dev_cleanup(dev);
}

/*
 * Delayed P_Key Assignment Interim Support
 *
 * The following is an initial implementation of the delayed P_Key
 * assignment mechanism. It uses the same approach implemented for the
 * multicast group join. The single goal of this implementation is to
 * quickly address Bug #2507. This implementation will probably be removed
 * when the P_Key change async notification is available.
 */
int ipoib_open(struct net_device *dev);

static void ipoib_pkey_dev_check_presence(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        u16 pkey_index = 0;

        if (ib_find_cached_pkey(priv->ca, priv->port, priv->pkey, &pkey_index))
                clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
        else
                set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
}

void ipoib_pkey_poll(void *dev_ptr)
{
        struct net_device *dev = dev_ptr;
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        ipoib_pkey_dev_check_presence(dev);

        if (test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags))
                ipoib_open(dev);
        else {
                down(&pkey_sem);
                if (!test_bit(IPOIB_PKEY_STOP, &priv->flags))
                        queue_delayed_work(ipoib_workqueue,
                                           &priv->pkey_task,
                                           HZ);
                up(&pkey_sem);
        }
}

int ipoib_pkey_dev_delay_open(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        /* Look for the interface pkey value in the IB Port P_Key table and */
        /* set the interface pkey assignment flag                           */
        ipoib_pkey_dev_check_presence(dev);

        /* P_Key value not assigned yet - start polling */
        if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
                down(&pkey_sem);
                clear_bit(IPOIB_PKEY_STOP, &priv->flags);
                queue_delayed_work(ipoib_workqueue,
                                   &priv->pkey_task,
                                   HZ);
                up(&pkey_sem);
                return 1;
        }

        return 0;
}