/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/debugfs.h>

#include <rdma/ib_verbs.h>

#include "iw_cxgb4.h"

#define DRV_VERSION "0.1"

MODULE_AUTHOR("Steve Wise");
MODULE_DESCRIPTION("Chelsio T4 RDMA Driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);

static LIST_HEAD(dev_list);
static DEFINE_MUTEX(dev_mutex);

static struct dentry *c4iw_debugfs_root;

struct c4iw_debugfs_data {
        struct c4iw_dev *devp;
        char *buf;
        int bufsize;
        int pos;
};

static int count_idrs(int id, void *p, void *data)
{
        int *countp = data;

        *countp = *countp + 1;
        return 0;
}

static ssize_t debugfs_read(struct file *file, char __user *buf, size_t count,
                            loff_t *ppos)
{
        struct c4iw_debugfs_data *d = file->private_data;

        return simple_read_from_buffer(buf, count, ppos, d->buf, d->pos);
}

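/*
 * The "qps" and "stags" debugfs files below share one pattern: open()
 * counts the idr entries under the device lock, sizes a buffer from that
 * count, walks the idr again to format one line per entry, and read()
 * then serves the finished snapshot via simple_read_from_buffer().  The
 * count starts at 1 so an empty idr still yields a buffer with room for
 * the trailing NUL, and a dump callback that runs out of room returns
 * nonzero so idr_for_each() stops early and the snapshot is silently
 * truncated.  In outline (names as used below, not a new API):
 *
 *      count = 1;
 *      idr_for_each(idr, count_idrs, &count);  ... count under the lock
 *      buf = kmalloc(count * per_entry_size, GFP_KERNEL);
 *      idr_for_each(idr, dump_fn, data);       ... format under the lock
 */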
static int dump_qp(int id, void *p, void *data)
{
        struct c4iw_qp *qp = p;
        struct c4iw_debugfs_data *qpd = data;
        int space;
        int cc;

        if (id != qp->wq.sq.qid)
                return 0;

        space = qpd->bufsize - qpd->pos - 1;
        if (space == 0)
                return 1;

        if (qp->ep)
                cc = snprintf(qpd->buf + qpd->pos, space,
                              "qp sq id %u rq id %u state %u onchip %u "
                              "ep tid %u state %u %pI4:%u->%pI4:%u\n",
                              qp->wq.sq.qid, qp->wq.rq.qid,
                              (int)qp->attr.state,
                              qp->wq.sq.flags & T4_SQ_ONCHIP,
                              qp->ep->hwtid, (int)qp->ep->com.state,
                              &qp->ep->com.local_addr.sin_addr.s_addr,
                              ntohs(qp->ep->com.local_addr.sin_port),
                              &qp->ep->com.remote_addr.sin_addr.s_addr,
                              ntohs(qp->ep->com.remote_addr.sin_port));
        else
                cc = snprintf(qpd->buf + qpd->pos, space,
                              "qp sq id %u rq id %u state %u onchip %u\n",
                              qp->wq.sq.qid, qp->wq.rq.qid,
                              (int)qp->attr.state,
                              qp->wq.sq.flags & T4_SQ_ONCHIP);
        if (cc < space)
                qpd->pos += cc;
        return 0;
}

static int qp_release(struct inode *inode, struct file *file)
{
        struct c4iw_debugfs_data *qpd = file->private_data;

        if (!qpd) {
                printk(KERN_INFO "%s null qpd?\n", __func__);
                return 0;
        }
        kfree(qpd->buf);
        kfree(qpd);
        return 0;
}

static int qp_open(struct inode *inode, struct file *file)
{
        struct c4iw_debugfs_data *qpd;
        int ret = 0;
        int count = 1;

        qpd = kmalloc(sizeof *qpd, GFP_KERNEL);
        if (!qpd) {
                ret = -ENOMEM;
                goto out;
        }
        qpd->devp = inode->i_private;
        qpd->pos = 0;

        spin_lock_irq(&qpd->devp->lock);
        idr_for_each(&qpd->devp->qpidr, count_idrs, &count);
        spin_unlock_irq(&qpd->devp->lock);

        qpd->bufsize = count * 128;
        qpd->buf = kmalloc(qpd->bufsize, GFP_KERNEL);
        if (!qpd->buf) {
                ret = -ENOMEM;
                goto err1;
        }

        spin_lock_irq(&qpd->devp->lock);
        idr_for_each(&qpd->devp->qpidr, dump_qp, qpd);
        spin_unlock_irq(&qpd->devp->lock);

        qpd->buf[qpd->pos++] = 0;
        file->private_data = qpd;
        goto out;
err1:
        kfree(qpd);
out:
        return ret;
}

static const struct file_operations qp_debugfs_fops = {
        .owner = THIS_MODULE,
        .open = qp_open,
        .release = qp_release,
        .read = debugfs_read,
        .llseek = default_llseek,
};

static int dump_stag(int id, void *p, void *data)
{
        struct c4iw_debugfs_data *stagd = data;
        int space;
        int cc;

        space = stagd->bufsize - stagd->pos - 1;
        if (space == 0)
                return 1;

        cc = snprintf(stagd->buf + stagd->pos, space, "0x%x\n", id << 8);
        if (cc < space)
                stagd->pos += cc;
        return 0;
}

static int stag_release(struct inode *inode, struct file *file)
{
        struct c4iw_debugfs_data *stagd = file->private_data;

        if (!stagd) {
                printk(KERN_INFO "%s null stagd?\n", __func__);
                return 0;
        }
        kfree(stagd->buf);
        kfree(stagd);
        return 0;
}

static int stag_open(struct inode *inode, struct file *file)
{
        struct c4iw_debugfs_data *stagd;
        int ret = 0;
        int count = 1;

        stagd = kmalloc(sizeof *stagd, GFP_KERNEL);
        if (!stagd) {
                ret = -ENOMEM;
                goto out;
        }
        stagd->devp = inode->i_private;
        stagd->pos = 0;

        spin_lock_irq(&stagd->devp->lock);
        idr_for_each(&stagd->devp->mmidr, count_idrs, &count);
        spin_unlock_irq(&stagd->devp->lock);

        stagd->bufsize = count * sizeof("0x12345678\n");
        stagd->buf = kmalloc(stagd->bufsize, GFP_KERNEL);
        if (!stagd->buf) {
                ret = -ENOMEM;
                goto err1;
        }

        spin_lock_irq(&stagd->devp->lock);
        idr_for_each(&stagd->devp->mmidr, dump_stag, stagd);
        spin_unlock_irq(&stagd->devp->lock);

        stagd->buf[stagd->pos++] = 0;
        file->private_data = stagd;
        goto out;
err1:
        kfree(stagd);
out:
        return ret;
}

static const struct file_operations stag_debugfs_fops = {
        .owner = THIS_MODULE,
        .open = stag_open,
        .release = stag_release,
        .read = debugfs_read,
        .llseek = default_llseek,
};

static int setup_debugfs(struct c4iw_dev *devp)
{
        struct dentry *de;

        if (!devp->debugfs_root)
                return -1;

        de = debugfs_create_file("qps", S_IWUSR, devp->debugfs_root,
                                 (void *)devp, &qp_debugfs_fops);
        if (de && de->d_inode)
                de->d_inode->i_size = 4096;

        de = debugfs_create_file("stags", S_IWUSR, devp->debugfs_root,
                                 (void *)devp, &stag_debugfs_fops);
        if (de && de->d_inode)
                de->d_inode->i_size = 4096;
        return 0;
}

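/*
 * qid accounting, for the release path below: qids come out of hardware
 * in doorbell-page-sized groups of udb_density (rdev->qpmask + 1).  Only
 * a group's base qid, the one with (qid & rdev->qpmask) == 0, is ever
 * taken from the global qid fifo; the other qids in the group are
 * parceled out through the per-ucontext free lists.  Releasing a
 * ucontext therefore returns only base qids to the fifo and simply frees
 * the rest.  (This describes the qid allocator in resource.c, which is
 * not shown in this file.)
 */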
void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
                               struct c4iw_dev_ucontext *uctx)
{
        struct list_head *pos, *nxt;
        struct c4iw_qid_list *entry;

        mutex_lock(&uctx->lock);
        list_for_each_safe(pos, nxt, &uctx->qpids) {
                entry = list_entry(pos, struct c4iw_qid_list, entry);
                list_del_init(&entry->entry);
                if (!(entry->qid & rdev->qpmask))
                        c4iw_put_resource(&rdev->resource.qid_fifo, entry->qid,
                                          &rdev->resource.qid_fifo_lock);
                kfree(entry);
        }

        list_for_each_safe(pos, nxt, &uctx->cqids) {
                entry = list_entry(pos, struct c4iw_qid_list, entry);
                list_del_init(&entry->entry);
                kfree(entry);
        }
        mutex_unlock(&uctx->lock);
}

void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev,
                            struct c4iw_dev_ucontext *uctx)
{
        INIT_LIST_HEAD(&uctx->qpids);
        INIT_LIST_HEAD(&uctx->cqids);
        mutex_init(&uctx->lock);
}

/* Caller takes care of locking if needed */
static int c4iw_rdev_open(struct c4iw_rdev *rdev)
{
        int err;

        c4iw_init_dev_ucontext(rdev, &rdev->uctx);

        /*
         * qpshift is the number of bits to shift the qpid left in order
         * to get the correct address of the doorbell for that qp.
         */
        rdev->qpshift = PAGE_SHIFT - ilog2(rdev->lldi.udb_density);
        rdev->qpmask = rdev->lldi.udb_density - 1;
        rdev->cqshift = PAGE_SHIFT - ilog2(rdev->lldi.ucq_density);
        rdev->cqmask = rdev->lldi.ucq_density - 1;
        PDBG("%s dev %s stag start 0x%0x size 0x%0x num stags %d "
             "pbl start 0x%0x size 0x%0x rq start 0x%0x size 0x%0x "
             "qp qid start %u size %u cq qid start %u size %u\n",
             __func__, pci_name(rdev->lldi.pdev), rdev->lldi.vr->stag.start,
             rdev->lldi.vr->stag.size, c4iw_num_stags(rdev),
             rdev->lldi.vr->pbl.start,
             rdev->lldi.vr->pbl.size, rdev->lldi.vr->rq.start,
             rdev->lldi.vr->rq.size,
             rdev->lldi.vr->qp.start,
             rdev->lldi.vr->qp.size,
             rdev->lldi.vr->cq.start,
             rdev->lldi.vr->cq.size);
        PDBG("udb len 0x%x udb base %p db_reg %p gts_reg %p qpshift %lu "
             "qpmask 0x%x cqshift %lu cqmask 0x%x\n",
             (unsigned)pci_resource_len(rdev->lldi.pdev, 2),
             (void *)pci_resource_start(rdev->lldi.pdev, 2),
             rdev->lldi.db_reg,
             rdev->lldi.gts_reg,
             rdev->qpshift, rdev->qpmask,
             rdev->cqshift, rdev->cqmask);

        if (c4iw_num_stags(rdev) == 0) {
                err = -EINVAL;
                goto err1;
        }

        err = c4iw_init_resource(rdev, c4iw_num_stags(rdev), T4_MAX_NUM_PD);
        if (err) {
                printk(KERN_ERR MOD "error %d initializing resources\n", err);
                goto err1;
        }
        err = c4iw_pblpool_create(rdev);
        if (err) {
                printk(KERN_ERR MOD "error %d initializing pbl pool\n", err);
                goto err2;
        }
        err = c4iw_rqtpool_create(rdev);
        if (err) {
                printk(KERN_ERR MOD "error %d initializing rqt pool\n", err);
                goto err3;
        }
        err = c4iw_ocqp_pool_create(rdev);
        if (err) {
                printk(KERN_ERR MOD "error %d initializing ocqp pool\n", err);
                goto err4;
        }
        return 0;
err4:
        c4iw_rqtpool_destroy(rdev);
err3:
        c4iw_pblpool_destroy(rdev);
err2:
        c4iw_destroy_resource(&rdev->resource);
err1:
        return err;
}

static void c4iw_rdev_close(struct c4iw_rdev *rdev)
{
        c4iw_pblpool_destroy(rdev);
        c4iw_rqtpool_destroy(rdev);
        c4iw_ocqp_pool_destroy(rdev);
        c4iw_destroy_resource(&rdev->resource);
}

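/*
 * A worked example for the doorbell math in c4iw_rdev_open() above,
 * using hypothetical densities: with PAGE_SHIFT = 12 (4KB pages) and
 * udb_density = 16 user doorbells per page,
 *
 *      qpshift = PAGE_SHIFT - ilog2(16) = 12 - 4 = 8
 *      qpmask  = 16 - 1 = 0xf
 *
 * so qp doorbells are spaced 1 << 8 = 256 bytes apart, the doorbell for
 * a qid lives at offset (qid << qpshift) into the doorbell region, and
 * (qid & qpmask) identifies the qid's slot within its doorbell page.
 * The cq math is identical with ucq_density.
 */
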
static void c4iw_remove(struct c4iw_dev *dev)
{
        PDBG("%s c4iw_dev %p\n", __func__, dev);
        list_del(&dev->entry);
        if (dev->registered)
                c4iw_unregister_device(dev);
        c4iw_rdev_close(&dev->rdev);
        idr_destroy(&dev->cqidr);
        idr_destroy(&dev->qpidr);
        idr_destroy(&dev->mmidr);
        iounmap(dev->rdev.oc_mw_kva);
        ib_dealloc_device(&dev->ibdev);
}

static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
{
        struct c4iw_dev *devp;
        int ret;

        devp = (struct c4iw_dev *)ib_alloc_device(sizeof(*devp));
        if (!devp) {
                printk(KERN_ERR MOD "Cannot allocate ib device\n");
                return NULL;
        }
        devp->rdev.lldi = *infop;

        /* The on-chip queue memory window sits at the top end of BAR2. */
        devp->rdev.oc_mw_pa = pci_resource_start(devp->rdev.lldi.pdev, 2) +
                (pci_resource_len(devp->rdev.lldi.pdev, 2) -
                 roundup_pow_of_two(devp->rdev.lldi.vr->ocq.size));
        devp->rdev.oc_mw_kva = ioremap_wc(devp->rdev.oc_mw_pa,
                                          devp->rdev.lldi.vr->ocq.size);

        printk(KERN_INFO MOD "ocq memory: "
               "hw_start 0x%x size %u mw_pa 0x%lx mw_kva %p\n",
               devp->rdev.lldi.vr->ocq.start, devp->rdev.lldi.vr->ocq.size,
               devp->rdev.oc_mw_pa, devp->rdev.oc_mw_kva);

        mutex_lock(&dev_mutex);

        ret = c4iw_rdev_open(&devp->rdev);
        if (ret) {
                mutex_unlock(&dev_mutex);
                printk(KERN_ERR MOD "Unable to open CXIO rdev err %d\n", ret);
                ib_dealloc_device(&devp->ibdev);
                return NULL;
        }

        idr_init(&devp->cqidr);
        idr_init(&devp->qpidr);
        idr_init(&devp->mmidr);
        spin_lock_init(&devp->lock);
        list_add_tail(&devp->entry, &dev_list);
        mutex_unlock(&dev_mutex);

        if (c4iw_debugfs_root) {
                devp->debugfs_root = debugfs_create_dir(
                                        pci_name(devp->rdev.lldi.pdev),
                                        c4iw_debugfs_root);
                setup_debugfs(devp);
        }
        return devp;
}

static void *c4iw_uld_add(const struct cxgb4_lld_info *infop)
{
        struct c4iw_dev *dev;
        static int vers_printed;
        int i;

        if (!vers_printed++)
                printk(KERN_INFO MOD "Chelsio T4 RDMA Driver - version %s\n",
                       DRV_VERSION);

        dev = c4iw_alloc(infop);
        if (!dev)
                goto out;

        PDBG("%s found device %s nchan %u nrxq %u ntxq %u nports %u\n",
             __func__, pci_name(dev->rdev.lldi.pdev),
             dev->rdev.lldi.nchan, dev->rdev.lldi.nrxq,
             dev->rdev.lldi.ntxq, dev->rdev.lldi.nports);

        for (i = 0; i < dev->rdev.lldi.nrxq; i++)
                PDBG("rxqid[%u] %u\n", i, dev->rdev.lldi.rxq_ids[i]);
out:
        return dev;
}

static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp,
                               const struct pkt_gl *gl)
{
        struct c4iw_dev *dev = handle;
        struct sk_buff *skb;
        const struct cpl_act_establish *rpl;
        unsigned int opcode;

        if (gl == NULL) {
                /* omit RSS and rsp_ctrl at end of descriptor */
                unsigned int len = 64 - sizeof(struct rsp_ctrl) - 8;

                skb = alloc_skb(256, GFP_ATOMIC);
                if (!skb)
                        goto nomem;
                __skb_put(skb, len);
                skb_copy_to_linear_data(skb, &rsp[1], len);
        } else if (gl == CXGB4_MSG_AN) {
                const struct rsp_ctrl *rc = (void *)rsp;
                u32 qid = be32_to_cpu(rc->pldbuflen_qid);

                c4iw_ev_handler(dev, qid);
                return 0;
        } else {
                skb = cxgb4_pktgl_to_skb(gl, 128, 128);
                if (unlikely(!skb))
                        goto nomem;
        }

        rpl = cplhdr(skb);
        opcode = rpl->ot.opcode;

        if (c4iw_handlers[opcode])
                c4iw_handlers[opcode](dev, skb);
        else
                printk(KERN_INFO MOD "%s no handler opcode 0x%x...\n",
                       __func__, opcode);

        return 0;
nomem:
        return -1;
}

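/*
 * ULD state-change callback: cxgb4 reports lower-layer state transitions
 * here, and the driver registers or unregisters its ib_device to match.
 * On a fatal error, consumers are told via IB_EVENT_DEVICE_FATAL before
 * the device is unregistered.  A minimal sketch of how a verbs consumer
 * would observe that event (standard ib_verbs API, not part of this
 * driver):
 *
 *      static void ev_cb(struct ib_event_handler *h, struct ib_event *ev)
 *      {
 *              if (ev->event == IB_EVENT_DEVICE_FATAL)
 *                      ;  (stop posting work, destroy QPs/CQs)
 *      }
 *      INIT_IB_EVENT_HANDLER(&handler, device, ev_cb);
 *      ib_register_event_handler(&handler);
 */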
static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state)
{
        struct c4iw_dev *dev = handle;

        PDBG("%s new_state %u\n", __func__, new_state);
        switch (new_state) {
        case CXGB4_STATE_UP:
                printk(KERN_INFO MOD "%s: Up\n",
                       pci_name(dev->rdev.lldi.pdev));
                if (!dev->registered) {
                        int ret;

                        ret = c4iw_register_device(dev);
                        if (ret)
                                printk(KERN_ERR MOD
                                       "%s: RDMA registration failed: %d\n",
                                       pci_name(dev->rdev.lldi.pdev), ret);
                }
                break;
        case CXGB4_STATE_DOWN:
                printk(KERN_INFO MOD "%s: Down\n",
                       pci_name(dev->rdev.lldi.pdev));
                if (dev->registered)
                        c4iw_unregister_device(dev);
                break;
        case CXGB4_STATE_START_RECOVERY:
                printk(KERN_INFO MOD "%s: Fatal Error\n",
                       pci_name(dev->rdev.lldi.pdev));
                dev->rdev.flags |= T4_FATAL_ERROR;
                if (dev->registered) {
                        struct ib_event event;

                        memset(&event, 0, sizeof event);
                        event.event = IB_EVENT_DEVICE_FATAL;
                        event.device = &dev->ibdev;
                        ib_dispatch_event(&event);
                        c4iw_unregister_device(dev);
                }
                break;
        case CXGB4_STATE_DETACH:
                printk(KERN_INFO MOD "%s: Detach\n",
                       pci_name(dev->rdev.lldi.pdev));
                mutex_lock(&dev_mutex);
                c4iw_remove(dev);
                mutex_unlock(&dev_mutex);
                break;
        }
        return 0;
}

static struct cxgb4_uld_info c4iw_uld_info = {
        .name = DRV_NAME,
        .add = c4iw_uld_add,
        .rx_handler = c4iw_uld_rx_handler,
        .state_change = c4iw_uld_state_change,
};

static int __init c4iw_init_module(void)
{
        int err;

        err = c4iw_cm_init();
        if (err)
                return err;

        c4iw_debugfs_root = debugfs_create_dir(DRV_NAME, NULL);
        if (!c4iw_debugfs_root)
                printk(KERN_WARNING MOD
                       "could not create debugfs entry, continuing\n");

        cxgb4_register_uld(CXGB4_ULD_RDMA, &c4iw_uld_info);

        return 0;
}

static void __exit c4iw_exit_module(void)
{
        struct c4iw_dev *dev, *tmp;

        mutex_lock(&dev_mutex);
        list_for_each_entry_safe(dev, tmp, &dev_list, entry)
                c4iw_remove(dev);
        mutex_unlock(&dev_mutex);
        cxgb4_unregister_uld(CXGB4_ULD_RDMA);
        c4iw_cm_term();
        debugfs_remove_recursive(c4iw_debugfs_root);
}

module_init(c4iw_init_module);
module_exit(c4iw_exit_module);