/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/debugfs.h>
#include <linux/vmalloc.h>
#include <linux/math64.h>

#include <rdma/ib_verbs.h>

#include "iw_cxgb4.h"

#define DRV_VERSION "0.1"

MODULE_AUTHOR("Steve Wise");
MODULE_DESCRIPTION("Chelsio T4/T5 RDMA Driver");
MODULE_LICENSE("Dual BSD/GPL");

static int allow_db_fc_on_t5;
module_param(allow_db_fc_on_t5, int, 0644);
MODULE_PARM_DESC(allow_db_fc_on_t5,
		 "Allow DB Flow Control on T5 (default = 0)");

static int allow_db_coalescing_on_t5;
module_param(allow_db_coalescing_on_t5, int, 0644);
MODULE_PARM_DESC(allow_db_coalescing_on_t5,
		 "Allow DB Coalescing on T5 (default = 0)");

int c4iw_wr_log = 0;
module_param(c4iw_wr_log, int, 0444);
MODULE_PARM_DESC(c4iw_wr_log, "Enables logging of work request timing data.");

static int c4iw_wr_log_size_order = 12;
module_param(c4iw_wr_log_size_order, int, 0444);
MODULE_PARM_DESC(c4iw_wr_log_size_order,
		 "Number of entries (log2) in the work request timing log.");

static LIST_HEAD(uld_ctx_list);
static DEFINE_MUTEX(dev_mutex);
static struct workqueue_struct *reg_workq;

#define DB_FC_RESUME_SIZE 64
#define DB_FC_RESUME_DELAY 1
#define DB_FC_DRAIN_THRESH 0

static struct dentry *c4iw_debugfs_root;

struct c4iw_debugfs_data {
	struct c4iw_dev *devp;
	char *buf;
	int bufsize;
	int pos;
};

static int count_idrs(int id, void *p, void *data)
{
	int *countp = data;

	*countp = *countp + 1;
	return 0;
}

static ssize_t debugfs_read(struct file *file, char __user *buf, size_t count,
			    loff_t *ppos)
{
	struct c4iw_debugfs_data *d = file->private_data;

	return simple_read_from_buffer(buf, count, ppos, d->buf, d->pos);
}
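
/*
 * c4iw_log_wr_stats() - capture one work-request timing sample.
 *
 * Claims the next slot in the rdev->wr_log ring (wr_log_size is a
 * power of two, so the atomically incremented index is simply masked)
 * and records the SGE and host timestamps taken when the WR was
 * posted and when its CQE was polled.  wr_log_show() turns these into
 * per-WR latency deltas.  Logging is enabled via the c4iw_wr_log
 * module parameter, e.g. (illustrative):
 *
 *   modprobe iw_cxgb4 c4iw_wr_log=1 c4iw_wr_log_size_order=12
 */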
void c4iw_log_wr_stats(struct t4_wq *wq, struct t4_cqe *cqe)
{
	struct wr_log_entry le;
	int idx;

	if (!wq->rdev->wr_log)
		return;

	idx = (atomic_inc_return(&wq->rdev->wr_log_idx) - 1) &
		(wq->rdev->wr_log_size - 1);
	le.poll_sge_ts = cxgb4_read_sge_timestamp(wq->rdev->lldi.ports[0]);
	le.poll_host_time = ktime_get();
	le.valid = 1;
	le.cqe_sge_ts = CQE_TS(cqe);
	if (SQ_TYPE(cqe)) {
		le.qid = wq->sq.qid;
		le.opcode = CQE_OPCODE(cqe);
		le.post_host_time = wq->sq.sw_sq[wq->sq.cidx].host_time;
		le.post_sge_ts = wq->sq.sw_sq[wq->sq.cidx].sge_ts;
		le.wr_id = CQE_WRID_SQ_IDX(cqe);
	} else {
		le.qid = wq->rq.qid;
		le.opcode = FW_RI_RECEIVE;
		le.post_host_time = wq->rq.sw_rq[wq->rq.cidx].host_time;
		le.post_sge_ts = wq->rq.sw_rq[wq->rq.cidx].sge_ts;
		le.wr_id = CQE_WRID_MSN(cqe);
	}
	wq->rdev->wr_log[idx] = le;
}

static int wr_log_show(struct seq_file *seq, void *v)
{
	struct c4iw_dev *dev = seq->private;
	ktime_t prev_time;
	struct wr_log_entry *lep;
	int prev_time_set = 0;
	int idx, end;

#define ts2ns(ts) div64_u64((ts) * dev->rdev.lldi.cclk_ps, 1000)

	idx = atomic_read(&dev->rdev.wr_log_idx) &
		(dev->rdev.wr_log_size - 1);
	end = idx - 1;
	if (end < 0)
		end = dev->rdev.wr_log_size - 1;
	lep = &dev->rdev.wr_log[idx];
	while (idx != end) {
		if (lep->valid) {
			if (!prev_time_set) {
				prev_time_set = 1;
				prev_time = lep->poll_host_time;
			}
			seq_printf(seq, "%04u: nsec %llu qid %u opcode "
				   "%u %s 0x%x host_wr_delta nsec %llu "
				   "post_sge_ts 0x%llx cqe_sge_ts 0x%llx "
				   "poll_sge_ts 0x%llx post_poll_delta_ns %llu "
				   "cqe_poll_delta_ns %llu\n",
				   idx,
				   ktime_to_ns(ktime_sub(lep->poll_host_time,
							 prev_time)),
				   lep->qid, lep->opcode,
				   lep->opcode == FW_RI_RECEIVE ?
							"msn" : "wrid",
				   lep->wr_id,
				   ktime_to_ns(ktime_sub(lep->poll_host_time,
							 lep->post_host_time)),
				   lep->post_sge_ts, lep->cqe_sge_ts,
				   lep->poll_sge_ts,
				   ts2ns(lep->poll_sge_ts - lep->post_sge_ts),
				   ts2ns(lep->poll_sge_ts - lep->cqe_sge_ts));
			prev_time = lep->poll_host_time;
		}
		idx++;
		if (idx > (dev->rdev.wr_log_size - 1))
			idx = 0;
		lep = &dev->rdev.wr_log[idx];
	}
#undef ts2ns
	return 0;
}

static int wr_log_open(struct inode *inode, struct file *file)
{
	return single_open(file, wr_log_show, inode->i_private);
}

static ssize_t wr_log_clear(struct file *file, const char __user *buf,
			    size_t count, loff_t *pos)
{
	struct c4iw_dev *dev = ((struct seq_file *)file->private_data)->private;
	int i;

	if (dev->rdev.wr_log)
		for (i = 0; i < dev->rdev.wr_log_size; i++)
			dev->rdev.wr_log[i].valid = 0;
	return count;
}

static const struct file_operations wr_log_debugfs_fops = {
	.owner = THIS_MODULE,
	.open = wr_log_open,
	.release = single_release,
	.read = seq_read,
	.llseek = seq_lseek,
	.write = wr_log_clear,
};

static struct sockaddr_in zero_sin = {
	.sin_family = AF_INET,
};

static struct sockaddr_in6 zero_sin6 = {
	.sin6_family = AF_INET6,
};
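
/*
 * set_ep_sin_addrs()/set_ep_sin6_addrs() - return both views of an
 * endpoint's addresses for the debugfs dumps: the cm_id's addresses
 * (lsin/rsin) and the possibly port-mapped addresses stored in the
 * endpoint (m_lsin/m_rsin).  If the cm_id is already gone, the zeroed
 * sockaddrs above are returned so callers can print unconditionally.
 */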
static void set_ep_sin_addrs(struct c4iw_ep *ep,
			     struct sockaddr_in **lsin,
			     struct sockaddr_in **rsin,
			     struct sockaddr_in **m_lsin,
			     struct sockaddr_in **m_rsin)
{
	struct iw_cm_id *id = ep->com.cm_id;

	*m_lsin = (struct sockaddr_in *)&ep->com.local_addr;
	*m_rsin = (struct sockaddr_in *)&ep->com.remote_addr;
	if (id) {
		*lsin = (struct sockaddr_in *)&id->local_addr;
		*rsin = (struct sockaddr_in *)&id->remote_addr;
	} else {
		*lsin = &zero_sin;
		*rsin = &zero_sin;
	}
}

static void set_ep_sin6_addrs(struct c4iw_ep *ep,
			      struct sockaddr_in6 **lsin6,
			      struct sockaddr_in6 **rsin6,
			      struct sockaddr_in6 **m_lsin6,
			      struct sockaddr_in6 **m_rsin6)
{
	struct iw_cm_id *id = ep->com.cm_id;

	*m_lsin6 = (struct sockaddr_in6 *)&ep->com.local_addr;
	*m_rsin6 = (struct sockaddr_in6 *)&ep->com.remote_addr;
	if (id) {
		*lsin6 = (struct sockaddr_in6 *)&id->local_addr;
		*rsin6 = (struct sockaddr_in6 *)&id->remote_addr;
	} else {
		*lsin6 = &zero_sin6;
		*rsin6 = &zero_sin6;
	}
}

static int dump_qp(int id, void *p, void *data)
{
	struct c4iw_qp *qp = p;
	struct c4iw_debugfs_data *qpd = data;
	int space;
	int cc;

	if (id != qp->wq.sq.qid)
		return 0;

	space = qpd->bufsize - qpd->pos - 1;
	if (space == 0)
		return 1;

	if (qp->ep) {
		struct c4iw_ep *ep = qp->ep;

		if (ep->com.local_addr.ss_family == AF_INET) {
			struct sockaddr_in *lsin;
			struct sockaddr_in *rsin;
			struct sockaddr_in *m_lsin;
			struct sockaddr_in *m_rsin;

			set_ep_sin_addrs(ep, &lsin, &rsin, &m_lsin, &m_rsin);
			cc = snprintf(qpd->buf + qpd->pos, space,
				      "rc qp sq id %u %s id %u state %u "
				      "onchip %u ep tid %u state %u "
				      "%pI4:%u/%u->%pI4:%u/%u\n",
				      qp->wq.sq.qid, qp->srq ? "srq" : "rq",
				      qp->srq ? qp->srq->idx : qp->wq.rq.qid,
				      (int)qp->attr.state,
				      qp->wq.sq.flags & T4_SQ_ONCHIP,
				      ep->hwtid, (int)ep->com.state,
				      &lsin->sin_addr, ntohs(lsin->sin_port),
				      ntohs(m_lsin->sin_port),
				      &rsin->sin_addr, ntohs(rsin->sin_port),
				      ntohs(m_rsin->sin_port));
		} else {
			struct sockaddr_in6 *lsin6;
			struct sockaddr_in6 *rsin6;
			struct sockaddr_in6 *m_lsin6;
			struct sockaddr_in6 *m_rsin6;

			set_ep_sin6_addrs(ep, &lsin6, &rsin6, &m_lsin6,
					  &m_rsin6);
			cc = snprintf(qpd->buf + qpd->pos, space,
				      "rc qp sq id %u rq id %u state %u "
				      "onchip %u ep tid %u state %u "
				      "%pI6:%u/%u->%pI6:%u/%u\n",
				      qp->wq.sq.qid, qp->wq.rq.qid,
				      (int)qp->attr.state,
				      qp->wq.sq.flags & T4_SQ_ONCHIP,
				      ep->hwtid, (int)ep->com.state,
				      &lsin6->sin6_addr,
				      ntohs(lsin6->sin6_port),
				      ntohs(m_lsin6->sin6_port),
				      &rsin6->sin6_addr,
				      ntohs(rsin6->sin6_port),
				      ntohs(m_rsin6->sin6_port));
		}
	} else
		cc = snprintf(qpd->buf + qpd->pos, space,
			      "qp sq id %u rq id %u state %u onchip %u\n",
			      qp->wq.sq.qid, qp->wq.rq.qid,
			      (int)qp->attr.state,
			      qp->wq.sq.flags & T4_SQ_ONCHIP);
	if (cc < space)
		qpd->pos += cc;
	return 0;
}

static int qp_release(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *qpd = file->private_data;

	if (!qpd) {
		pr_info("%s null qpd?\n", __func__);
		return 0;
	}
	vfree(qpd->buf);
	kfree(qpd);
	return 0;
}
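
/*
 * qp_open() - snapshot all QPs into a private buffer for the "qps"
 * debugfs file (typically /sys/kernel/debug/iw_cxgb4/<pci-id>/qps).
 * The IDR is walked twice under the device lock: once to size the
 * buffer (count starts at 1 to leave room for the trailing NUL) and
 * once to format each entry; the ~180 bytes per QP is a heuristic,
 * and dump_qp() simply stops once the buffer fills.
 */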
static int qp_open(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *qpd;
	int count = 1;

	qpd = kmalloc(sizeof(*qpd), GFP_KERNEL);
	if (!qpd)
		return -ENOMEM;

	qpd->devp = inode->i_private;
	qpd->pos = 0;

	spin_lock_irq(&qpd->devp->lock);
	idr_for_each(&qpd->devp->qpidr, count_idrs, &count);
	spin_unlock_irq(&qpd->devp->lock);

	qpd->bufsize = count * 180;
	qpd->buf = vmalloc(qpd->bufsize);
	if (!qpd->buf) {
		kfree(qpd);
		return -ENOMEM;
	}

	spin_lock_irq(&qpd->devp->lock);
	idr_for_each(&qpd->devp->qpidr, dump_qp, qpd);
	spin_unlock_irq(&qpd->devp->lock);

	qpd->buf[qpd->pos++] = 0;
	file->private_data = qpd;
	return 0;
}

static const struct file_operations qp_debugfs_fops = {
	.owner = THIS_MODULE,
	.open = qp_open,
	.release = qp_release,
	.read = debugfs_read,
	.llseek = default_llseek,
};

static int dump_stag(int id, void *p, void *data)
{
	struct c4iw_debugfs_data *stagd = data;
	int space;
	int cc;
	struct fw_ri_tpte tpte;
	int ret;

	space = stagd->bufsize - stagd->pos - 1;
	if (space == 0)
		return 1;

	ret = cxgb4_read_tpte(stagd->devp->rdev.lldi.ports[0], (u32)id << 8,
			      (__be32 *)&tpte);
	if (ret) {
		dev_err(&stagd->devp->rdev.lldi.pdev->dev,
			"%s cxgb4_read_tpte err %d\n", __func__, ret);
		return ret;
	}
	cc = snprintf(stagd->buf + stagd->pos, space,
		      "stag: idx 0x%x valid %d key 0x%x state %d pdid %d "
		      "perm 0x%x ps %d len 0x%llx va 0x%llx\n",
		      (u32)id << 8,
		      FW_RI_TPTE_VALID_G(ntohl(tpte.valid_to_pdid)),
		      FW_RI_TPTE_STAGKEY_G(ntohl(tpte.valid_to_pdid)),
		      FW_RI_TPTE_STAGSTATE_G(ntohl(tpte.valid_to_pdid)),
		      FW_RI_TPTE_PDID_G(ntohl(tpte.valid_to_pdid)),
		      FW_RI_TPTE_PERM_G(ntohl(tpte.locread_to_qpid)),
		      FW_RI_TPTE_PS_G(ntohl(tpte.locread_to_qpid)),
		      ((u64)ntohl(tpte.len_hi) << 32) | ntohl(tpte.len_lo),
		      ((u64)ntohl(tpte.va_hi) << 32) | ntohl(tpte.va_lo_fbo));
	if (cc < space)
		stagd->pos += cc;
	return 0;
}

static int stag_release(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *stagd = file->private_data;

	if (!stagd) {
		pr_info("%s null stagd?\n", __func__);
		return 0;
	}
	vfree(stagd->buf);
	kfree(stagd);
	return 0;
}

static int stag_open(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *stagd;
	int ret = 0;
	int count = 1;

	stagd = kmalloc(sizeof(*stagd), GFP_KERNEL);
	if (!stagd) {
		ret = -ENOMEM;
		goto out;
	}
	stagd->devp = inode->i_private;
	stagd->pos = 0;

	spin_lock_irq(&stagd->devp->lock);
	idr_for_each(&stagd->devp->mmidr, count_idrs, &count);
	spin_unlock_irq(&stagd->devp->lock);

	stagd->bufsize = count * 256;
	stagd->buf = vmalloc(stagd->bufsize);
	if (!stagd->buf) {
		ret = -ENOMEM;
		goto err1;
	}

	spin_lock_irq(&stagd->devp->lock);
	idr_for_each(&stagd->devp->mmidr, dump_stag, stagd);
	spin_unlock_irq(&stagd->devp->lock);

	stagd->buf[stagd->pos++] = 0;
	file->private_data = stagd;
	goto out;
err1:
	kfree(stagd);
out:
	return ret;
}

static const struct file_operations stag_debugfs_fops = {
	.owner = THIS_MODULE,
	.open = stag_open,
	.release = stag_release,
	.read = debugfs_read,
	.llseek = default_llseek,
};
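
/*
 * Human-readable names for dev->db_state; the order must match the
 * db_state enum (NORMAL, FLOW_CONTROL, RECOVERY, STOPPED) driving the
 * doorbell flow-control state machine below.
 */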
static char *db_state_str[] = {"NORMAL", "FLOW_CONTROL", "RECOVERY", "STOPPED"};

static int stats_show(struct seq_file *seq, void *v)
{
	struct c4iw_dev *dev = seq->private;

	seq_printf(seq, "   Object: %10s %10s %10s %10s\n", "Total", "Current",
		   "Max", "Fail");
	seq_printf(seq, "     PDID: %10llu %10llu %10llu %10llu\n",
		   dev->rdev.stats.pd.total, dev->rdev.stats.pd.cur,
		   dev->rdev.stats.pd.max, dev->rdev.stats.pd.fail);
	seq_printf(seq, "      QID: %10llu %10llu %10llu %10llu\n",
		   dev->rdev.stats.qid.total, dev->rdev.stats.qid.cur,
		   dev->rdev.stats.qid.max, dev->rdev.stats.qid.fail);
	seq_printf(seq, "     SRQS: %10llu %10llu %10llu %10llu\n",
		   dev->rdev.stats.srqt.total, dev->rdev.stats.srqt.cur,
		   dev->rdev.stats.srqt.max, dev->rdev.stats.srqt.fail);
	seq_printf(seq, "   TPTMEM: %10llu %10llu %10llu %10llu\n",
		   dev->rdev.stats.stag.total, dev->rdev.stats.stag.cur,
		   dev->rdev.stats.stag.max, dev->rdev.stats.stag.fail);
	seq_printf(seq, "   PBLMEM: %10llu %10llu %10llu %10llu\n",
		   dev->rdev.stats.pbl.total, dev->rdev.stats.pbl.cur,
		   dev->rdev.stats.pbl.max, dev->rdev.stats.pbl.fail);
	seq_printf(seq, "   RQTMEM: %10llu %10llu %10llu %10llu\n",
		   dev->rdev.stats.rqt.total, dev->rdev.stats.rqt.cur,
		   dev->rdev.stats.rqt.max, dev->rdev.stats.rqt.fail);
	seq_printf(seq, "  OCQPMEM: %10llu %10llu %10llu %10llu\n",
		   dev->rdev.stats.ocqp.total, dev->rdev.stats.ocqp.cur,
		   dev->rdev.stats.ocqp.max, dev->rdev.stats.ocqp.fail);
	seq_printf(seq, "  DB FULL: %10llu\n", dev->rdev.stats.db_full);
	seq_printf(seq, " DB EMPTY: %10llu\n", dev->rdev.stats.db_empty);
	seq_printf(seq, "  DB DROP: %10llu\n", dev->rdev.stats.db_drop);
	seq_printf(seq, " DB State: %s Transitions %llu FC Interruptions %llu\n",
		   db_state_str[dev->db_state],
		   dev->rdev.stats.db_state_transitions,
		   dev->rdev.stats.db_fc_interruptions);
	seq_printf(seq, "TCAM_FULL: %10llu\n", dev->rdev.stats.tcam_full);
	seq_printf(seq, "ACT_OFLD_CONN_FAILS: %10llu\n",
		   dev->rdev.stats.act_ofld_conn_fails);
	seq_printf(seq, "PAS_OFLD_CONN_FAILS: %10llu\n",
		   dev->rdev.stats.pas_ofld_conn_fails);
	seq_printf(seq, "NEG_ADV_RCVD: %10llu\n", dev->rdev.stats.neg_adv);
	seq_printf(seq, "AVAILABLE IRD: %10u\n", dev->avail_ird);
	return 0;
}

static int stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, stats_show, inode->i_private);
}

static ssize_t stats_clear(struct file *file, const char __user *buf,
			   size_t count, loff_t *pos)
{
	struct c4iw_dev *dev = ((struct seq_file *)file->private_data)->private;

	mutex_lock(&dev->rdev.stats.lock);
	dev->rdev.stats.pd.max = 0;
	dev->rdev.stats.pd.fail = 0;
	dev->rdev.stats.qid.max = 0;
	dev->rdev.stats.qid.fail = 0;
	dev->rdev.stats.stag.max = 0;
	dev->rdev.stats.stag.fail = 0;
	dev->rdev.stats.pbl.max = 0;
	dev->rdev.stats.pbl.fail = 0;
	dev->rdev.stats.rqt.max = 0;
	dev->rdev.stats.rqt.fail = 0;
	dev->rdev.stats.srqt.max = 0;
	dev->rdev.stats.srqt.fail = 0;
	dev->rdev.stats.ocqp.max = 0;
	dev->rdev.stats.ocqp.fail = 0;
	dev->rdev.stats.db_full = 0;
	dev->rdev.stats.db_empty = 0;
	dev->rdev.stats.db_drop = 0;
	dev->rdev.stats.db_state_transitions = 0;
	dev->rdev.stats.tcam_full = 0;
	dev->rdev.stats.act_ofld_conn_fails = 0;
	dev->rdev.stats.pas_ofld_conn_fails = 0;
	mutex_unlock(&dev->rdev.stats.lock);
	return count;
}

static const struct file_operations stats_debugfs_fops = {
	.owner = THIS_MODULE,
	.open = stats_open,
	.release = single_release,
	.read = seq_read,
	.llseek = seq_lseek,
	.write = stats_clear,
};
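
/*
 * dump_ep() - format one active endpoint for the "eps" debugfs file,
 * including the negative-advice counters and both the cm_id-visible
 * and port-mapped local/remote addresses.
 */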
static int dump_ep(int id, void *p, void *data)
{
	struct c4iw_ep *ep = p;
	struct c4iw_debugfs_data *epd = data;
	int space;
	int cc;

	space = epd->bufsize - epd->pos - 1;
	if (space == 0)
		return 1;

	if (ep->com.local_addr.ss_family == AF_INET) {
		struct sockaddr_in *lsin;
		struct sockaddr_in *rsin;
		struct sockaddr_in *m_lsin;
		struct sockaddr_in *m_rsin;

		set_ep_sin_addrs(ep, &lsin, &rsin, &m_lsin, &m_rsin);
		cc = snprintf(epd->buf + epd->pos, space,
			      "ep %p cm_id %p qp %p state %d flags 0x%lx "
			      "history 0x%lx hwtid %d atid %d "
			      "conn_na %u abort_na %u "
			      "%pI4:%d/%d <-> %pI4:%d/%d\n",
			      ep, ep->com.cm_id, ep->com.qp,
			      (int)ep->com.state, ep->com.flags,
			      ep->com.history, ep->hwtid, ep->atid,
			      ep->stats.connect_neg_adv,
			      ep->stats.abort_neg_adv,
			      &lsin->sin_addr, ntohs(lsin->sin_port),
			      ntohs(m_lsin->sin_port),
			      &rsin->sin_addr, ntohs(rsin->sin_port),
			      ntohs(m_rsin->sin_port));
	} else {
		struct sockaddr_in6 *lsin6;
		struct sockaddr_in6 *rsin6;
		struct sockaddr_in6 *m_lsin6;
		struct sockaddr_in6 *m_rsin6;

		set_ep_sin6_addrs(ep, &lsin6, &rsin6, &m_lsin6, &m_rsin6);
		cc = snprintf(epd->buf + epd->pos, space,
			      "ep %p cm_id %p qp %p state %d flags 0x%lx "
			      "history 0x%lx hwtid %d atid %d "
			      "conn_na %u abort_na %u "
			      "%pI6:%d/%d <-> %pI6:%d/%d\n",
			      ep, ep->com.cm_id, ep->com.qp,
			      (int)ep->com.state, ep->com.flags,
			      ep->com.history, ep->hwtid, ep->atid,
			      ep->stats.connect_neg_adv,
			      ep->stats.abort_neg_adv,
			      &lsin6->sin6_addr, ntohs(lsin6->sin6_port),
			      ntohs(m_lsin6->sin6_port),
			      &rsin6->sin6_addr, ntohs(rsin6->sin6_port),
			      ntohs(m_rsin6->sin6_port));
	}
	if (cc < space)
		epd->pos += cc;
	return 0;
}

static int dump_listen_ep(int id, void *p, void *data)
{
	struct c4iw_listen_ep *ep = p;
	struct c4iw_debugfs_data *epd = data;
	int space;
	int cc;

	space = epd->bufsize - epd->pos - 1;
	if (space == 0)
		return 1;

	if (ep->com.local_addr.ss_family == AF_INET) {
		struct sockaddr_in *lsin = (struct sockaddr_in *)
			&ep->com.cm_id->local_addr;
		struct sockaddr_in *m_lsin = (struct sockaddr_in *)
			&ep->com.cm_id->m_local_addr;

		cc = snprintf(epd->buf + epd->pos, space,
			      "ep %p cm_id %p state %d flags 0x%lx stid %d "
			      "backlog %d %pI4:%d/%d\n",
			      ep, ep->com.cm_id, (int)ep->com.state,
			      ep->com.flags, ep->stid, ep->backlog,
			      &lsin->sin_addr, ntohs(lsin->sin_port),
			      ntohs(m_lsin->sin_port));
	} else {
		struct sockaddr_in6 *lsin6 = (struct sockaddr_in6 *)
			&ep->com.cm_id->local_addr;
		struct sockaddr_in6 *m_lsin6 = (struct sockaddr_in6 *)
			&ep->com.cm_id->m_local_addr;

		cc = snprintf(epd->buf + epd->pos, space,
			      "ep %p cm_id %p state %d flags 0x%lx stid %d "
			      "backlog %d %pI6:%d/%d\n",
			      ep, ep->com.cm_id, (int)ep->com.state,
			      ep->com.flags, ep->stid, ep->backlog,
			      &lsin6->sin6_addr, ntohs(lsin6->sin6_port),
			      ntohs(m_lsin6->sin6_port));
	}
	if (cc < space)
		epd->pos += cc;
	return 0;
}

static int ep_release(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *epd = file->private_data;

	if (!epd) {
		pr_info("%s null epd?\n", __func__);
		return 0;
	}
	vfree(epd->buf);
	kfree(epd);
	return 0;
}
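
/*
 * ep_open() - snapshot active and listening endpoints.  The sizing
 * pass walks the hwtid, atid and stid IDRs under the device lock;
 * the dump pass formats active endpoints from the hwtid/atid tables
 * and listeners from the stid table.
 */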
static int ep_open(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *epd;
	int ret = 0;
	int count = 1;

	epd = kmalloc(sizeof(*epd), GFP_KERNEL);
	if (!epd) {
		ret = -ENOMEM;
		goto out;
	}
	epd->devp = inode->i_private;
	epd->pos = 0;

	spin_lock_irq(&epd->devp->lock);
	idr_for_each(&epd->devp->hwtid_idr, count_idrs, &count);
	idr_for_each(&epd->devp->atid_idr, count_idrs, &count);
	idr_for_each(&epd->devp->stid_idr, count_idrs, &count);
	spin_unlock_irq(&epd->devp->lock);

	epd->bufsize = count * 240;
	epd->buf = vmalloc(epd->bufsize);
	if (!epd->buf) {
		ret = -ENOMEM;
		goto err1;
	}

	spin_lock_irq(&epd->devp->lock);
	idr_for_each(&epd->devp->hwtid_idr, dump_ep, epd);
	idr_for_each(&epd->devp->atid_idr, dump_ep, epd);
	idr_for_each(&epd->devp->stid_idr, dump_listen_ep, epd);
	spin_unlock_irq(&epd->devp->lock);

	file->private_data = epd;
	goto out;
err1:
	kfree(epd);
out:
	return ret;
}

static const struct file_operations ep_debugfs_fops = {
	.owner = THIS_MODULE,
	.open = ep_open,
	.release = ep_release,
	.read = debugfs_read,
};

static int setup_debugfs(struct c4iw_dev *devp)
{
	if (!devp->debugfs_root)
		return -1;

	debugfs_create_file_size("qps", S_IWUSR, devp->debugfs_root,
				 (void *)devp, &qp_debugfs_fops, 4096);

	debugfs_create_file_size("stags", S_IWUSR, devp->debugfs_root,
				 (void *)devp, &stag_debugfs_fops, 4096);

	debugfs_create_file_size("stats", S_IWUSR, devp->debugfs_root,
				 (void *)devp, &stats_debugfs_fops, 4096);

	debugfs_create_file_size("eps", S_IWUSR, devp->debugfs_root,
				 (void *)devp, &ep_debugfs_fops, 4096);

	if (c4iw_wr_log)
		debugfs_create_file_size("wr_log", S_IWUSR, devp->debugfs_root,
					 (void *)devp, &wr_log_debugfs_fops,
					 4096);
	return 0;
}

void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
			       struct c4iw_dev_ucontext *uctx)
{
	struct list_head *pos, *nxt;
	struct c4iw_qid_list *entry;

	mutex_lock(&uctx->lock);
	list_for_each_safe(pos, nxt, &uctx->qpids) {
		entry = list_entry(pos, struct c4iw_qid_list, entry);
		list_del_init(&entry->entry);
		if (!(entry->qid & rdev->qpmask)) {
			c4iw_put_resource(&rdev->resource.qid_table,
					  entry->qid);
			mutex_lock(&rdev->stats.lock);
			rdev->stats.qid.cur -= rdev->qpmask + 1;
			mutex_unlock(&rdev->stats.lock);
		}
		kfree(entry);
	}

	list_for_each_safe(pos, nxt, &uctx->cqids) {
		entry = list_entry(pos, struct c4iw_qid_list, entry);
		list_del_init(&entry->entry);
		kfree(entry);
	}
	mutex_unlock(&uctx->lock);
}

void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev,
			    struct c4iw_dev_ucontext *uctx)
{
	INIT_LIST_HEAD(&uctx->qpids);
	INIT_LIST_HEAD(&uctx->cqids);
	mutex_init(&uctx->lock);
}
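
/*
 * c4iw_rdev_open() brings up the per-adapter RDMA resources in
 * dependency order: the qid/pd/stag resource tables, then the PBL,
 * RQT and on-chip queue memory pools, the status page shared with
 * user mode, the optional wr_log, and finally the workqueue used to
 * free resources.  The error unwind at the bottom releases them in
 * reverse order.
 */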
/* Caller takes care of locking if needed */
static int c4iw_rdev_open(struct c4iw_rdev *rdev)
{
	int err;

	c4iw_init_dev_ucontext(rdev, &rdev->uctx);

	/*
	 * This implementation assumes udb_density == ucq_density!  Eventually
	 * we might need to support this but for now fail the open.  Also the
	 * cqid and qpid range must match for now.
	 */
	if (rdev->lldi.udb_density != rdev->lldi.ucq_density) {
		pr_err("%s: unsupported udb/ucq densities %u/%u\n",
		       pci_name(rdev->lldi.pdev), rdev->lldi.udb_density,
		       rdev->lldi.ucq_density);
		return -EINVAL;
	}
	if (rdev->lldi.vr->qp.start != rdev->lldi.vr->cq.start ||
	    rdev->lldi.vr->qp.size != rdev->lldi.vr->cq.size) {
		pr_err("%s: unsupported qp and cq id ranges qp start %u size %u cq start %u size %u\n",
		       pci_name(rdev->lldi.pdev), rdev->lldi.vr->qp.start,
		       rdev->lldi.vr->qp.size, rdev->lldi.vr->cq.start,
		       rdev->lldi.vr->cq.size);
		return -EINVAL;
	}

	rdev->qpmask = rdev->lldi.udb_density - 1;
	rdev->cqmask = rdev->lldi.ucq_density - 1;
	pr_debug("dev %s stag start 0x%0x size 0x%0x num stags %d pbl start 0x%0x size 0x%0x rq start 0x%0x size 0x%0x qp qid start %u size %u cq qid start %u size %u srq size %u\n",
		 pci_name(rdev->lldi.pdev), rdev->lldi.vr->stag.start,
		 rdev->lldi.vr->stag.size, c4iw_num_stags(rdev),
		 rdev->lldi.vr->pbl.start,
		 rdev->lldi.vr->pbl.size, rdev->lldi.vr->rq.start,
		 rdev->lldi.vr->rq.size,
		 rdev->lldi.vr->qp.start,
		 rdev->lldi.vr->qp.size,
		 rdev->lldi.vr->cq.start,
		 rdev->lldi.vr->cq.size,
		 rdev->lldi.vr->srq.size);
	pr_debug("udb %pR db_reg %p gts_reg %p qpmask 0x%x cqmask 0x%x\n",
		 &rdev->lldi.pdev->resource[2],
		 rdev->lldi.db_reg, rdev->lldi.gts_reg,
		 rdev->qpmask, rdev->cqmask);

	if (c4iw_num_stags(rdev) == 0)
		return -EINVAL;

	rdev->stats.pd.total = T4_MAX_NUM_PD;
	rdev->stats.stag.total = rdev->lldi.vr->stag.size;
	rdev->stats.pbl.total = rdev->lldi.vr->pbl.size;
	rdev->stats.rqt.total = rdev->lldi.vr->rq.size;
	rdev->stats.srqt.total = rdev->lldi.vr->srq.size;
	rdev->stats.ocqp.total = rdev->lldi.vr->ocq.size;
	rdev->stats.qid.total = rdev->lldi.vr->qp.size;

	err = c4iw_init_resource(rdev, c4iw_num_stags(rdev),
				 T4_MAX_NUM_PD, rdev->lldi.vr->srq.size);
	if (err) {
		pr_err("error %d initializing resources\n", err);
		return err;
	}
	err = c4iw_pblpool_create(rdev);
	if (err) {
		pr_err("error %d initializing pbl pool\n", err);
		goto destroy_resource;
	}
	err = c4iw_rqtpool_create(rdev);
	if (err) {
		pr_err("error %d initializing rqt pool\n", err);
		goto destroy_pblpool;
	}
	err = c4iw_ocqp_pool_create(rdev);
	if (err) {
		pr_err("error %d initializing ocqp pool\n", err);
		goto destroy_rqtpool;
	}
	rdev->status_page = (struct t4_dev_status_page *)
			    __get_free_page(GFP_KERNEL);
	if (!rdev->status_page) {
		err = -ENOMEM;
		goto destroy_ocqp_pool;
	}
	rdev->status_page->qp_start = rdev->lldi.vr->qp.start;
	rdev->status_page->qp_size = rdev->lldi.vr->qp.size;
	rdev->status_page->cq_start = rdev->lldi.vr->cq.start;
	rdev->status_page->cq_size = rdev->lldi.vr->cq.size;
	rdev->status_page->write_cmpl_supported = rdev->lldi.write_cmpl_support;

	if (c4iw_wr_log) {
		rdev->wr_log = kcalloc(1 << c4iw_wr_log_size_order,
				       sizeof(*rdev->wr_log),
				       GFP_KERNEL);
		if (rdev->wr_log) {
			rdev->wr_log_size = 1 << c4iw_wr_log_size_order;
			atomic_set(&rdev->wr_log_idx, 0);
		}
	}

	rdev->free_workq = create_singlethread_workqueue("iw_cxgb4_free");
	if (!rdev->free_workq) {
		err = -ENOMEM;
		goto err_free_status_page_and_wr_log;
	}

	rdev->status_page->db_off = 0;

	init_completion(&rdev->rqt_compl);
	init_completion(&rdev->pbl_compl);
	kref_init(&rdev->rqt_kref);
	kref_init(&rdev->pbl_kref);

	return 0;
err_free_status_page_and_wr_log:
	if (c4iw_wr_log && rdev->wr_log)
		kfree(rdev->wr_log);
	free_page((unsigned long)rdev->status_page);
destroy_ocqp_pool:
	c4iw_ocqp_pool_destroy(rdev);
destroy_rqtpool:
	c4iw_rqtpool_destroy(rdev);
destroy_pblpool:
	c4iw_pblpool_destroy(rdev);
destroy_resource:
	c4iw_destroy_resource(&rdev->resource);
	return err;
}
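
/*
 * c4iw_rdev_close() - unwind c4iw_rdev_open().  Destroying the PBL
 * and RQT pools drops the initial kref taken in c4iw_rdev_open(); the
 * completions block until any remaining references to those pools
 * are released.
 */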
static void c4iw_rdev_close(struct c4iw_rdev *rdev)
{
	kfree(rdev->wr_log);
	c4iw_release_dev_ucontext(rdev, &rdev->uctx);
	free_page((unsigned long)rdev->status_page);
	c4iw_pblpool_destroy(rdev);
	c4iw_rqtpool_destroy(rdev);
	wait_for_completion(&rdev->pbl_compl);
	wait_for_completion(&rdev->rqt_compl);
	c4iw_ocqp_pool_destroy(rdev);
	destroy_workqueue(rdev->free_workq);
	c4iw_destroy_resource(&rdev->resource);
}

void c4iw_dealloc(struct uld_ctx *ctx)
{
	c4iw_rdev_close(&ctx->dev->rdev);
	WARN_ON_ONCE(!idr_is_empty(&ctx->dev->cqidr));
	idr_destroy(&ctx->dev->cqidr);
	WARN_ON_ONCE(!idr_is_empty(&ctx->dev->qpidr));
	idr_destroy(&ctx->dev->qpidr);
	WARN_ON_ONCE(!idr_is_empty(&ctx->dev->mmidr));
	idr_destroy(&ctx->dev->mmidr);
	wait_event(ctx->dev->wait, idr_is_empty(&ctx->dev->hwtid_idr));
	idr_destroy(&ctx->dev->hwtid_idr);
	idr_destroy(&ctx->dev->stid_idr);
	idr_destroy(&ctx->dev->atid_idr);
	if (ctx->dev->rdev.bar2_kva)
		iounmap(ctx->dev->rdev.bar2_kva);
	if (ctx->dev->rdev.oc_mw_kva)
		iounmap(ctx->dev->rdev.oc_mw_kva);
	ib_dealloc_device(&ctx->dev->ibdev);
	ctx->dev = NULL;
}

static void c4iw_remove(struct uld_ctx *ctx)
{
	pr_debug("c4iw_dev %p\n", ctx->dev);
	c4iw_unregister_device(ctx->dev);
	c4iw_dealloc(ctx);
}

static int rdma_supported(const struct cxgb4_lld_info *infop)
{
	return infop->vr->stag.size > 0 && infop->vr->pbl.size > 0 &&
	       infop->vr->rq.size > 0 && infop->vr->qp.size > 0 &&
	       infop->vr->cq.size > 0;
}
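
/*
 * c4iw_alloc() - allocate and initialize the ib_device for one
 * adapter: copy the LLD info, size the hardware queues, map BAR2
 * (all of it write-combined on T5/T6, only the on-chip queue window
 * on T4), open the rdev and set up the per-device ID tables, locks
 * and debugfs entries.
 */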
static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
{
	struct c4iw_dev *devp;
	int ret;

	if (!rdma_supported(infop)) {
		pr_info("%s: RDMA not supported on this device\n",
			pci_name(infop->pdev));
		return ERR_PTR(-ENOSYS);
	}
	if (!ocqp_supported(infop))
		pr_info("%s: On-Chip Queues not supported on this device\n",
			pci_name(infop->pdev));

	devp = (struct c4iw_dev *)ib_alloc_device(sizeof(*devp));
	if (!devp) {
		pr_err("Cannot allocate ib device\n");
		return ERR_PTR(-ENOMEM);
	}
	devp->rdev.lldi = *infop;

	/* init various hw-queue params based on lld info */
	pr_debug("Ing. padding boundary is %d, egrsstatuspagesize = %d\n",
		 devp->rdev.lldi.sge_ingpadboundary,
		 devp->rdev.lldi.sge_egrstatuspagesize);

	devp->rdev.hw_queue.t4_eq_status_entries =
		devp->rdev.lldi.sge_egrstatuspagesize / 64;
	devp->rdev.hw_queue.t4_max_eq_size = 65520;
	devp->rdev.hw_queue.t4_max_iq_size = 65520;
	devp->rdev.hw_queue.t4_max_rq_size = 8192 -
		devp->rdev.hw_queue.t4_eq_status_entries - 1;
	devp->rdev.hw_queue.t4_max_sq_size =
		devp->rdev.hw_queue.t4_max_eq_size -
		devp->rdev.hw_queue.t4_eq_status_entries - 1;
	devp->rdev.hw_queue.t4_max_qp_depth =
		devp->rdev.hw_queue.t4_max_rq_size;
	devp->rdev.hw_queue.t4_max_cq_depth =
		devp->rdev.hw_queue.t4_max_iq_size - 2;
	devp->rdev.hw_queue.t4_stat_len =
		devp->rdev.lldi.sge_egrstatuspagesize;

	/*
	 * For T5/T6 devices, we map all of BAR2 with WC.
	 * For T4 devices with onchip qp mem, we map only that part
	 * of BAR2 with WC.
	 */
	devp->rdev.bar2_pa = pci_resource_start(devp->rdev.lldi.pdev, 2);
	if (!is_t4(devp->rdev.lldi.adapter_type)) {
		devp->rdev.bar2_kva = ioremap_wc(devp->rdev.bar2_pa,
			pci_resource_len(devp->rdev.lldi.pdev, 2));
		if (!devp->rdev.bar2_kva) {
			pr_err("Unable to ioremap BAR2\n");
			ib_dealloc_device(&devp->ibdev);
			return ERR_PTR(-EINVAL);
		}
	} else if (ocqp_supported(infop)) {
		devp->rdev.oc_mw_pa =
			pci_resource_start(devp->rdev.lldi.pdev, 2) +
			pci_resource_len(devp->rdev.lldi.pdev, 2) -
			roundup_pow_of_two(devp->rdev.lldi.vr->ocq.size);
		devp->rdev.oc_mw_kva = ioremap_wc(devp->rdev.oc_mw_pa,
			devp->rdev.lldi.vr->ocq.size);
		if (!devp->rdev.oc_mw_kva) {
			pr_err("Unable to ioremap onchip mem\n");
			ib_dealloc_device(&devp->ibdev);
			return ERR_PTR(-EINVAL);
		}
	}

	pr_debug("ocq memory: hw_start 0x%x size %u mw_pa 0x%lx mw_kva %p\n",
		 devp->rdev.lldi.vr->ocq.start, devp->rdev.lldi.vr->ocq.size,
		 devp->rdev.oc_mw_pa, devp->rdev.oc_mw_kva);

	ret = c4iw_rdev_open(&devp->rdev);
	if (ret) {
		pr_err("Unable to open CXIO rdev err %d\n", ret);
		ib_dealloc_device(&devp->ibdev);
		return ERR_PTR(ret);
	}

	idr_init(&devp->cqidr);
	idr_init(&devp->qpidr);
	idr_init(&devp->mmidr);
	idr_init(&devp->hwtid_idr);
	idr_init(&devp->stid_idr);
	idr_init(&devp->atid_idr);
	spin_lock_init(&devp->lock);
	mutex_init(&devp->rdev.stats.lock);
	mutex_init(&devp->db_mutex);
	INIT_LIST_HEAD(&devp->db_fc_list);
	init_waitqueue_head(&devp->wait);
	devp->avail_ird = devp->rdev.lldi.max_ird_adapter;

	if (c4iw_debugfs_root) {
		devp->debugfs_root = debugfs_create_dir(
					pci_name(devp->rdev.lldi.pdev),
					c4iw_debugfs_root);
		setup_debugfs(devp);
	}

	return devp;
}
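
/*
 * c4iw_uld_add() - cxgb4 upper-layer-driver "add" callback.  Only the
 * uld_ctx is created here; the ib_device itself is allocated later,
 * when the LLD reports CXGB4_STATE_UP to c4iw_uld_state_change().
 */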
static void *c4iw_uld_add(const struct cxgb4_lld_info *infop)
{
	struct uld_ctx *ctx;
	static int vers_printed;
	int i;

	if (!vers_printed++)
		pr_info("Chelsio T4/T5 RDMA Driver - version %s\n",
			DRV_VERSION);

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx) {
		ctx = ERR_PTR(-ENOMEM);
		goto out;
	}
	ctx->lldi = *infop;

	pr_debug("found device %s nchan %u nrxq %u ntxq %u nports %u\n",
		 pci_name(ctx->lldi.pdev),
		 ctx->lldi.nchan, ctx->lldi.nrxq,
		 ctx->lldi.ntxq, ctx->lldi.nports);

	mutex_lock(&dev_mutex);
	list_add_tail(&ctx->entry, &uld_ctx_list);
	mutex_unlock(&dev_mutex);

	for (i = 0; i < ctx->lldi.nrxq; i++)
		pr_debug("rxqid[%u] %u\n", i, ctx->lldi.rxq_ids[i]);
out:
	return ctx;
}

static inline struct sk_buff *copy_gl_to_skb_pkt(const struct pkt_gl *gl,
						 const __be64 *rsp,
						 u32 pktshift)
{
	struct sk_buff *skb;

	/*
	 * Allocate space for cpl_pass_accept_req which will be synthesized by
	 * driver.  Once the driver synthesizes the request the skb will go
	 * through the regular cpl_pass_accept_req processing.
	 * The math here assumes sizeof cpl_pass_accept_req >= sizeof
	 * cpl_rx_pkt.
	 */
	skb = alloc_skb(gl->tot_len + sizeof(struct cpl_pass_accept_req) +
			sizeof(struct rss_header) - pktshift, GFP_ATOMIC);
	if (unlikely(!skb))
		return NULL;

	__skb_put(skb, gl->tot_len + sizeof(struct cpl_pass_accept_req) +
		  sizeof(struct rss_header) - pktshift);

	/*
	 * This skb will contain:
	 *   rss_header from the rspq descriptor (1 flit)
	 *   cpl_rx_pkt struct from the rspq descriptor (2 flits)
	 *   space for the difference between the size of an
	 *      rx_pkt and pass_accept_req cpl (1 flit)
	 *   the packet data from the gl
	 */
	skb_copy_to_linear_data(skb, rsp, sizeof(struct cpl_pass_accept_req) +
				sizeof(struct rss_header));
	skb_copy_to_linear_data_offset(skb, sizeof(struct rss_header) +
				       sizeof(struct cpl_pass_accept_req),
				       gl->va + pktshift,
				       gl->tot_len - pktshift);
	return skb;
}

static inline int recv_rx_pkt(struct c4iw_dev *dev, const struct pkt_gl *gl,
			      const __be64 *rsp)
{
	unsigned int opcode = *(u8 *)rsp;
	struct sk_buff *skb;

	if (opcode != CPL_RX_PKT)
		goto out;

	skb = copy_gl_to_skb_pkt(gl, rsp, dev->rdev.lldi.sge_pktshift);
	if (skb == NULL)
		goto out;

	if (c4iw_handlers[opcode] == NULL) {
		pr_info("%s no handler opcode 0x%x...\n", __func__, opcode);
		kfree_skb(skb);
		goto out;
	}
	c4iw_handlers[opcode](dev, skb);
	return 1;
out:
	return 0;
}

static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp,
			       const struct pkt_gl *gl)
{
	struct uld_ctx *ctx = handle;
	struct c4iw_dev *dev = ctx->dev;
	struct sk_buff *skb;
	u8 opcode;

	if (gl == NULL) {
		/* omit RSS and rsp_ctrl at end of descriptor */
		unsigned int len = 64 - sizeof(struct rsp_ctrl) - 8;

		skb = alloc_skb(256, GFP_ATOMIC);
		if (!skb)
			goto nomem;
		__skb_put(skb, len);
		skb_copy_to_linear_data(skb, &rsp[1], len);
	} else if (gl == CXGB4_MSG_AN) {
		const struct rsp_ctrl *rc = (void *)rsp;
		u32 qid = be32_to_cpu(rc->pldbuflen_qid);

		c4iw_ev_handler(dev, qid);
		return 0;
	} else if (unlikely(*(u8 *)rsp != *(u8 *)gl->va)) {
		if (recv_rx_pkt(dev, gl, rsp))
			return 0;

		pr_info("%s: unexpected FL contents at %p, RSS %#llx, FL %#llx, len %u\n",
			pci_name(ctx->lldi.pdev), gl->va,
			be64_to_cpu(*rsp),
			be64_to_cpu(*(__force __be64 *)gl->va),
			gl->tot_len);

		return 0;
	} else {
		skb = cxgb4_pktgl_to_skb(gl, 128, 128);
		if (unlikely(!skb))
			goto nomem;
	}

	opcode = *(u8 *)rsp;
	if (c4iw_handlers[opcode]) {
		c4iw_handlers[opcode](dev, skb);
	} else {
		pr_info("%s no handler opcode 0x%x...\n", __func__, opcode);
		kfree_skb(skb);
	}

	return 0;
nomem:
	return -1;
}
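
/*
 * c4iw_uld_state_change() - react to LLD state transitions.  Device
 * creation happens on CXGB4_STATE_UP; ib device registration is
 * handed off to reg_workq rather than done directly in this callback.
 */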
static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state)
{
	struct uld_ctx *ctx = handle;

	pr_debug("new_state %u\n", new_state);
	switch (new_state) {
	case CXGB4_STATE_UP:
		pr_info("%s: Up\n", pci_name(ctx->lldi.pdev));
		if (!ctx->dev) {
			ctx->dev = c4iw_alloc(&ctx->lldi);
			if (IS_ERR(ctx->dev)) {
				pr_err("%s: initialization failed: %ld\n",
				       pci_name(ctx->lldi.pdev),
				       PTR_ERR(ctx->dev));
				ctx->dev = NULL;
				break;
			}

			INIT_WORK(&ctx->reg_work, c4iw_register_device);
			queue_work(reg_workq, &ctx->reg_work);
		}
		break;
	case CXGB4_STATE_DOWN:
		pr_info("%s: Down\n", pci_name(ctx->lldi.pdev));
		if (ctx->dev)
			c4iw_remove(ctx);
		break;
	case CXGB4_STATE_FATAL_ERROR:
	case CXGB4_STATE_START_RECOVERY:
		pr_info("%s: Fatal Error\n", pci_name(ctx->lldi.pdev));
		if (ctx->dev) {
			struct ib_event event;

			ctx->dev->rdev.flags |= T4_FATAL_ERROR;
			memset(&event, 0, sizeof(event));
			event.event = IB_EVENT_DEVICE_FATAL;
			event.device = &ctx->dev->ibdev;
			ib_dispatch_event(&event);
			c4iw_remove(ctx);
		}
		break;
	case CXGB4_STATE_DETACH:
		pr_info("%s: Detach\n", pci_name(ctx->lldi.pdev));
		if (ctx->dev)
			c4iw_remove(ctx);
		break;
	}
	return 0;
}

static int disable_qp_db(int id, void *p, void *data)
{
	struct c4iw_qp *qp = p;

	t4_disable_wq_db(&qp->wq);
	return 0;
}

static void stop_queues(struct uld_ctx *ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->dev->lock, flags);
	ctx->dev->rdev.stats.db_state_transitions++;
	ctx->dev->db_state = STOPPED;
	if (ctx->dev->rdev.flags & T4_STATUS_PAGE_DISABLED)
		idr_for_each(&ctx->dev->qpidr, disable_qp_db, NULL);
	else
		ctx->dev->rdev.status_page->db_off = 1;
	spin_unlock_irqrestore(&ctx->dev->lock, flags);
}

static int enable_qp_db(int id, void *p, void *data)
{
	struct c4iw_qp *qp = p;

	t4_enable_wq_db(&qp->wq);
	return 0;
}

static void resume_rc_qp(struct c4iw_qp *qp)
{
	spin_lock(&qp->lock);
	t4_ring_sq_db(&qp->wq, qp->wq.sq.wq_pidx_inc, NULL);
	qp->wq.sq.wq_pidx_inc = 0;
	t4_ring_rq_db(&qp->wq, qp->wq.rq.wq_pidx_inc, NULL);
	qp->wq.rq.wq_pidx_inc = 0;
	spin_unlock(&qp->lock);
}

static void resume_a_chunk(struct uld_ctx *ctx)
{
	int i;
	struct c4iw_qp *qp;

	for (i = 0; i < DB_FC_RESUME_SIZE; i++) {
		qp = list_first_entry(&ctx->dev->db_fc_list, struct c4iw_qp,
				      db_fc_entry);
		list_del_init(&qp->db_fc_entry);
		resume_rc_qp(qp);
		if (list_empty(&ctx->dev->db_fc_list))
			break;
	}
}
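
/*
 * resume_queues() - leave the STOPPED doorbell state.  QPs stalled on
 * the db_fc_list are released in DB_FC_RESUME_SIZE chunks while the
 * hardware doorbell FIFO stays below its drain threshold; once the
 * list empties, the state machine returns to NORMAL and user-mode
 * doorbells are re-enabled.
 */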
static void resume_queues(struct uld_ctx *ctx)
{
	spin_lock_irq(&ctx->dev->lock);
	if (ctx->dev->db_state != STOPPED)
		goto out;
	ctx->dev->db_state = FLOW_CONTROL;
	while (1) {
		if (list_empty(&ctx->dev->db_fc_list)) {
			WARN_ON(ctx->dev->db_state != FLOW_CONTROL);
			ctx->dev->db_state = NORMAL;
			ctx->dev->rdev.stats.db_state_transitions++;
			if (ctx->dev->rdev.flags & T4_STATUS_PAGE_DISABLED) {
				idr_for_each(&ctx->dev->qpidr, enable_qp_db,
					     NULL);
			} else {
				ctx->dev->rdev.status_page->db_off = 0;
			}
			break;
		} else {
			if (cxgb4_dbfifo_count(ctx->dev->rdev.lldi.ports[0], 1)
			    < (ctx->dev->rdev.lldi.dbfifo_int_thresh <<
			       DB_FC_DRAIN_THRESH)) {
				resume_a_chunk(ctx);
			}
			if (!list_empty(&ctx->dev->db_fc_list)) {
				spin_unlock_irq(&ctx->dev->lock);
				if (DB_FC_RESUME_DELAY) {
					set_current_state(TASK_UNINTERRUPTIBLE);
					schedule_timeout(DB_FC_RESUME_DELAY);
				}
				spin_lock_irq(&ctx->dev->lock);
				if (ctx->dev->db_state != FLOW_CONTROL)
					break;
			}
		}
	}
out:
	if (ctx->dev->db_state != NORMAL)
		ctx->dev->rdev.stats.db_fc_interruptions++;
	spin_unlock_irq(&ctx->dev->lock);
}

struct qp_list {
	unsigned idx;
	struct c4iw_qp **qps;
};

static int add_and_ref_qp(int id, void *p, void *data)
{
	struct qp_list *qp_listp = data;
	struct c4iw_qp *qp = p;

	c4iw_qp_add_ref(&qp->ibqp);
	qp_listp->qps[qp_listp->idx++] = qp;
	return 0;
}

static int count_qps(int id, void *p, void *data)
{
	unsigned *countp = data;

	(*countp)++;
	return 0;
}

static void deref_qps(struct qp_list *qp_list)
{
	int idx;

	for (idx = 0; idx < qp_list->idx; idx++)
		c4iw_qp_rem_ref(&qp_list->qps[idx]->ibqp);
}

static void recover_lost_dbs(struct uld_ctx *ctx, struct qp_list *qp_list)
{
	int idx;
	int ret;

	for (idx = 0; idx < qp_list->idx; idx++) {
		struct c4iw_qp *qp = qp_list->qps[idx];

		spin_lock_irq(&qp->rhp->lock);
		spin_lock(&qp->lock);
		ret = cxgb4_sync_txq_pidx(qp->rhp->rdev.lldi.ports[0],
					  qp->wq.sq.qid,
					  t4_sq_host_wq_pidx(&qp->wq),
					  t4_sq_wq_size(&qp->wq));
		if (ret) {
			pr_err("%s: Fatal error - DB overflow recovery failed - error syncing SQ qid %u\n",
			       pci_name(ctx->lldi.pdev), qp->wq.sq.qid);
			spin_unlock(&qp->lock);
			spin_unlock_irq(&qp->rhp->lock);
			return;
		}
		qp->wq.sq.wq_pidx_inc = 0;

		ret = cxgb4_sync_txq_pidx(qp->rhp->rdev.lldi.ports[0],
					  qp->wq.rq.qid,
					  t4_rq_host_wq_pidx(&qp->wq),
					  t4_rq_wq_size(&qp->wq));
		if (ret) {
			pr_err("%s: Fatal error - DB overflow recovery failed - error syncing RQ qid %u\n",
			       pci_name(ctx->lldi.pdev), qp->wq.rq.qid);
			spin_unlock(&qp->lock);
			spin_unlock_irq(&qp->rhp->lock);
			return;
		}
		qp->wq.rq.wq_pidx_inc = 0;
		spin_unlock(&qp->lock);
		spin_unlock_irq(&qp->rhp->lock);

		/* Wait for the dbfifo to drain */
		while (cxgb4_dbfifo_count(qp->rhp->rdev.lldi.ports[0], 1) > 0) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_timeout(usecs_to_jiffies(10));
		}
	}
}
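
/*
 * recover_queues() - recover from a dropped doorbell (DB_DROP).  The
 * SGE event queue cache is flushed, every QP is referenced under the
 * device lock, and then each QP's SQ/RQ producer index is re-synced
 * with the hardware from process context, where sleeping is allowed.
 */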
static void recover_queues(struct uld_ctx *ctx)
{
	int count = 0;
	struct qp_list qp_list;
	int ret;

	/* slow everybody down */
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_timeout(usecs_to_jiffies(1000));

	/* flush the SGE contexts */
	ret = cxgb4_flush_eq_cache(ctx->dev->rdev.lldi.ports[0]);
	if (ret) {
		pr_err("%s: Fatal error - DB overflow recovery failed\n",
		       pci_name(ctx->lldi.pdev));
		return;
	}

	/* Count active queues so we can build a list of queues to recover */
	spin_lock_irq(&ctx->dev->lock);
	WARN_ON(ctx->dev->db_state != STOPPED);
	ctx->dev->db_state = RECOVERY;
	idr_for_each(&ctx->dev->qpidr, count_qps, &count);

	qp_list.qps = kcalloc(count, sizeof(*qp_list.qps), GFP_ATOMIC);
	if (!qp_list.qps) {
		spin_unlock_irq(&ctx->dev->lock);
		return;
	}
	qp_list.idx = 0;

	/* add and ref each qp so it doesn't get freed */
	idr_for_each(&ctx->dev->qpidr, add_and_ref_qp, &qp_list);

	spin_unlock_irq(&ctx->dev->lock);

	/* now traverse the list in a safe context to recover the db state */
	recover_lost_dbs(ctx, &qp_list);

	/* we're almost done!  deref the qps and clean up */
	deref_qps(&qp_list);
	kfree(qp_list.qps);

	spin_lock_irq(&ctx->dev->lock);
	WARN_ON(ctx->dev->db_state != RECOVERY);
	ctx->dev->db_state = STOPPED;
	spin_unlock_irq(&ctx->dev->lock);
}

static int c4iw_uld_control(void *handle, enum cxgb4_control control, ...)
{
	struct uld_ctx *ctx = handle;

	switch (control) {
	case CXGB4_CONTROL_DB_FULL:
		stop_queues(ctx);
		ctx->dev->rdev.stats.db_full++;
		break;
	case CXGB4_CONTROL_DB_EMPTY:
		resume_queues(ctx);
		mutex_lock(&ctx->dev->rdev.stats.lock);
		ctx->dev->rdev.stats.db_empty++;
		mutex_unlock(&ctx->dev->rdev.stats.lock);
		break;
	case CXGB4_CONTROL_DB_DROP:
		recover_queues(ctx);
		mutex_lock(&ctx->dev->rdev.stats.lock);
		ctx->dev->rdev.stats.db_drop++;
		mutex_unlock(&ctx->dev->rdev.stats.lock);
		break;
	default:
		pr_warn("%s: unknown control cmd %u\n",
			pci_name(ctx->lldi.pdev), control);
		break;
	}
	return 0;
}

static struct cxgb4_uld_info c4iw_uld_info = {
	.name = DRV_NAME,
	.nrxq = MAX_ULD_QSETS,
	.ntxq = MAX_ULD_QSETS,
	.rxq_size = 511,
	.ciq = true,
	.lro = false,
	.add = c4iw_uld_add,
	.rx_handler = c4iw_uld_rx_handler,
	.state_change = c4iw_uld_state_change,
	.control = c4iw_uld_control,
};

void _c4iw_free_wr_wait(struct kref *kref)
{
	struct c4iw_wr_wait *wr_waitp;

	wr_waitp = container_of(kref, struct c4iw_wr_wait, kref);
	pr_debug("Free wr_wait %p\n", wr_waitp);
	kfree(wr_waitp);
}

struct c4iw_wr_wait *c4iw_alloc_wr_wait(gfp_t gfp)
{
	struct c4iw_wr_wait *wr_waitp;

	wr_waitp = kzalloc(sizeof(*wr_waitp), gfp);
	if (wr_waitp) {
		kref_init(&wr_waitp->kref);
		pr_debug("wr_wait %p\n", wr_waitp);
	}
	return wr_waitp;
}

static int __init c4iw_init_module(void)
{
	int err;

	err = c4iw_cm_init();
	if (err)
		return err;

	c4iw_debugfs_root = debugfs_create_dir(DRV_NAME, NULL);
	if (!c4iw_debugfs_root)
		pr_warn("could not create debugfs entry, continuing\n");

	reg_workq = create_singlethread_workqueue("Register_iWARP_device");
	if (!reg_workq) {
		pr_err("Failed creating workqueue to register iwarp device\n");
		err = -ENOMEM;
		goto err_term_cm;
	}

	cxgb4_register_uld(CXGB4_ULD_RDMA, &c4iw_uld_info);

	return 0;

err_term_cm:
	debugfs_remove_recursive(c4iw_debugfs_root);
	c4iw_cm_term();
	return err;
}

static void __exit c4iw_exit_module(void)
{
	struct uld_ctx *ctx, *tmp;

	mutex_lock(&dev_mutex);
	list_for_each_entry_safe(ctx, tmp, &uld_ctx_list, entry) {
		if (ctx->dev)
			c4iw_remove(ctx);
		kfree(ctx);
	}
	mutex_unlock(&dev_mutex);
	flush_workqueue(reg_workq);
	destroy_workqueue(reg_workq);
	cxgb4_unregister_uld(CXGB4_ULD_RDMA);
	c4iw_cm_term();
	debugfs_remove_recursive(c4iw_debugfs_root);
}

module_init(c4iw_init_module);
module_exit(c4iw_exit_module);