/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
31cfdda9d7SSteve Wise */ 32cfdda9d7SSteve Wise #include <linux/module.h> 33cfdda9d7SSteve Wise #include <linux/moduleparam.h> 34cfdda9d7SSteve Wise #include <linux/debugfs.h> 35e572568fSVipul Pandya #include <linux/vmalloc.h> 36cfdda9d7SSteve Wise 37cfdda9d7SSteve Wise #include <rdma/ib_verbs.h> 38cfdda9d7SSteve Wise 39cfdda9d7SSteve Wise #include "iw_cxgb4.h" 40cfdda9d7SSteve Wise 41cfdda9d7SSteve Wise #define DRV_VERSION "0.1" 42cfdda9d7SSteve Wise 43cfdda9d7SSteve Wise MODULE_AUTHOR("Steve Wise"); 44f079af7aSVipul Pandya MODULE_DESCRIPTION("Chelsio T4/T5 RDMA Driver"); 45cfdda9d7SSteve Wise MODULE_LICENSE("Dual BSD/GPL"); 46cfdda9d7SSteve Wise MODULE_VERSION(DRV_VERSION); 47cfdda9d7SSteve Wise 4880ccdd60SVipul Pandya static int allow_db_fc_on_t5; 4980ccdd60SVipul Pandya module_param(allow_db_fc_on_t5, int, 0644); 5080ccdd60SVipul Pandya MODULE_PARM_DESC(allow_db_fc_on_t5, 5180ccdd60SVipul Pandya "Allow DB Flow Control on T5 (default = 0)"); 5280ccdd60SVipul Pandya 5380ccdd60SVipul Pandya static int allow_db_coalescing_on_t5; 5480ccdd60SVipul Pandya module_param(allow_db_coalescing_on_t5, int, 0644); 5580ccdd60SVipul Pandya MODULE_PARM_DESC(allow_db_coalescing_on_t5, 5680ccdd60SVipul Pandya "Allow DB Coalescing on T5 (default = 0)"); 5780ccdd60SVipul Pandya 582c974781SVipul Pandya struct uld_ctx { 592c974781SVipul Pandya struct list_head entry; 602c974781SVipul Pandya struct cxgb4_lld_info lldi; 612c974781SVipul Pandya struct c4iw_dev *dev; 622c974781SVipul Pandya }; 632c974781SVipul Pandya 642f25e9a5SSteve Wise static LIST_HEAD(uld_ctx_list); 65cfdda9d7SSteve Wise static DEFINE_MUTEX(dev_mutex); 66cfdda9d7SSteve Wise 6705eb2389SSteve Wise #define DB_FC_RESUME_SIZE 64 6805eb2389SSteve Wise #define DB_FC_RESUME_DELAY 1 6905eb2389SSteve Wise #define DB_FC_DRAIN_THRESH 0 7005eb2389SSteve Wise 71cfdda9d7SSteve Wise static struct dentry *c4iw_debugfs_root; 72cfdda9d7SSteve Wise 739e8d1fa3SSteve Wise struct c4iw_debugfs_data { 74cfdda9d7SSteve Wise struct c4iw_dev 
*devp; 75cfdda9d7SSteve Wise char *buf; 76cfdda9d7SSteve Wise int bufsize; 77cfdda9d7SSteve Wise int pos; 78cfdda9d7SSteve Wise }; 79cfdda9d7SSteve Wise 809eccfe10SSteve Wise /* registered cxgb4 netlink callbacks */ 819eccfe10SSteve Wise static struct ibnl_client_cbs c4iw_nl_cb_table[] = { 829eccfe10SSteve Wise [RDMA_NL_IWPM_REG_PID] = {.dump = iwpm_register_pid_cb}, 839eccfe10SSteve Wise [RDMA_NL_IWPM_ADD_MAPPING] = {.dump = iwpm_add_mapping_cb}, 849eccfe10SSteve Wise [RDMA_NL_IWPM_QUERY_MAPPING] = {.dump = iwpm_add_and_query_mapping_cb}, 859eccfe10SSteve Wise [RDMA_NL_IWPM_HANDLE_ERR] = {.dump = iwpm_mapping_error_cb}, 869eccfe10SSteve Wise [RDMA_NL_IWPM_MAPINFO] = {.dump = iwpm_mapping_info_cb}, 879eccfe10SSteve Wise [RDMA_NL_IWPM_MAPINFO_NUM] = {.dump = iwpm_ack_mapping_info_cb} 889eccfe10SSteve Wise }; 899eccfe10SSteve Wise 909e8d1fa3SSteve Wise static int count_idrs(int id, void *p, void *data) 91cfdda9d7SSteve Wise { 92cfdda9d7SSteve Wise int *countp = data; 93cfdda9d7SSteve Wise 94cfdda9d7SSteve Wise *countp = *countp + 1; 95cfdda9d7SSteve Wise return 0; 96cfdda9d7SSteve Wise } 97cfdda9d7SSteve Wise 989e8d1fa3SSteve Wise static ssize_t debugfs_read(struct file *file, char __user *buf, size_t count, 999e8d1fa3SSteve Wise loff_t *ppos) 1009e8d1fa3SSteve Wise { 1019e8d1fa3SSteve Wise struct c4iw_debugfs_data *d = file->private_data; 1029e8d1fa3SSteve Wise 1033160977aSSteve Wise return simple_read_from_buffer(buf, count, ppos, d->buf, d->pos); 1049e8d1fa3SSteve Wise } 1059e8d1fa3SSteve Wise 1069e8d1fa3SSteve Wise static int dump_qp(int id, void *p, void *data) 107cfdda9d7SSteve Wise { 108cfdda9d7SSteve Wise struct c4iw_qp *qp = p; 1099e8d1fa3SSteve Wise struct c4iw_debugfs_data *qpd = data; 110cfdda9d7SSteve Wise int space; 111cfdda9d7SSteve Wise int cc; 112cfdda9d7SSteve Wise 113cfdda9d7SSteve Wise if (id != qp->wq.sq.qid) 114cfdda9d7SSteve Wise return 0; 115cfdda9d7SSteve Wise 116cfdda9d7SSteve Wise space = qpd->bufsize - qpd->pos - 1; 117cfdda9d7SSteve Wise 
if (space == 0) 118cfdda9d7SSteve Wise return 1; 119cfdda9d7SSteve Wise 120830662f6SVipul Pandya if (qp->ep) { 121830662f6SVipul Pandya if (qp->ep->com.local_addr.ss_family == AF_INET) { 122830662f6SVipul Pandya struct sockaddr_in *lsin = (struct sockaddr_in *) 123830662f6SVipul Pandya &qp->ep->com.local_addr; 124830662f6SVipul Pandya struct sockaddr_in *rsin = (struct sockaddr_in *) 125830662f6SVipul Pandya &qp->ep->com.remote_addr; 1269eccfe10SSteve Wise struct sockaddr_in *mapped_lsin = (struct sockaddr_in *) 1279eccfe10SSteve Wise &qp->ep->com.mapped_local_addr; 1289eccfe10SSteve Wise struct sockaddr_in *mapped_rsin = (struct sockaddr_in *) 1299eccfe10SSteve Wise &qp->ep->com.mapped_remote_addr; 130830662f6SVipul Pandya 131db5d040dSSteve Wise cc = snprintf(qpd->buf + qpd->pos, space, 132830662f6SVipul Pandya "rc qp sq id %u rq id %u state %u " 133830662f6SVipul Pandya "onchip %u ep tid %u state %u " 1349eccfe10SSteve Wise "%pI4:%u/%u->%pI4:%u/%u\n", 135830662f6SVipul Pandya qp->wq.sq.qid, qp->wq.rq.qid, 136830662f6SVipul Pandya (int)qp->attr.state, 137db5d040dSSteve Wise qp->wq.sq.flags & T4_SQ_ONCHIP, 138cfdda9d7SSteve Wise qp->ep->hwtid, (int)qp->ep->com.state, 139830662f6SVipul Pandya &lsin->sin_addr, ntohs(lsin->sin_port), 1409eccfe10SSteve Wise ntohs(mapped_lsin->sin_port), 1419eccfe10SSteve Wise &rsin->sin_addr, ntohs(rsin->sin_port), 1429eccfe10SSteve Wise ntohs(mapped_rsin->sin_port)); 143830662f6SVipul Pandya } else { 144830662f6SVipul Pandya struct sockaddr_in6 *lsin6 = (struct sockaddr_in6 *) 145830662f6SVipul Pandya &qp->ep->com.local_addr; 146830662f6SVipul Pandya struct sockaddr_in6 *rsin6 = (struct sockaddr_in6 *) 147830662f6SVipul Pandya &qp->ep->com.remote_addr; 1489eccfe10SSteve Wise struct sockaddr_in6 *mapped_lsin6 = 1499eccfe10SSteve Wise (struct sockaddr_in6 *) 1509eccfe10SSteve Wise &qp->ep->com.mapped_local_addr; 1519eccfe10SSteve Wise struct sockaddr_in6 *mapped_rsin6 = 1529eccfe10SSteve Wise (struct sockaddr_in6 *) 1539eccfe10SSteve 
Wise &qp->ep->com.mapped_remote_addr; 154830662f6SVipul Pandya 155830662f6SVipul Pandya cc = snprintf(qpd->buf + qpd->pos, space, 156830662f6SVipul Pandya "rc qp sq id %u rq id %u state %u " 157830662f6SVipul Pandya "onchip %u ep tid %u state %u " 1589eccfe10SSteve Wise "%pI6:%u/%u->%pI6:%u/%u\n", 159830662f6SVipul Pandya qp->wq.sq.qid, qp->wq.rq.qid, 160830662f6SVipul Pandya (int)qp->attr.state, 161830662f6SVipul Pandya qp->wq.sq.flags & T4_SQ_ONCHIP, 162830662f6SVipul Pandya qp->ep->hwtid, (int)qp->ep->com.state, 163830662f6SVipul Pandya &lsin6->sin6_addr, 164830662f6SVipul Pandya ntohs(lsin6->sin6_port), 1659eccfe10SSteve Wise ntohs(mapped_lsin6->sin6_port), 166830662f6SVipul Pandya &rsin6->sin6_addr, 1679eccfe10SSteve Wise ntohs(rsin6->sin6_port), 1689eccfe10SSteve Wise ntohs(mapped_rsin6->sin6_port)); 169830662f6SVipul Pandya } 170830662f6SVipul Pandya } else 171db5d040dSSteve Wise cc = snprintf(qpd->buf + qpd->pos, space, 172db5d040dSSteve Wise "qp sq id %u rq id %u state %u onchip %u\n", 173db5d040dSSteve Wise qp->wq.sq.qid, qp->wq.rq.qid, 174db5d040dSSteve Wise (int)qp->attr.state, 175db5d040dSSteve Wise qp->wq.sq.flags & T4_SQ_ONCHIP); 176cfdda9d7SSteve Wise if (cc < space) 177cfdda9d7SSteve Wise qpd->pos += cc; 178cfdda9d7SSteve Wise return 0; 179cfdda9d7SSteve Wise } 180cfdda9d7SSteve Wise 181cfdda9d7SSteve Wise static int qp_release(struct inode *inode, struct file *file) 182cfdda9d7SSteve Wise { 1839e8d1fa3SSteve Wise struct c4iw_debugfs_data *qpd = file->private_data; 184cfdda9d7SSteve Wise if (!qpd) { 185cfdda9d7SSteve Wise printk(KERN_INFO "%s null qpd?\n", __func__); 186cfdda9d7SSteve Wise return 0; 187cfdda9d7SSteve Wise } 188d716a2a0SVipul Pandya vfree(qpd->buf); 189cfdda9d7SSteve Wise kfree(qpd); 190cfdda9d7SSteve Wise return 0; 191cfdda9d7SSteve Wise } 192cfdda9d7SSteve Wise 193cfdda9d7SSteve Wise static int qp_open(struct inode *inode, struct file *file) 194cfdda9d7SSteve Wise { 1959e8d1fa3SSteve Wise struct c4iw_debugfs_data *qpd; 
196cfdda9d7SSteve Wise int ret = 0; 197cfdda9d7SSteve Wise int count = 1; 198cfdda9d7SSteve Wise 199cfdda9d7SSteve Wise qpd = kmalloc(sizeof *qpd, GFP_KERNEL); 200cfdda9d7SSteve Wise if (!qpd) { 201cfdda9d7SSteve Wise ret = -ENOMEM; 202cfdda9d7SSteve Wise goto out; 203cfdda9d7SSteve Wise } 204cfdda9d7SSteve Wise qpd->devp = inode->i_private; 205cfdda9d7SSteve Wise qpd->pos = 0; 206cfdda9d7SSteve Wise 207cfdda9d7SSteve Wise spin_lock_irq(&qpd->devp->lock); 2089e8d1fa3SSteve Wise idr_for_each(&qpd->devp->qpidr, count_idrs, &count); 209cfdda9d7SSteve Wise spin_unlock_irq(&qpd->devp->lock); 210cfdda9d7SSteve Wise 211cfdda9d7SSteve Wise qpd->bufsize = count * 128; 212d716a2a0SVipul Pandya qpd->buf = vmalloc(qpd->bufsize); 213cfdda9d7SSteve Wise if (!qpd->buf) { 214cfdda9d7SSteve Wise ret = -ENOMEM; 215cfdda9d7SSteve Wise goto err1; 216cfdda9d7SSteve Wise } 217cfdda9d7SSteve Wise 218cfdda9d7SSteve Wise spin_lock_irq(&qpd->devp->lock); 2199e8d1fa3SSteve Wise idr_for_each(&qpd->devp->qpidr, dump_qp, qpd); 220cfdda9d7SSteve Wise spin_unlock_irq(&qpd->devp->lock); 221cfdda9d7SSteve Wise 222cfdda9d7SSteve Wise qpd->buf[qpd->pos++] = 0; 223cfdda9d7SSteve Wise file->private_data = qpd; 224cfdda9d7SSteve Wise goto out; 225cfdda9d7SSteve Wise err1: 226cfdda9d7SSteve Wise kfree(qpd); 227cfdda9d7SSteve Wise out: 228cfdda9d7SSteve Wise return ret; 229cfdda9d7SSteve Wise } 230cfdda9d7SSteve Wise 231cfdda9d7SSteve Wise static const struct file_operations qp_debugfs_fops = { 232cfdda9d7SSteve Wise .owner = THIS_MODULE, 233cfdda9d7SSteve Wise .open = qp_open, 234cfdda9d7SSteve Wise .release = qp_release, 2359e8d1fa3SSteve Wise .read = debugfs_read, 2368bbac892SSteve Wise .llseek = default_llseek, 2379e8d1fa3SSteve Wise }; 2389e8d1fa3SSteve Wise 2399e8d1fa3SSteve Wise static int dump_stag(int id, void *p, void *data) 2409e8d1fa3SSteve Wise { 2419e8d1fa3SSteve Wise struct c4iw_debugfs_data *stagd = data; 2429e8d1fa3SSteve Wise int space; 2439e8d1fa3SSteve Wise int cc; 2449e8d1fa3SSteve 
Wise 2459e8d1fa3SSteve Wise space = stagd->bufsize - stagd->pos - 1; 2469e8d1fa3SSteve Wise if (space == 0) 2479e8d1fa3SSteve Wise return 1; 2489e8d1fa3SSteve Wise 2499e8d1fa3SSteve Wise cc = snprintf(stagd->buf + stagd->pos, space, "0x%x\n", id<<8); 2509e8d1fa3SSteve Wise if (cc < space) 2519e8d1fa3SSteve Wise stagd->pos += cc; 2529e8d1fa3SSteve Wise return 0; 2539e8d1fa3SSteve Wise } 2549e8d1fa3SSteve Wise 2559e8d1fa3SSteve Wise static int stag_release(struct inode *inode, struct file *file) 2569e8d1fa3SSteve Wise { 2579e8d1fa3SSteve Wise struct c4iw_debugfs_data *stagd = file->private_data; 2589e8d1fa3SSteve Wise if (!stagd) { 2599e8d1fa3SSteve Wise printk(KERN_INFO "%s null stagd?\n", __func__); 2609e8d1fa3SSteve Wise return 0; 2619e8d1fa3SSteve Wise } 2629e8d1fa3SSteve Wise kfree(stagd->buf); 2639e8d1fa3SSteve Wise kfree(stagd); 2649e8d1fa3SSteve Wise return 0; 2659e8d1fa3SSteve Wise } 2669e8d1fa3SSteve Wise 2679e8d1fa3SSteve Wise static int stag_open(struct inode *inode, struct file *file) 2689e8d1fa3SSteve Wise { 2699e8d1fa3SSteve Wise struct c4iw_debugfs_data *stagd; 2709e8d1fa3SSteve Wise int ret = 0; 2719e8d1fa3SSteve Wise int count = 1; 2729e8d1fa3SSteve Wise 2739e8d1fa3SSteve Wise stagd = kmalloc(sizeof *stagd, GFP_KERNEL); 2749e8d1fa3SSteve Wise if (!stagd) { 2759e8d1fa3SSteve Wise ret = -ENOMEM; 2769e8d1fa3SSteve Wise goto out; 2779e8d1fa3SSteve Wise } 2789e8d1fa3SSteve Wise stagd->devp = inode->i_private; 2799e8d1fa3SSteve Wise stagd->pos = 0; 2809e8d1fa3SSteve Wise 2819e8d1fa3SSteve Wise spin_lock_irq(&stagd->devp->lock); 2829e8d1fa3SSteve Wise idr_for_each(&stagd->devp->mmidr, count_idrs, &count); 2839e8d1fa3SSteve Wise spin_unlock_irq(&stagd->devp->lock); 2849e8d1fa3SSteve Wise 2859e8d1fa3SSteve Wise stagd->bufsize = count * sizeof("0x12345678\n"); 2869e8d1fa3SSteve Wise stagd->buf = kmalloc(stagd->bufsize, GFP_KERNEL); 2879e8d1fa3SSteve Wise if (!stagd->buf) { 2889e8d1fa3SSteve Wise ret = -ENOMEM; 2899e8d1fa3SSteve Wise goto err1; 
2909e8d1fa3SSteve Wise } 2919e8d1fa3SSteve Wise 2929e8d1fa3SSteve Wise spin_lock_irq(&stagd->devp->lock); 2939e8d1fa3SSteve Wise idr_for_each(&stagd->devp->mmidr, dump_stag, stagd); 2949e8d1fa3SSteve Wise spin_unlock_irq(&stagd->devp->lock); 2959e8d1fa3SSteve Wise 2969e8d1fa3SSteve Wise stagd->buf[stagd->pos++] = 0; 2979e8d1fa3SSteve Wise file->private_data = stagd; 2989e8d1fa3SSteve Wise goto out; 2999e8d1fa3SSteve Wise err1: 3009e8d1fa3SSteve Wise kfree(stagd); 3019e8d1fa3SSteve Wise out: 3029e8d1fa3SSteve Wise return ret; 3039e8d1fa3SSteve Wise } 3049e8d1fa3SSteve Wise 3059e8d1fa3SSteve Wise static const struct file_operations stag_debugfs_fops = { 3069e8d1fa3SSteve Wise .owner = THIS_MODULE, 3079e8d1fa3SSteve Wise .open = stag_open, 3089e8d1fa3SSteve Wise .release = stag_release, 3099e8d1fa3SSteve Wise .read = debugfs_read, 3108bbac892SSteve Wise .llseek = default_llseek, 311cfdda9d7SSteve Wise }; 312cfdda9d7SSteve Wise 31305eb2389SSteve Wise static char *db_state_str[] = {"NORMAL", "FLOW_CONTROL", "RECOVERY", "STOPPED"}; 314422eea0aSVipul Pandya 3158d81ef34SVipul Pandya static int stats_show(struct seq_file *seq, void *v) 3168d81ef34SVipul Pandya { 3178d81ef34SVipul Pandya struct c4iw_dev *dev = seq->private; 3188d81ef34SVipul Pandya 319ec3eead2SVipul Pandya seq_printf(seq, " Object: %10s %10s %10s %10s\n", "Total", "Current", 320ec3eead2SVipul Pandya "Max", "Fail"); 321ec3eead2SVipul Pandya seq_printf(seq, " PDID: %10llu %10llu %10llu %10llu\n", 3228d81ef34SVipul Pandya dev->rdev.stats.pd.total, dev->rdev.stats.pd.cur, 323ec3eead2SVipul Pandya dev->rdev.stats.pd.max, dev->rdev.stats.pd.fail); 324ec3eead2SVipul Pandya seq_printf(seq, " QID: %10llu %10llu %10llu %10llu\n", 3258d81ef34SVipul Pandya dev->rdev.stats.qid.total, dev->rdev.stats.qid.cur, 326ec3eead2SVipul Pandya dev->rdev.stats.qid.max, dev->rdev.stats.qid.fail); 327ec3eead2SVipul Pandya seq_printf(seq, " TPTMEM: %10llu %10llu %10llu %10llu\n", 3288d81ef34SVipul Pandya dev->rdev.stats.stag.total, 
dev->rdev.stats.stag.cur, 329ec3eead2SVipul Pandya dev->rdev.stats.stag.max, dev->rdev.stats.stag.fail); 330ec3eead2SVipul Pandya seq_printf(seq, " PBLMEM: %10llu %10llu %10llu %10llu\n", 3318d81ef34SVipul Pandya dev->rdev.stats.pbl.total, dev->rdev.stats.pbl.cur, 332ec3eead2SVipul Pandya dev->rdev.stats.pbl.max, dev->rdev.stats.pbl.fail); 333ec3eead2SVipul Pandya seq_printf(seq, " RQTMEM: %10llu %10llu %10llu %10llu\n", 3348d81ef34SVipul Pandya dev->rdev.stats.rqt.total, dev->rdev.stats.rqt.cur, 335ec3eead2SVipul Pandya dev->rdev.stats.rqt.max, dev->rdev.stats.rqt.fail); 336ec3eead2SVipul Pandya seq_printf(seq, " OCQPMEM: %10llu %10llu %10llu %10llu\n", 3378d81ef34SVipul Pandya dev->rdev.stats.ocqp.total, dev->rdev.stats.ocqp.cur, 338ec3eead2SVipul Pandya dev->rdev.stats.ocqp.max, dev->rdev.stats.ocqp.fail); 3392c974781SVipul Pandya seq_printf(seq, " DB FULL: %10llu\n", dev->rdev.stats.db_full); 3402c974781SVipul Pandya seq_printf(seq, " DB EMPTY: %10llu\n", dev->rdev.stats.db_empty); 3412c974781SVipul Pandya seq_printf(seq, " DB DROP: %10llu\n", dev->rdev.stats.db_drop); 34205eb2389SSteve Wise seq_printf(seq, " DB State: %s Transitions %llu FC Interruptions %llu\n", 343422eea0aSVipul Pandya db_state_str[dev->db_state], 34405eb2389SSteve Wise dev->rdev.stats.db_state_transitions, 34505eb2389SSteve Wise dev->rdev.stats.db_fc_interruptions); 3461cab775cSVipul Pandya seq_printf(seq, "TCAM_FULL: %10llu\n", dev->rdev.stats.tcam_full); 347793dad94SVipul Pandya seq_printf(seq, "ACT_OFLD_CONN_FAILS: %10llu\n", 348793dad94SVipul Pandya dev->rdev.stats.act_ofld_conn_fails); 349793dad94SVipul Pandya seq_printf(seq, "PAS_OFLD_CONN_FAILS: %10llu\n", 350793dad94SVipul Pandya dev->rdev.stats.pas_ofld_conn_fails); 3518d81ef34SVipul Pandya return 0; 3528d81ef34SVipul Pandya } 3538d81ef34SVipul Pandya 3548d81ef34SVipul Pandya static int stats_open(struct inode *inode, struct file *file) 3558d81ef34SVipul Pandya { 3568d81ef34SVipul Pandya return single_open(file, stats_show, 
inode->i_private); 3578d81ef34SVipul Pandya } 3588d81ef34SVipul Pandya 3598d81ef34SVipul Pandya static ssize_t stats_clear(struct file *file, const char __user *buf, 3608d81ef34SVipul Pandya size_t count, loff_t *pos) 3618d81ef34SVipul Pandya { 3628d81ef34SVipul Pandya struct c4iw_dev *dev = ((struct seq_file *)file->private_data)->private; 3638d81ef34SVipul Pandya 3648d81ef34SVipul Pandya mutex_lock(&dev->rdev.stats.lock); 3658d81ef34SVipul Pandya dev->rdev.stats.pd.max = 0; 366ec3eead2SVipul Pandya dev->rdev.stats.pd.fail = 0; 3678d81ef34SVipul Pandya dev->rdev.stats.qid.max = 0; 368ec3eead2SVipul Pandya dev->rdev.stats.qid.fail = 0; 3698d81ef34SVipul Pandya dev->rdev.stats.stag.max = 0; 370ec3eead2SVipul Pandya dev->rdev.stats.stag.fail = 0; 3718d81ef34SVipul Pandya dev->rdev.stats.pbl.max = 0; 372ec3eead2SVipul Pandya dev->rdev.stats.pbl.fail = 0; 3738d81ef34SVipul Pandya dev->rdev.stats.rqt.max = 0; 374ec3eead2SVipul Pandya dev->rdev.stats.rqt.fail = 0; 3758d81ef34SVipul Pandya dev->rdev.stats.ocqp.max = 0; 376ec3eead2SVipul Pandya dev->rdev.stats.ocqp.fail = 0; 3772c974781SVipul Pandya dev->rdev.stats.db_full = 0; 3782c974781SVipul Pandya dev->rdev.stats.db_empty = 0; 3792c974781SVipul Pandya dev->rdev.stats.db_drop = 0; 380422eea0aSVipul Pandya dev->rdev.stats.db_state_transitions = 0; 381793dad94SVipul Pandya dev->rdev.stats.tcam_full = 0; 382793dad94SVipul Pandya dev->rdev.stats.act_ofld_conn_fails = 0; 383793dad94SVipul Pandya dev->rdev.stats.pas_ofld_conn_fails = 0; 3848d81ef34SVipul Pandya mutex_unlock(&dev->rdev.stats.lock); 3858d81ef34SVipul Pandya return count; 3868d81ef34SVipul Pandya } 3878d81ef34SVipul Pandya 3888d81ef34SVipul Pandya static const struct file_operations stats_debugfs_fops = { 3898d81ef34SVipul Pandya .owner = THIS_MODULE, 3908d81ef34SVipul Pandya .open = stats_open, 3918d81ef34SVipul Pandya .release = single_release, 3928d81ef34SVipul Pandya .read = seq_read, 3938d81ef34SVipul Pandya .llseek = seq_lseek, 3948d81ef34SVipul Pandya 
.write = stats_clear, 3958d81ef34SVipul Pandya }; 3968d81ef34SVipul Pandya 397793dad94SVipul Pandya static int dump_ep(int id, void *p, void *data) 398793dad94SVipul Pandya { 399793dad94SVipul Pandya struct c4iw_ep *ep = p; 400793dad94SVipul Pandya struct c4iw_debugfs_data *epd = data; 401793dad94SVipul Pandya int space; 402793dad94SVipul Pandya int cc; 403793dad94SVipul Pandya 404793dad94SVipul Pandya space = epd->bufsize - epd->pos - 1; 405793dad94SVipul Pandya if (space == 0) 406793dad94SVipul Pandya return 1; 407793dad94SVipul Pandya 408830662f6SVipul Pandya if (ep->com.local_addr.ss_family == AF_INET) { 409830662f6SVipul Pandya struct sockaddr_in *lsin = (struct sockaddr_in *) 410830662f6SVipul Pandya &ep->com.local_addr; 411830662f6SVipul Pandya struct sockaddr_in *rsin = (struct sockaddr_in *) 412830662f6SVipul Pandya &ep->com.remote_addr; 4139eccfe10SSteve Wise struct sockaddr_in *mapped_lsin = (struct sockaddr_in *) 4149eccfe10SSteve Wise &ep->com.mapped_local_addr; 4159eccfe10SSteve Wise struct sockaddr_in *mapped_rsin = (struct sockaddr_in *) 4169eccfe10SSteve Wise &ep->com.mapped_remote_addr; 417830662f6SVipul Pandya 418793dad94SVipul Pandya cc = snprintf(epd->buf + epd->pos, space, 419830662f6SVipul Pandya "ep %p cm_id %p qp %p state %d flags 0x%lx " 420830662f6SVipul Pandya "history 0x%lx hwtid %d atid %d " 4219eccfe10SSteve Wise "%pI4:%d/%d <-> %pI4:%d/%d\n", 422830662f6SVipul Pandya ep, ep->com.cm_id, ep->com.qp, 423830662f6SVipul Pandya (int)ep->com.state, ep->com.flags, 424830662f6SVipul Pandya ep->com.history, ep->hwtid, ep->atid, 425830662f6SVipul Pandya &lsin->sin_addr, ntohs(lsin->sin_port), 4269eccfe10SSteve Wise ntohs(mapped_lsin->sin_port), 4279eccfe10SSteve Wise &rsin->sin_addr, ntohs(rsin->sin_port), 4289eccfe10SSteve Wise ntohs(mapped_rsin->sin_port)); 429830662f6SVipul Pandya } else { 430830662f6SVipul Pandya struct sockaddr_in6 *lsin6 = (struct sockaddr_in6 *) 431830662f6SVipul Pandya &ep->com.local_addr; 432830662f6SVipul Pandya 
struct sockaddr_in6 *rsin6 = (struct sockaddr_in6 *) 433830662f6SVipul Pandya &ep->com.remote_addr; 4349eccfe10SSteve Wise struct sockaddr_in6 *mapped_lsin6 = (struct sockaddr_in6 *) 4359eccfe10SSteve Wise &ep->com.mapped_local_addr; 4369eccfe10SSteve Wise struct sockaddr_in6 *mapped_rsin6 = (struct sockaddr_in6 *) 4379eccfe10SSteve Wise &ep->com.mapped_remote_addr; 438830662f6SVipul Pandya 439830662f6SVipul Pandya cc = snprintf(epd->buf + epd->pos, space, 440830662f6SVipul Pandya "ep %p cm_id %p qp %p state %d flags 0x%lx " 441830662f6SVipul Pandya "history 0x%lx hwtid %d atid %d " 4429eccfe10SSteve Wise "%pI6:%d/%d <-> %pI6:%d/%d\n", 443830662f6SVipul Pandya ep, ep->com.cm_id, ep->com.qp, 444830662f6SVipul Pandya (int)ep->com.state, ep->com.flags, 445830662f6SVipul Pandya ep->com.history, ep->hwtid, ep->atid, 446830662f6SVipul Pandya &lsin6->sin6_addr, ntohs(lsin6->sin6_port), 4479eccfe10SSteve Wise ntohs(mapped_lsin6->sin6_port), 4489eccfe10SSteve Wise &rsin6->sin6_addr, ntohs(rsin6->sin6_port), 4499eccfe10SSteve Wise ntohs(mapped_rsin6->sin6_port)); 450830662f6SVipul Pandya } 451793dad94SVipul Pandya if (cc < space) 452793dad94SVipul Pandya epd->pos += cc; 453793dad94SVipul Pandya return 0; 454793dad94SVipul Pandya } 455793dad94SVipul Pandya 456793dad94SVipul Pandya static int dump_listen_ep(int id, void *p, void *data) 457793dad94SVipul Pandya { 458793dad94SVipul Pandya struct c4iw_listen_ep *ep = p; 459793dad94SVipul Pandya struct c4iw_debugfs_data *epd = data; 460793dad94SVipul Pandya int space; 461793dad94SVipul Pandya int cc; 462793dad94SVipul Pandya 463793dad94SVipul Pandya space = epd->bufsize - epd->pos - 1; 464793dad94SVipul Pandya if (space == 0) 465793dad94SVipul Pandya return 1; 466793dad94SVipul Pandya 467830662f6SVipul Pandya if (ep->com.local_addr.ss_family == AF_INET) { 468830662f6SVipul Pandya struct sockaddr_in *lsin = (struct sockaddr_in *) 469830662f6SVipul Pandya &ep->com.local_addr; 4709eccfe10SSteve Wise struct sockaddr_in *mapped_lsin = 
(struct sockaddr_in *) 4719eccfe10SSteve Wise &ep->com.mapped_local_addr; 472830662f6SVipul Pandya 473793dad94SVipul Pandya cc = snprintf(epd->buf + epd->pos, space, 474830662f6SVipul Pandya "ep %p cm_id %p state %d flags 0x%lx stid %d " 4759eccfe10SSteve Wise "backlog %d %pI4:%d/%d\n", 476830662f6SVipul Pandya ep, ep->com.cm_id, (int)ep->com.state, 477793dad94SVipul Pandya ep->com.flags, ep->stid, ep->backlog, 4789eccfe10SSteve Wise &lsin->sin_addr, ntohs(lsin->sin_port), 4799eccfe10SSteve Wise ntohs(mapped_lsin->sin_port)); 480830662f6SVipul Pandya } else { 481830662f6SVipul Pandya struct sockaddr_in6 *lsin6 = (struct sockaddr_in6 *) 482830662f6SVipul Pandya &ep->com.local_addr; 4839eccfe10SSteve Wise struct sockaddr_in6 *mapped_lsin6 = (struct sockaddr_in6 *) 4849eccfe10SSteve Wise &ep->com.mapped_local_addr; 485830662f6SVipul Pandya 486830662f6SVipul Pandya cc = snprintf(epd->buf + epd->pos, space, 487830662f6SVipul Pandya "ep %p cm_id %p state %d flags 0x%lx stid %d " 4889eccfe10SSteve Wise "backlog %d %pI6:%d/%d\n", 489830662f6SVipul Pandya ep, ep->com.cm_id, (int)ep->com.state, 490830662f6SVipul Pandya ep->com.flags, ep->stid, ep->backlog, 4919eccfe10SSteve Wise &lsin6->sin6_addr, ntohs(lsin6->sin6_port), 4929eccfe10SSteve Wise ntohs(mapped_lsin6->sin6_port)); 493830662f6SVipul Pandya } 494793dad94SVipul Pandya if (cc < space) 495793dad94SVipul Pandya epd->pos += cc; 496793dad94SVipul Pandya return 0; 497793dad94SVipul Pandya } 498793dad94SVipul Pandya 499793dad94SVipul Pandya static int ep_release(struct inode *inode, struct file *file) 500793dad94SVipul Pandya { 501793dad94SVipul Pandya struct c4iw_debugfs_data *epd = file->private_data; 502793dad94SVipul Pandya if (!epd) { 503793dad94SVipul Pandya pr_info("%s null qpd?\n", __func__); 504793dad94SVipul Pandya return 0; 505793dad94SVipul Pandya } 506793dad94SVipul Pandya vfree(epd->buf); 507793dad94SVipul Pandya kfree(epd); 508793dad94SVipul Pandya return 0; 509793dad94SVipul Pandya } 510793dad94SVipul 
Pandya 511793dad94SVipul Pandya static int ep_open(struct inode *inode, struct file *file) 512793dad94SVipul Pandya { 513793dad94SVipul Pandya struct c4iw_debugfs_data *epd; 514793dad94SVipul Pandya int ret = 0; 515793dad94SVipul Pandya int count = 1; 516793dad94SVipul Pandya 517793dad94SVipul Pandya epd = kmalloc(sizeof(*epd), GFP_KERNEL); 518793dad94SVipul Pandya if (!epd) { 519793dad94SVipul Pandya ret = -ENOMEM; 520793dad94SVipul Pandya goto out; 521793dad94SVipul Pandya } 522793dad94SVipul Pandya epd->devp = inode->i_private; 523793dad94SVipul Pandya epd->pos = 0; 524793dad94SVipul Pandya 525793dad94SVipul Pandya spin_lock_irq(&epd->devp->lock); 526793dad94SVipul Pandya idr_for_each(&epd->devp->hwtid_idr, count_idrs, &count); 527793dad94SVipul Pandya idr_for_each(&epd->devp->atid_idr, count_idrs, &count); 528793dad94SVipul Pandya idr_for_each(&epd->devp->stid_idr, count_idrs, &count); 529793dad94SVipul Pandya spin_unlock_irq(&epd->devp->lock); 530793dad94SVipul Pandya 531793dad94SVipul Pandya epd->bufsize = count * 160; 532793dad94SVipul Pandya epd->buf = vmalloc(epd->bufsize); 533793dad94SVipul Pandya if (!epd->buf) { 534793dad94SVipul Pandya ret = -ENOMEM; 535793dad94SVipul Pandya goto err1; 536793dad94SVipul Pandya } 537793dad94SVipul Pandya 538793dad94SVipul Pandya spin_lock_irq(&epd->devp->lock); 539793dad94SVipul Pandya idr_for_each(&epd->devp->hwtid_idr, dump_ep, epd); 540793dad94SVipul Pandya idr_for_each(&epd->devp->atid_idr, dump_ep, epd); 541793dad94SVipul Pandya idr_for_each(&epd->devp->stid_idr, dump_listen_ep, epd); 542793dad94SVipul Pandya spin_unlock_irq(&epd->devp->lock); 543793dad94SVipul Pandya 544793dad94SVipul Pandya file->private_data = epd; 545793dad94SVipul Pandya goto out; 546793dad94SVipul Pandya err1: 547793dad94SVipul Pandya kfree(epd); 548793dad94SVipul Pandya out: 549793dad94SVipul Pandya return ret; 550793dad94SVipul Pandya } 551793dad94SVipul Pandya 552793dad94SVipul Pandya static const struct file_operations ep_debugfs_fops = { 
553793dad94SVipul Pandya .owner = THIS_MODULE, 554793dad94SVipul Pandya .open = ep_open, 555793dad94SVipul Pandya .release = ep_release, 556793dad94SVipul Pandya .read = debugfs_read, 557793dad94SVipul Pandya }; 558793dad94SVipul Pandya 559cfdda9d7SSteve Wise static int setup_debugfs(struct c4iw_dev *devp) 560cfdda9d7SSteve Wise { 561cfdda9d7SSteve Wise struct dentry *de; 562cfdda9d7SSteve Wise 563cfdda9d7SSteve Wise if (!devp->debugfs_root) 564cfdda9d7SSteve Wise return -1; 565cfdda9d7SSteve Wise 566cfdda9d7SSteve Wise de = debugfs_create_file("qps", S_IWUSR, devp->debugfs_root, 567cfdda9d7SSteve Wise (void *)devp, &qp_debugfs_fops); 568cfdda9d7SSteve Wise if (de && de->d_inode) 569cfdda9d7SSteve Wise de->d_inode->i_size = 4096; 5709e8d1fa3SSteve Wise 5719e8d1fa3SSteve Wise de = debugfs_create_file("stags", S_IWUSR, devp->debugfs_root, 5729e8d1fa3SSteve Wise (void *)devp, &stag_debugfs_fops); 5739e8d1fa3SSteve Wise if (de && de->d_inode) 5749e8d1fa3SSteve Wise de->d_inode->i_size = 4096; 5758d81ef34SVipul Pandya 5768d81ef34SVipul Pandya de = debugfs_create_file("stats", S_IWUSR, devp->debugfs_root, 5778d81ef34SVipul Pandya (void *)devp, &stats_debugfs_fops); 5788d81ef34SVipul Pandya if (de && de->d_inode) 5798d81ef34SVipul Pandya de->d_inode->i_size = 4096; 5808d81ef34SVipul Pandya 581793dad94SVipul Pandya de = debugfs_create_file("eps", S_IWUSR, devp->debugfs_root, 582793dad94SVipul Pandya (void *)devp, &ep_debugfs_fops); 583793dad94SVipul Pandya if (de && de->d_inode) 584793dad94SVipul Pandya de->d_inode->i_size = 4096; 585793dad94SVipul Pandya 586cfdda9d7SSteve Wise return 0; 587cfdda9d7SSteve Wise } 588cfdda9d7SSteve Wise 589cfdda9d7SSteve Wise void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev, 590cfdda9d7SSteve Wise struct c4iw_dev_ucontext *uctx) 591cfdda9d7SSteve Wise { 592cfdda9d7SSteve Wise struct list_head *pos, *nxt; 593cfdda9d7SSteve Wise struct c4iw_qid_list *entry; 594cfdda9d7SSteve Wise 595cfdda9d7SSteve Wise mutex_lock(&uctx->lock); 
596cfdda9d7SSteve Wise list_for_each_safe(pos, nxt, &uctx->qpids) { 597cfdda9d7SSteve Wise entry = list_entry(pos, struct c4iw_qid_list, entry); 598cfdda9d7SSteve Wise list_del_init(&entry->entry); 5998d81ef34SVipul Pandya if (!(entry->qid & rdev->qpmask)) { 600ec3eead2SVipul Pandya c4iw_put_resource(&rdev->resource.qid_table, 601ec3eead2SVipul Pandya entry->qid); 6028d81ef34SVipul Pandya mutex_lock(&rdev->stats.lock); 6038d81ef34SVipul Pandya rdev->stats.qid.cur -= rdev->qpmask + 1; 6048d81ef34SVipul Pandya mutex_unlock(&rdev->stats.lock); 6058d81ef34SVipul Pandya } 606cfdda9d7SSteve Wise kfree(entry); 607cfdda9d7SSteve Wise } 608cfdda9d7SSteve Wise 609cfdda9d7SSteve Wise list_for_each_safe(pos, nxt, &uctx->qpids) { 610cfdda9d7SSteve Wise entry = list_entry(pos, struct c4iw_qid_list, entry); 611cfdda9d7SSteve Wise list_del_init(&entry->entry); 612cfdda9d7SSteve Wise kfree(entry); 613cfdda9d7SSteve Wise } 614cfdda9d7SSteve Wise mutex_unlock(&uctx->lock); 615cfdda9d7SSteve Wise } 616cfdda9d7SSteve Wise 617cfdda9d7SSteve Wise void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev, 618cfdda9d7SSteve Wise struct c4iw_dev_ucontext *uctx) 619cfdda9d7SSteve Wise { 620cfdda9d7SSteve Wise INIT_LIST_HEAD(&uctx->qpids); 621cfdda9d7SSteve Wise INIT_LIST_HEAD(&uctx->cqids); 622cfdda9d7SSteve Wise mutex_init(&uctx->lock); 623cfdda9d7SSteve Wise } 624cfdda9d7SSteve Wise 625cfdda9d7SSteve Wise /* Caller takes care of locking if needed */ 626cfdda9d7SSteve Wise static int c4iw_rdev_open(struct c4iw_rdev *rdev) 627cfdda9d7SSteve Wise { 628cfdda9d7SSteve Wise int err; 629cfdda9d7SSteve Wise 630cfdda9d7SSteve Wise c4iw_init_dev_ucontext(rdev, &rdev->uctx); 631cfdda9d7SSteve Wise 632cfdda9d7SSteve Wise /* 633cfdda9d7SSteve Wise * qpshift is the number of bits to shift the qpid left in order 634cfdda9d7SSteve Wise * to get the correct address of the doorbell for that qp. 
635cfdda9d7SSteve Wise */ 636cfdda9d7SSteve Wise rdev->qpshift = PAGE_SHIFT - ilog2(rdev->lldi.udb_density); 637cfdda9d7SSteve Wise rdev->qpmask = rdev->lldi.udb_density - 1; 638cfdda9d7SSteve Wise rdev->cqshift = PAGE_SHIFT - ilog2(rdev->lldi.ucq_density); 639cfdda9d7SSteve Wise rdev->cqmask = rdev->lldi.ucq_density - 1; 640cfdda9d7SSteve Wise PDBG("%s dev %s stag start 0x%0x size 0x%0x num stags %d " 64193fb72e4SSteve Wise "pbl start 0x%0x size 0x%0x rq start 0x%0x size 0x%0x " 64293fb72e4SSteve Wise "qp qid start %u size %u cq qid start %u size %u\n", 643cfdda9d7SSteve Wise __func__, pci_name(rdev->lldi.pdev), rdev->lldi.vr->stag.start, 644cfdda9d7SSteve Wise rdev->lldi.vr->stag.size, c4iw_num_stags(rdev), 645cfdda9d7SSteve Wise rdev->lldi.vr->pbl.start, 646cfdda9d7SSteve Wise rdev->lldi.vr->pbl.size, rdev->lldi.vr->rq.start, 64793fb72e4SSteve Wise rdev->lldi.vr->rq.size, 64893fb72e4SSteve Wise rdev->lldi.vr->qp.start, 64993fb72e4SSteve Wise rdev->lldi.vr->qp.size, 65093fb72e4SSteve Wise rdev->lldi.vr->cq.start, 65193fb72e4SSteve Wise rdev->lldi.vr->cq.size); 652649fb5ecSBen Hutchings PDBG("udb len 0x%x udb base %llx db_reg %p gts_reg %p qpshift %lu " 653cfdda9d7SSteve Wise "qpmask 0x%x cqshift %lu cqmask 0x%x\n", 654cfdda9d7SSteve Wise (unsigned)pci_resource_len(rdev->lldi.pdev, 2), 655649fb5ecSBen Hutchings (u64)pci_resource_start(rdev->lldi.pdev, 2), 656cfdda9d7SSteve Wise rdev->lldi.db_reg, 657cfdda9d7SSteve Wise rdev->lldi.gts_reg, 658cfdda9d7SSteve Wise rdev->qpshift, rdev->qpmask, 659cfdda9d7SSteve Wise rdev->cqshift, rdev->cqmask); 660cfdda9d7SSteve Wise 661cfdda9d7SSteve Wise if (c4iw_num_stags(rdev) == 0) { 662cfdda9d7SSteve Wise err = -EINVAL; 663cfdda9d7SSteve Wise goto err1; 664cfdda9d7SSteve Wise } 665cfdda9d7SSteve Wise 6668d81ef34SVipul Pandya rdev->stats.pd.total = T4_MAX_NUM_PD; 6678d81ef34SVipul Pandya rdev->stats.stag.total = rdev->lldi.vr->stag.size; 6688d81ef34SVipul Pandya rdev->stats.pbl.total = rdev->lldi.vr->pbl.size; 6698d81ef34SVipul 
Pandya rdev->stats.rqt.total = rdev->lldi.vr->rq.size; 6708d81ef34SVipul Pandya rdev->stats.ocqp.total = rdev->lldi.vr->ocq.size; 6718d81ef34SVipul Pandya rdev->stats.qid.total = rdev->lldi.vr->qp.size; 6728d81ef34SVipul Pandya 673cfdda9d7SSteve Wise err = c4iw_init_resource(rdev, c4iw_num_stags(rdev), T4_MAX_NUM_PD); 674cfdda9d7SSteve Wise if (err) { 675cfdda9d7SSteve Wise printk(KERN_ERR MOD "error %d initializing resources\n", err); 676cfdda9d7SSteve Wise goto err1; 677cfdda9d7SSteve Wise } 678cfdda9d7SSteve Wise err = c4iw_pblpool_create(rdev); 679cfdda9d7SSteve Wise if (err) { 680cfdda9d7SSteve Wise printk(KERN_ERR MOD "error %d initializing pbl pool\n", err); 681cfdda9d7SSteve Wise goto err2; 682cfdda9d7SSteve Wise } 683cfdda9d7SSteve Wise err = c4iw_rqtpool_create(rdev); 684cfdda9d7SSteve Wise if (err) { 685cfdda9d7SSteve Wise printk(KERN_ERR MOD "error %d initializing rqt pool\n", err); 686cfdda9d7SSteve Wise goto err3; 687cfdda9d7SSteve Wise } 688c6d7b267SSteve Wise err = c4iw_ocqp_pool_create(rdev); 689c6d7b267SSteve Wise if (err) { 690c6d7b267SSteve Wise printk(KERN_ERR MOD "error %d initializing ocqp pool\n", err); 691c6d7b267SSteve Wise goto err4; 692c6d7b267SSteve Wise } 69305eb2389SSteve Wise rdev->status_page = (struct t4_dev_status_page *) 69405eb2389SSteve Wise __get_free_page(GFP_KERNEL); 69505eb2389SSteve Wise if (!rdev->status_page) { 69605eb2389SSteve Wise pr_err(MOD "error allocating status page\n"); 69705eb2389SSteve Wise goto err4; 69805eb2389SSteve Wise } 699cfdda9d7SSteve Wise return 0; 700c6d7b267SSteve Wise err4: 701c6d7b267SSteve Wise c4iw_rqtpool_destroy(rdev); 702cfdda9d7SSteve Wise err3: 703cfdda9d7SSteve Wise c4iw_pblpool_destroy(rdev); 704cfdda9d7SSteve Wise err2: 705cfdda9d7SSteve Wise c4iw_destroy_resource(&rdev->resource); 706cfdda9d7SSteve Wise err1: 707cfdda9d7SSteve Wise return err; 708cfdda9d7SSteve Wise } 709cfdda9d7SSteve Wise 710cfdda9d7SSteve Wise static void c4iw_rdev_close(struct c4iw_rdev *rdev) 711cfdda9d7SSteve 
Wise { 71205eb2389SSteve Wise free_page((unsigned long)rdev->status_page); 713cfdda9d7SSteve Wise c4iw_pblpool_destroy(rdev); 714cfdda9d7SSteve Wise c4iw_rqtpool_destroy(rdev); 715cfdda9d7SSteve Wise c4iw_destroy_resource(&rdev->resource); 716cfdda9d7SSteve Wise } 717cfdda9d7SSteve Wise 7189efe10a1SSteve Wise static void c4iw_dealloc(struct uld_ctx *ctx) 719cfdda9d7SSteve Wise { 7202f25e9a5SSteve Wise c4iw_rdev_close(&ctx->dev->rdev); 7212f25e9a5SSteve Wise idr_destroy(&ctx->dev->cqidr); 7222f25e9a5SSteve Wise idr_destroy(&ctx->dev->qpidr); 7232f25e9a5SSteve Wise idr_destroy(&ctx->dev->mmidr); 724793dad94SVipul Pandya idr_destroy(&ctx->dev->hwtid_idr); 725793dad94SVipul Pandya idr_destroy(&ctx->dev->stid_idr); 726793dad94SVipul Pandya idr_destroy(&ctx->dev->atid_idr); 727fa658a98SSteve Wise if (ctx->dev->rdev.bar2_kva) 728fa658a98SSteve Wise iounmap(ctx->dev->rdev.bar2_kva); 729fa658a98SSteve Wise if (ctx->dev->rdev.oc_mw_kva) 7302f25e9a5SSteve Wise iounmap(ctx->dev->rdev.oc_mw_kva); 7312f25e9a5SSteve Wise ib_dealloc_device(&ctx->dev->ibdev); 7329eccfe10SSteve Wise iwpm_exit(RDMA_NL_C4IW); 7332f25e9a5SSteve Wise ctx->dev = NULL; 734cfdda9d7SSteve Wise } 735cfdda9d7SSteve Wise 7369efe10a1SSteve Wise static void c4iw_remove(struct uld_ctx *ctx) 7379efe10a1SSteve Wise { 7389efe10a1SSteve Wise PDBG("%s c4iw_dev %p\n", __func__, ctx->dev); 7399efe10a1SSteve Wise c4iw_unregister_device(ctx->dev); 7409efe10a1SSteve Wise c4iw_dealloc(ctx); 7419efe10a1SSteve Wise } 7429efe10a1SSteve Wise 7439efe10a1SSteve Wise static int rdma_supported(const struct cxgb4_lld_info *infop) 7449efe10a1SSteve Wise { 7459efe10a1SSteve Wise return infop->vr->stag.size > 0 && infop->vr->pbl.size > 0 && 7469efe10a1SSteve Wise infop->vr->rq.size > 0 && infop->vr->qp.size > 0 && 747f079af7aSVipul Pandya infop->vr->cq.size > 0; 7489efe10a1SSteve Wise } 7499efe10a1SSteve Wise 750cfdda9d7SSteve Wise static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop) 751cfdda9d7SSteve Wise { 
752cfdda9d7SSteve Wise struct c4iw_dev *devp; 753cfdda9d7SSteve Wise int ret; 754cfdda9d7SSteve Wise 7559efe10a1SSteve Wise if (!rdma_supported(infop)) { 7569efe10a1SSteve Wise printk(KERN_INFO MOD "%s: RDMA not supported on this device.\n", 7579efe10a1SSteve Wise pci_name(infop->pdev)); 7589efe10a1SSteve Wise return ERR_PTR(-ENOSYS); 7599efe10a1SSteve Wise } 760f079af7aSVipul Pandya if (!ocqp_supported(infop)) 761f079af7aSVipul Pandya pr_info("%s: On-Chip Queues not supported on this device.\n", 762f079af7aSVipul Pandya pci_name(infop->pdev)); 76380ccdd60SVipul Pandya 764cfdda9d7SSteve Wise devp = (struct c4iw_dev *)ib_alloc_device(sizeof(*devp)); 765cfdda9d7SSteve Wise if (!devp) { 766cfdda9d7SSteve Wise printk(KERN_ERR MOD "Cannot allocate ib device\n"); 767bbe9a0a2SSteve Wise return ERR_PTR(-ENOMEM); 768cfdda9d7SSteve Wise } 769cfdda9d7SSteve Wise devp->rdev.lldi = *infop; 770cfdda9d7SSteve Wise 77104e10e21SHariprasad Shenai /* init various hw-queue params based on lld info */ 77204e10e21SHariprasad Shenai PDBG("%s: Ing. padding boundary is %d, egrsstatuspagesize = %d\n", 77304e10e21SHariprasad Shenai __func__, devp->rdev.lldi.sge_ingpadboundary, 77404e10e21SHariprasad Shenai devp->rdev.lldi.sge_egrstatuspagesize); 77504e10e21SHariprasad Shenai 77604e10e21SHariprasad Shenai devp->rdev.hw_queue.t4_eq_status_entries = 77704e10e21SHariprasad Shenai devp->rdev.lldi.sge_ingpadboundary > 64 ? 
2 : 1; 77804e10e21SHariprasad Shenai devp->rdev.hw_queue.t4_max_eq_size = 77904e10e21SHariprasad Shenai 65520 - devp->rdev.hw_queue.t4_eq_status_entries; 78004e10e21SHariprasad Shenai devp->rdev.hw_queue.t4_max_iq_size = 65520 - 1; 78104e10e21SHariprasad Shenai devp->rdev.hw_queue.t4_max_rq_size = 78204e10e21SHariprasad Shenai 8192 - devp->rdev.hw_queue.t4_eq_status_entries; 78304e10e21SHariprasad Shenai devp->rdev.hw_queue.t4_max_sq_size = 78404e10e21SHariprasad Shenai devp->rdev.hw_queue.t4_max_eq_size - 1; 78504e10e21SHariprasad Shenai devp->rdev.hw_queue.t4_max_qp_depth = 78604e10e21SHariprasad Shenai devp->rdev.hw_queue.t4_max_rq_size - 1; 78704e10e21SHariprasad Shenai devp->rdev.hw_queue.t4_max_cq_depth = 78804e10e21SHariprasad Shenai devp->rdev.hw_queue.t4_max_iq_size - 1; 78904e10e21SHariprasad Shenai devp->rdev.hw_queue.t4_stat_len = 79004e10e21SHariprasad Shenai devp->rdev.lldi.sge_egrstatuspagesize; 79104e10e21SHariprasad Shenai 792fa658a98SSteve Wise /* 793fa658a98SSteve Wise * For T5 devices, we map all of BAR2 with WC. 794fa658a98SSteve Wise * For T4 devices with onchip qp mem, we map only that part 795fa658a98SSteve Wise * of BAR2 with WC. 
796fa658a98SSteve Wise */ 797fa658a98SSteve Wise devp->rdev.bar2_pa = pci_resource_start(devp->rdev.lldi.pdev, 2); 798fa658a98SSteve Wise if (is_t5(devp->rdev.lldi.adapter_type)) { 799fa658a98SSteve Wise devp->rdev.bar2_kva = ioremap_wc(devp->rdev.bar2_pa, 800fa658a98SSteve Wise pci_resource_len(devp->rdev.lldi.pdev, 2)); 801fa658a98SSteve Wise if (!devp->rdev.bar2_kva) { 802fa658a98SSteve Wise pr_err(MOD "Unable to ioremap BAR2\n"); 80365b302adSChristoph Jaeger ib_dealloc_device(&devp->ibdev); 804fa658a98SSteve Wise return ERR_PTR(-EINVAL); 805fa658a98SSteve Wise } 806fa658a98SSteve Wise } else if (ocqp_supported(infop)) { 807fa658a98SSteve Wise devp->rdev.oc_mw_pa = 808fa658a98SSteve Wise pci_resource_start(devp->rdev.lldi.pdev, 2) + 809fa658a98SSteve Wise pci_resource_len(devp->rdev.lldi.pdev, 2) - 810fa658a98SSteve Wise roundup_pow_of_two(devp->rdev.lldi.vr->ocq.size); 811c6d7b267SSteve Wise devp->rdev.oc_mw_kva = ioremap_wc(devp->rdev.oc_mw_pa, 812c6d7b267SSteve Wise devp->rdev.lldi.vr->ocq.size); 813fa658a98SSteve Wise if (!devp->rdev.oc_mw_kva) { 814fa658a98SSteve Wise pr_err(MOD "Unable to ioremap onchip mem\n"); 81565b302adSChristoph Jaeger ib_dealloc_device(&devp->ibdev); 816fa658a98SSteve Wise return ERR_PTR(-EINVAL); 817fa658a98SSteve Wise } 818fa658a98SSteve Wise } 819c6d7b267SSteve Wise 8202f25e9a5SSteve Wise PDBG(KERN_INFO MOD "ocq memory: " 821c6d7b267SSteve Wise "hw_start 0x%x size %u mw_pa 0x%lx mw_kva %p\n", 822c6d7b267SSteve Wise devp->rdev.lldi.vr->ocq.start, devp->rdev.lldi.vr->ocq.size, 823c6d7b267SSteve Wise devp->rdev.oc_mw_pa, devp->rdev.oc_mw_kva); 824c6d7b267SSteve Wise 825cfdda9d7SSteve Wise ret = c4iw_rdev_open(&devp->rdev); 826cfdda9d7SSteve Wise if (ret) { 827cfdda9d7SSteve Wise printk(KERN_ERR MOD "Unable to open CXIO rdev err %d\n", ret); 828cfdda9d7SSteve Wise ib_dealloc_device(&devp->ibdev); 829bbe9a0a2SSteve Wise return ERR_PTR(ret); 830cfdda9d7SSteve Wise } 831cfdda9d7SSteve Wise 832cfdda9d7SSteve Wise idr_init(&devp->cqidr); 
833cfdda9d7SSteve Wise idr_init(&devp->qpidr); 834cfdda9d7SSteve Wise idr_init(&devp->mmidr); 835793dad94SVipul Pandya idr_init(&devp->hwtid_idr); 836793dad94SVipul Pandya idr_init(&devp->stid_idr); 837793dad94SVipul Pandya idr_init(&devp->atid_idr); 838cfdda9d7SSteve Wise spin_lock_init(&devp->lock); 8398d81ef34SVipul Pandya mutex_init(&devp->rdev.stats.lock); 8402c974781SVipul Pandya mutex_init(&devp->db_mutex); 84105eb2389SSteve Wise INIT_LIST_HEAD(&devp->db_fc_list); 842cfdda9d7SSteve Wise 843cfdda9d7SSteve Wise if (c4iw_debugfs_root) { 844cfdda9d7SSteve Wise devp->debugfs_root = debugfs_create_dir( 845cfdda9d7SSteve Wise pci_name(devp->rdev.lldi.pdev), 846cfdda9d7SSteve Wise c4iw_debugfs_root); 847cfdda9d7SSteve Wise setup_debugfs(devp); 848cfdda9d7SSteve Wise } 8499eccfe10SSteve Wise 8509eccfe10SSteve Wise ret = iwpm_init(RDMA_NL_C4IW); 8519eccfe10SSteve Wise if (ret) { 8529eccfe10SSteve Wise pr_err("port mapper initialization failed with %d\n", ret); 8539eccfe10SSteve Wise ib_dealloc_device(&devp->ibdev); 8549eccfe10SSteve Wise return ERR_PTR(ret); 8559eccfe10SSteve Wise } 8569eccfe10SSteve Wise 857cfdda9d7SSteve Wise return devp; 858cfdda9d7SSteve Wise } 859cfdda9d7SSteve Wise 860cfdda9d7SSteve Wise static void *c4iw_uld_add(const struct cxgb4_lld_info *infop) 861cfdda9d7SSteve Wise { 8622f25e9a5SSteve Wise struct uld_ctx *ctx; 863cfdda9d7SSteve Wise static int vers_printed; 864cfdda9d7SSteve Wise int i; 865cfdda9d7SSteve Wise 866cfdda9d7SSteve Wise if (!vers_printed++) 867f079af7aSVipul Pandya pr_info("Chelsio T4/T5 RDMA Driver - version %s\n", 868cfdda9d7SSteve Wise DRV_VERSION); 869cfdda9d7SSteve Wise 8702f25e9a5SSteve Wise ctx = kzalloc(sizeof *ctx, GFP_KERNEL); 8712f25e9a5SSteve Wise if (!ctx) { 8722f25e9a5SSteve Wise ctx = ERR_PTR(-ENOMEM); 873cfdda9d7SSteve Wise goto out; 8742f25e9a5SSteve Wise } 8752f25e9a5SSteve Wise ctx->lldi = *infop; 876cfdda9d7SSteve Wise 877cfdda9d7SSteve Wise PDBG("%s found device %s nchan %u nrxq %u ntxq %u nports %u\n", 
8782f25e9a5SSteve Wise __func__, pci_name(ctx->lldi.pdev), 8792f25e9a5SSteve Wise ctx->lldi.nchan, ctx->lldi.nrxq, 8802f25e9a5SSteve Wise ctx->lldi.ntxq, ctx->lldi.nports); 881cfdda9d7SSteve Wise 8822f25e9a5SSteve Wise mutex_lock(&dev_mutex); 8832f25e9a5SSteve Wise list_add_tail(&ctx->entry, &uld_ctx_list); 8842f25e9a5SSteve Wise mutex_unlock(&dev_mutex); 8852f25e9a5SSteve Wise 8862f25e9a5SSteve Wise for (i = 0; i < ctx->lldi.nrxq; i++) 8872f25e9a5SSteve Wise PDBG("rxqid[%u] %u\n", i, ctx->lldi.rxq_ids[i]); 888cfdda9d7SSteve Wise out: 8892f25e9a5SSteve Wise return ctx; 890cfdda9d7SSteve Wise } 891cfdda9d7SSteve Wise 8921cab775cSVipul Pandya static inline struct sk_buff *copy_gl_to_skb_pkt(const struct pkt_gl *gl, 8931cab775cSVipul Pandya const __be64 *rsp, 8941cab775cSVipul Pandya u32 pktshift) 8951cab775cSVipul Pandya { 8961cab775cSVipul Pandya struct sk_buff *skb; 8971cab775cSVipul Pandya 8981cab775cSVipul Pandya /* 8991cab775cSVipul Pandya * Allocate space for cpl_pass_accept_req which will be synthesized by 9001cab775cSVipul Pandya * driver. Once the driver synthesizes the request the skb will go 9011cab775cSVipul Pandya * through the regular cpl_pass_accept_req processing. 9021cab775cSVipul Pandya * The math here assumes sizeof cpl_pass_accept_req >= sizeof 9031cab775cSVipul Pandya * cpl_rx_pkt. 
9041cab775cSVipul Pandya */ 9051cab775cSVipul Pandya skb = alloc_skb(gl->tot_len + sizeof(struct cpl_pass_accept_req) + 9061cab775cSVipul Pandya sizeof(struct rss_header) - pktshift, GFP_ATOMIC); 9071cab775cSVipul Pandya if (unlikely(!skb)) 9081cab775cSVipul Pandya return NULL; 9091cab775cSVipul Pandya 9101cab775cSVipul Pandya __skb_put(skb, gl->tot_len + sizeof(struct cpl_pass_accept_req) + 9111cab775cSVipul Pandya sizeof(struct rss_header) - pktshift); 9121cab775cSVipul Pandya 9131cab775cSVipul Pandya /* 9141cab775cSVipul Pandya * This skb will contain: 9151cab775cSVipul Pandya * rss_header from the rspq descriptor (1 flit) 9161cab775cSVipul Pandya * cpl_rx_pkt struct from the rspq descriptor (2 flits) 9171cab775cSVipul Pandya * space for the difference between the size of an 9181cab775cSVipul Pandya * rx_pkt and pass_accept_req cpl (1 flit) 9191cab775cSVipul Pandya * the packet data from the gl 9201cab775cSVipul Pandya */ 9211cab775cSVipul Pandya skb_copy_to_linear_data(skb, rsp, sizeof(struct cpl_pass_accept_req) + 9221cab775cSVipul Pandya sizeof(struct rss_header)); 9231cab775cSVipul Pandya skb_copy_to_linear_data_offset(skb, sizeof(struct rss_header) + 9241cab775cSVipul Pandya sizeof(struct cpl_pass_accept_req), 9251cab775cSVipul Pandya gl->va + pktshift, 9261cab775cSVipul Pandya gl->tot_len - pktshift); 9271cab775cSVipul Pandya return skb; 9281cab775cSVipul Pandya } 9291cab775cSVipul Pandya 9301cab775cSVipul Pandya static inline int recv_rx_pkt(struct c4iw_dev *dev, const struct pkt_gl *gl, 9311cab775cSVipul Pandya const __be64 *rsp) 9321cab775cSVipul Pandya { 9331cab775cSVipul Pandya unsigned int opcode = *(u8 *)rsp; 9341cab775cSVipul Pandya struct sk_buff *skb; 9351cab775cSVipul Pandya 9361cab775cSVipul Pandya if (opcode != CPL_RX_PKT) 9371cab775cSVipul Pandya goto out; 9381cab775cSVipul Pandya 9391cab775cSVipul Pandya skb = copy_gl_to_skb_pkt(gl , rsp, dev->rdev.lldi.sge_pktshift); 9401cab775cSVipul Pandya if (skb == NULL) 9411cab775cSVipul Pandya goto 
out; 9421cab775cSVipul Pandya 9431cab775cSVipul Pandya if (c4iw_handlers[opcode] == NULL) { 9441cab775cSVipul Pandya pr_info("%s no handler opcode 0x%x...\n", __func__, 9451cab775cSVipul Pandya opcode); 9461cab775cSVipul Pandya kfree_skb(skb); 9471cab775cSVipul Pandya goto out; 9481cab775cSVipul Pandya } 9491cab775cSVipul Pandya c4iw_handlers[opcode](dev, skb); 9501cab775cSVipul Pandya return 1; 9511cab775cSVipul Pandya out: 9521cab775cSVipul Pandya return 0; 9531cab775cSVipul Pandya } 9541cab775cSVipul Pandya 955cfdda9d7SSteve Wise static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp, 956cfdda9d7SSteve Wise const struct pkt_gl *gl) 957cfdda9d7SSteve Wise { 9582f25e9a5SSteve Wise struct uld_ctx *ctx = handle; 9592f25e9a5SSteve Wise struct c4iw_dev *dev = ctx->dev; 960cfdda9d7SSteve Wise struct sk_buff *skb; 9611cab775cSVipul Pandya u8 opcode; 962cfdda9d7SSteve Wise 963cfdda9d7SSteve Wise if (gl == NULL) { 964cfdda9d7SSteve Wise /* omit RSS and rsp_ctrl at end of descriptor */ 965cfdda9d7SSteve Wise unsigned int len = 64 - sizeof(struct rsp_ctrl) - 8; 966cfdda9d7SSteve Wise 967cfdda9d7SSteve Wise skb = alloc_skb(256, GFP_ATOMIC); 968cfdda9d7SSteve Wise if (!skb) 969cfdda9d7SSteve Wise goto nomem; 970cfdda9d7SSteve Wise __skb_put(skb, len); 971cfdda9d7SSteve Wise skb_copy_to_linear_data(skb, &rsp[1], len); 972cfdda9d7SSteve Wise } else if (gl == CXGB4_MSG_AN) { 973cfdda9d7SSteve Wise const struct rsp_ctrl *rc = (void *)rsp; 974cfdda9d7SSteve Wise 975cfdda9d7SSteve Wise u32 qid = be32_to_cpu(rc->pldbuflen_qid); 976cfdda9d7SSteve Wise c4iw_ev_handler(dev, qid); 977cfdda9d7SSteve Wise return 0; 9781cab775cSVipul Pandya } else if (unlikely(*(u8 *)rsp != *(u8 *)gl->va)) { 9791cab775cSVipul Pandya if (recv_rx_pkt(dev, gl, rsp)) 9801cab775cSVipul Pandya return 0; 9811cab775cSVipul Pandya 9821cab775cSVipul Pandya pr_info("%s: unexpected FL contents at %p, " \ 9831cab775cSVipul Pandya "RSS %#llx, FL %#llx, len %u\n", 9841cab775cSVipul Pandya 
pci_name(ctx->lldi.pdev), gl->va, 9851cab775cSVipul Pandya (unsigned long long)be64_to_cpu(*rsp), 986ef5d6355SVipul Pandya (unsigned long long)be64_to_cpu( 987ef5d6355SVipul Pandya *(__force __be64 *)gl->va), 9881cab775cSVipul Pandya gl->tot_len); 9891cab775cSVipul Pandya 9901cab775cSVipul Pandya return 0; 991cfdda9d7SSteve Wise } else { 992da411ba1SSteve Wise skb = cxgb4_pktgl_to_skb(gl, 128, 128); 993cfdda9d7SSteve Wise if (unlikely(!skb)) 994cfdda9d7SSteve Wise goto nomem; 995cfdda9d7SSteve Wise } 996cfdda9d7SSteve Wise 9971cab775cSVipul Pandya opcode = *(u8 *)rsp; 998dbb084ccSSteve Wise if (c4iw_handlers[opcode]) { 999cfdda9d7SSteve Wise c4iw_handlers[opcode](dev, skb); 1000dbb084ccSSteve Wise } else { 10011cab775cSVipul Pandya pr_info("%s no handler opcode 0x%x...\n", __func__, 1002cfdda9d7SSteve Wise opcode); 1003dbb084ccSSteve Wise kfree_skb(skb); 1004dbb084ccSSteve Wise } 1005cfdda9d7SSteve Wise 1006cfdda9d7SSteve Wise return 0; 1007cfdda9d7SSteve Wise nomem: 1008cfdda9d7SSteve Wise return -1; 1009cfdda9d7SSteve Wise } 1010cfdda9d7SSteve Wise 1011cfdda9d7SSteve Wise static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state) 1012cfdda9d7SSteve Wise { 10132f25e9a5SSteve Wise struct uld_ctx *ctx = handle; 10141c01c538SSteve Wise 1015cfdda9d7SSteve Wise PDBG("%s new_state %u\n", __func__, new_state); 10161c01c538SSteve Wise switch (new_state) { 10171c01c538SSteve Wise case CXGB4_STATE_UP: 10182f25e9a5SSteve Wise printk(KERN_INFO MOD "%s: Up\n", pci_name(ctx->lldi.pdev)); 10192f25e9a5SSteve Wise if (!ctx->dev) { 10209efe10a1SSteve Wise int ret; 10212f25e9a5SSteve Wise 10222f25e9a5SSteve Wise ctx->dev = c4iw_alloc(&ctx->lldi); 10239efe10a1SSteve Wise if (IS_ERR(ctx->dev)) { 10249efe10a1SSteve Wise printk(KERN_ERR MOD 10259efe10a1SSteve Wise "%s: initialization failed: %ld\n", 10269efe10a1SSteve Wise pci_name(ctx->lldi.pdev), 10279efe10a1SSteve Wise PTR_ERR(ctx->dev)); 10289efe10a1SSteve Wise ctx->dev = NULL; 10299efe10a1SSteve Wise break; 
10309efe10a1SSteve Wise } 10312f25e9a5SSteve Wise ret = c4iw_register_device(ctx->dev); 10329efe10a1SSteve Wise if (ret) { 10331c01c538SSteve Wise printk(KERN_ERR MOD 10341c01c538SSteve Wise "%s: RDMA registration failed: %d\n", 10352f25e9a5SSteve Wise pci_name(ctx->lldi.pdev), ret); 10369efe10a1SSteve Wise c4iw_dealloc(ctx); 10379efe10a1SSteve Wise } 10381c01c538SSteve Wise } 10391c01c538SSteve Wise break; 10401c01c538SSteve Wise case CXGB4_STATE_DOWN: 10411c01c538SSteve Wise printk(KERN_INFO MOD "%s: Down\n", 10422f25e9a5SSteve Wise pci_name(ctx->lldi.pdev)); 10432f25e9a5SSteve Wise if (ctx->dev) 10442f25e9a5SSteve Wise c4iw_remove(ctx); 10451c01c538SSteve Wise break; 10461c01c538SSteve Wise case CXGB4_STATE_START_RECOVERY: 10471c01c538SSteve Wise printk(KERN_INFO MOD "%s: Fatal Error\n", 10482f25e9a5SSteve Wise pci_name(ctx->lldi.pdev)); 10492f25e9a5SSteve Wise if (ctx->dev) { 1050767fbe81SSteve Wise struct ib_event event; 1051767fbe81SSteve Wise 10522f25e9a5SSteve Wise ctx->dev->rdev.flags |= T4_FATAL_ERROR; 1053767fbe81SSteve Wise memset(&event, 0, sizeof event); 1054767fbe81SSteve Wise event.event = IB_EVENT_DEVICE_FATAL; 10552f25e9a5SSteve Wise event.device = &ctx->dev->ibdev; 1056767fbe81SSteve Wise ib_dispatch_event(&event); 10572f25e9a5SSteve Wise c4iw_remove(ctx); 1058767fbe81SSteve Wise } 10591c01c538SSteve Wise break; 10601c01c538SSteve Wise case CXGB4_STATE_DETACH: 10611c01c538SSteve Wise printk(KERN_INFO MOD "%s: Detach\n", 10622f25e9a5SSteve Wise pci_name(ctx->lldi.pdev)); 10632f25e9a5SSteve Wise if (ctx->dev) 10642f25e9a5SSteve Wise c4iw_remove(ctx); 10651c01c538SSteve Wise break; 10661c01c538SSteve Wise } 1067cfdda9d7SSteve Wise return 0; 1068cfdda9d7SSteve Wise } 1069cfdda9d7SSteve Wise 10702c974781SVipul Pandya static int disable_qp_db(int id, void *p, void *data) 10712c974781SVipul Pandya { 10722c974781SVipul Pandya struct c4iw_qp *qp = p; 10732c974781SVipul Pandya 10742c974781SVipul Pandya t4_disable_wq_db(&qp->wq); 10752c974781SVipul Pandya 
return 0; 10762c974781SVipul Pandya } 10772c974781SVipul Pandya 10782c974781SVipul Pandya static void stop_queues(struct uld_ctx *ctx) 10792c974781SVipul Pandya { 108005eb2389SSteve Wise unsigned long flags; 108105eb2389SSteve Wise 108205eb2389SSteve Wise spin_lock_irqsave(&ctx->dev->lock, flags); 1083422eea0aSVipul Pandya ctx->dev->rdev.stats.db_state_transitions++; 108405eb2389SSteve Wise ctx->dev->db_state = STOPPED; 108505eb2389SSteve Wise if (ctx->dev->rdev.flags & T4_STATUS_PAGE_DISABLED) 10862c974781SVipul Pandya idr_for_each(&ctx->dev->qpidr, disable_qp_db, NULL); 108705eb2389SSteve Wise else 108805eb2389SSteve Wise ctx->dev->rdev.status_page->db_off = 1; 108905eb2389SSteve Wise spin_unlock_irqrestore(&ctx->dev->lock, flags); 10902c974781SVipul Pandya } 10912c974781SVipul Pandya 10922c974781SVipul Pandya static int enable_qp_db(int id, void *p, void *data) 10932c974781SVipul Pandya { 10942c974781SVipul Pandya struct c4iw_qp *qp = p; 10952c974781SVipul Pandya 10962c974781SVipul Pandya t4_enable_wq_db(&qp->wq); 10972c974781SVipul Pandya return 0; 10982c974781SVipul Pandya } 10992c974781SVipul Pandya 110005eb2389SSteve Wise static void resume_rc_qp(struct c4iw_qp *qp) 110105eb2389SSteve Wise { 110205eb2389SSteve Wise spin_lock(&qp->lock); 1103fa658a98SSteve Wise t4_ring_sq_db(&qp->wq, qp->wq.sq.wq_pidx_inc, 1104fa658a98SSteve Wise is_t5(qp->rhp->rdev.lldi.adapter_type), NULL); 110505eb2389SSteve Wise qp->wq.sq.wq_pidx_inc = 0; 1106fa658a98SSteve Wise t4_ring_rq_db(&qp->wq, qp->wq.rq.wq_pidx_inc, 1107fa658a98SSteve Wise is_t5(qp->rhp->rdev.lldi.adapter_type), NULL); 110805eb2389SSteve Wise qp->wq.rq.wq_pidx_inc = 0; 110905eb2389SSteve Wise spin_unlock(&qp->lock); 111005eb2389SSteve Wise } 111105eb2389SSteve Wise 111205eb2389SSteve Wise static void resume_a_chunk(struct uld_ctx *ctx) 111305eb2389SSteve Wise { 111405eb2389SSteve Wise int i; 111505eb2389SSteve Wise struct c4iw_qp *qp; 111605eb2389SSteve Wise 111705eb2389SSteve Wise for (i = 0; i < 
DB_FC_RESUME_SIZE; i++) { 111805eb2389SSteve Wise qp = list_first_entry(&ctx->dev->db_fc_list, struct c4iw_qp, 111905eb2389SSteve Wise db_fc_entry); 112005eb2389SSteve Wise list_del_init(&qp->db_fc_entry); 112105eb2389SSteve Wise resume_rc_qp(qp); 112205eb2389SSteve Wise if (list_empty(&ctx->dev->db_fc_list)) 112305eb2389SSteve Wise break; 112405eb2389SSteve Wise } 112505eb2389SSteve Wise } 112605eb2389SSteve Wise 11272c974781SVipul Pandya static void resume_queues(struct uld_ctx *ctx) 11282c974781SVipul Pandya { 11292c974781SVipul Pandya spin_lock_irq(&ctx->dev->lock); 113005eb2389SSteve Wise if (ctx->dev->db_state != STOPPED) 113105eb2389SSteve Wise goto out; 113205eb2389SSteve Wise ctx->dev->db_state = FLOW_CONTROL; 113305eb2389SSteve Wise while (1) { 113405eb2389SSteve Wise if (list_empty(&ctx->dev->db_fc_list)) { 113505eb2389SSteve Wise WARN_ON(ctx->dev->db_state != FLOW_CONTROL); 1136422eea0aSVipul Pandya ctx->dev->db_state = NORMAL; 1137422eea0aSVipul Pandya ctx->dev->rdev.stats.db_state_transitions++; 113805eb2389SSteve Wise if (ctx->dev->rdev.flags & T4_STATUS_PAGE_DISABLED) { 113905eb2389SSteve Wise idr_for_each(&ctx->dev->qpidr, enable_qp_db, 114005eb2389SSteve Wise NULL); 114105eb2389SSteve Wise } else { 114205eb2389SSteve Wise ctx->dev->rdev.status_page->db_off = 0; 1143422eea0aSVipul Pandya } 114405eb2389SSteve Wise break; 114505eb2389SSteve Wise } else { 114605eb2389SSteve Wise if (cxgb4_dbfifo_count(ctx->dev->rdev.lldi.ports[0], 1) 114705eb2389SSteve Wise < (ctx->dev->rdev.lldi.dbfifo_int_thresh << 114805eb2389SSteve Wise DB_FC_DRAIN_THRESH)) { 114905eb2389SSteve Wise resume_a_chunk(ctx); 115005eb2389SSteve Wise } 115105eb2389SSteve Wise if (!list_empty(&ctx->dev->db_fc_list)) { 115205eb2389SSteve Wise spin_unlock_irq(&ctx->dev->lock); 115305eb2389SSteve Wise if (DB_FC_RESUME_DELAY) { 115405eb2389SSteve Wise set_current_state(TASK_UNINTERRUPTIBLE); 115505eb2389SSteve Wise schedule_timeout(DB_FC_RESUME_DELAY); 115605eb2389SSteve Wise } 
115705eb2389SSteve Wise spin_lock_irq(&ctx->dev->lock); 115805eb2389SSteve Wise if (ctx->dev->db_state != FLOW_CONTROL) 115905eb2389SSteve Wise break; 116005eb2389SSteve Wise } 116105eb2389SSteve Wise } 116205eb2389SSteve Wise } 116305eb2389SSteve Wise out: 116405eb2389SSteve Wise if (ctx->dev->db_state != NORMAL) 116505eb2389SSteve Wise ctx->dev->rdev.stats.db_fc_interruptions++; 1166422eea0aSVipul Pandya spin_unlock_irq(&ctx->dev->lock); 1167422eea0aSVipul Pandya } 1168422eea0aSVipul Pandya 1169422eea0aSVipul Pandya struct qp_list { 1170422eea0aSVipul Pandya unsigned idx; 1171422eea0aSVipul Pandya struct c4iw_qp **qps; 1172422eea0aSVipul Pandya }; 1173422eea0aSVipul Pandya 1174422eea0aSVipul Pandya static int add_and_ref_qp(int id, void *p, void *data) 1175422eea0aSVipul Pandya { 1176422eea0aSVipul Pandya struct qp_list *qp_listp = data; 1177422eea0aSVipul Pandya struct c4iw_qp *qp = p; 1178422eea0aSVipul Pandya 1179422eea0aSVipul Pandya c4iw_qp_add_ref(&qp->ibqp); 1180422eea0aSVipul Pandya qp_listp->qps[qp_listp->idx++] = qp; 1181422eea0aSVipul Pandya return 0; 1182422eea0aSVipul Pandya } 1183422eea0aSVipul Pandya 1184422eea0aSVipul Pandya static int count_qps(int id, void *p, void *data) 1185422eea0aSVipul Pandya { 1186422eea0aSVipul Pandya unsigned *countp = data; 1187422eea0aSVipul Pandya (*countp)++; 1188422eea0aSVipul Pandya return 0; 1189422eea0aSVipul Pandya } 1190422eea0aSVipul Pandya 119105eb2389SSteve Wise static void deref_qps(struct qp_list *qp_list) 1192422eea0aSVipul Pandya { 1193422eea0aSVipul Pandya int idx; 1194422eea0aSVipul Pandya 119505eb2389SSteve Wise for (idx = 0; idx < qp_list->idx; idx++) 119605eb2389SSteve Wise c4iw_qp_rem_ref(&qp_list->qps[idx]->ibqp); 1197422eea0aSVipul Pandya } 1198422eea0aSVipul Pandya 1199422eea0aSVipul Pandya static void recover_lost_dbs(struct uld_ctx *ctx, struct qp_list *qp_list) 1200422eea0aSVipul Pandya { 1201422eea0aSVipul Pandya int idx; 1202422eea0aSVipul Pandya int ret; 1203422eea0aSVipul Pandya 
1204422eea0aSVipul Pandya for (idx = 0; idx < qp_list->idx; idx++) { 1205422eea0aSVipul Pandya struct c4iw_qp *qp = qp_list->qps[idx]; 1206422eea0aSVipul Pandya 120705eb2389SSteve Wise spin_lock_irq(&qp->rhp->lock); 120805eb2389SSteve Wise spin_lock(&qp->lock); 1209422eea0aSVipul Pandya ret = cxgb4_sync_txq_pidx(qp->rhp->rdev.lldi.ports[0], 1210422eea0aSVipul Pandya qp->wq.sq.qid, 1211422eea0aSVipul Pandya t4_sq_host_wq_pidx(&qp->wq), 1212422eea0aSVipul Pandya t4_sq_wq_size(&qp->wq)); 1213422eea0aSVipul Pandya if (ret) { 121405eb2389SSteve Wise pr_err(KERN_ERR MOD "%s: Fatal error - " 1215422eea0aSVipul Pandya "DB overflow recovery failed - " 1216422eea0aSVipul Pandya "error syncing SQ qid %u\n", 1217422eea0aSVipul Pandya pci_name(ctx->lldi.pdev), qp->wq.sq.qid); 121805eb2389SSteve Wise spin_unlock(&qp->lock); 121905eb2389SSteve Wise spin_unlock_irq(&qp->rhp->lock); 1220422eea0aSVipul Pandya return; 1221422eea0aSVipul Pandya } 122205eb2389SSteve Wise qp->wq.sq.wq_pidx_inc = 0; 1223422eea0aSVipul Pandya 1224422eea0aSVipul Pandya ret = cxgb4_sync_txq_pidx(qp->rhp->rdev.lldi.ports[0], 1225422eea0aSVipul Pandya qp->wq.rq.qid, 1226422eea0aSVipul Pandya t4_rq_host_wq_pidx(&qp->wq), 1227422eea0aSVipul Pandya t4_rq_wq_size(&qp->wq)); 1228422eea0aSVipul Pandya 1229422eea0aSVipul Pandya if (ret) { 123005eb2389SSteve Wise pr_err(KERN_ERR MOD "%s: Fatal error - " 1231422eea0aSVipul Pandya "DB overflow recovery failed - " 1232422eea0aSVipul Pandya "error syncing RQ qid %u\n", 1233422eea0aSVipul Pandya pci_name(ctx->lldi.pdev), qp->wq.rq.qid); 123405eb2389SSteve Wise spin_unlock(&qp->lock); 123505eb2389SSteve Wise spin_unlock_irq(&qp->rhp->lock); 1236422eea0aSVipul Pandya return; 1237422eea0aSVipul Pandya } 123805eb2389SSteve Wise qp->wq.rq.wq_pidx_inc = 0; 123905eb2389SSteve Wise spin_unlock(&qp->lock); 124005eb2389SSteve Wise spin_unlock_irq(&qp->rhp->lock); 1241422eea0aSVipul Pandya 1242422eea0aSVipul Pandya /* Wait for the dbfifo to drain */ 1243422eea0aSVipul Pandya while 
(cxgb4_dbfifo_count(qp->rhp->rdev.lldi.ports[0], 1) > 0) { 1244422eea0aSVipul Pandya set_current_state(TASK_UNINTERRUPTIBLE); 1245422eea0aSVipul Pandya schedule_timeout(usecs_to_jiffies(10)); 1246422eea0aSVipul Pandya } 1247422eea0aSVipul Pandya } 1248422eea0aSVipul Pandya } 1249422eea0aSVipul Pandya 1250422eea0aSVipul Pandya static void recover_queues(struct uld_ctx *ctx) 1251422eea0aSVipul Pandya { 1252422eea0aSVipul Pandya int count = 0; 1253422eea0aSVipul Pandya struct qp_list qp_list; 1254422eea0aSVipul Pandya int ret; 1255422eea0aSVipul Pandya 1256422eea0aSVipul Pandya /* slow everybody down */ 1257422eea0aSVipul Pandya set_current_state(TASK_UNINTERRUPTIBLE); 1258422eea0aSVipul Pandya schedule_timeout(usecs_to_jiffies(1000)); 1259422eea0aSVipul Pandya 1260422eea0aSVipul Pandya /* flush the SGE contexts */ 1261422eea0aSVipul Pandya ret = cxgb4_flush_eq_cache(ctx->dev->rdev.lldi.ports[0]); 1262422eea0aSVipul Pandya if (ret) { 1263422eea0aSVipul Pandya printk(KERN_ERR MOD "%s: Fatal error - DB overflow recovery failed\n", 1264422eea0aSVipul Pandya pci_name(ctx->lldi.pdev)); 126505eb2389SSteve Wise return; 1266422eea0aSVipul Pandya } 1267422eea0aSVipul Pandya 1268422eea0aSVipul Pandya /* Count active queues so we can build a list of queues to recover */ 1269422eea0aSVipul Pandya spin_lock_irq(&ctx->dev->lock); 127005eb2389SSteve Wise WARN_ON(ctx->dev->db_state != STOPPED); 127105eb2389SSteve Wise ctx->dev->db_state = RECOVERY; 1272422eea0aSVipul Pandya idr_for_each(&ctx->dev->qpidr, count_qps, &count); 1273422eea0aSVipul Pandya 1274422eea0aSVipul Pandya qp_list.qps = kzalloc(count * sizeof *qp_list.qps, GFP_ATOMIC); 1275422eea0aSVipul Pandya if (!qp_list.qps) { 1276422eea0aSVipul Pandya printk(KERN_ERR MOD "%s: Fatal error - DB overflow recovery failed\n", 1277422eea0aSVipul Pandya pci_name(ctx->lldi.pdev)); 1278422eea0aSVipul Pandya spin_unlock_irq(&ctx->dev->lock); 127905eb2389SSteve Wise return; 1280422eea0aSVipul Pandya } 1281422eea0aSVipul Pandya 
qp_list.idx = 0; 1282422eea0aSVipul Pandya 1283422eea0aSVipul Pandya /* add and ref each qp so it doesn't get freed */ 1284422eea0aSVipul Pandya idr_for_each(&ctx->dev->qpidr, add_and_ref_qp, &qp_list); 1285422eea0aSVipul Pandya 1286422eea0aSVipul Pandya spin_unlock_irq(&ctx->dev->lock); 1287422eea0aSVipul Pandya 1288422eea0aSVipul Pandya /* now traverse the list in a safe context to recover the db state*/ 1289422eea0aSVipul Pandya recover_lost_dbs(ctx, &qp_list); 1290422eea0aSVipul Pandya 1291422eea0aSVipul Pandya /* we're almost done! deref the qps and clean up */ 129205eb2389SSteve Wise deref_qps(&qp_list); 1293422eea0aSVipul Pandya kfree(qp_list.qps); 1294422eea0aSVipul Pandya 1295422eea0aSVipul Pandya spin_lock_irq(&ctx->dev->lock); 129605eb2389SSteve Wise WARN_ON(ctx->dev->db_state != RECOVERY); 129705eb2389SSteve Wise ctx->dev->db_state = STOPPED; 12982c974781SVipul Pandya spin_unlock_irq(&ctx->dev->lock); 12992c974781SVipul Pandya } 13002c974781SVipul Pandya 13012c974781SVipul Pandya static int c4iw_uld_control(void *handle, enum cxgb4_control control, ...) 
13022c974781SVipul Pandya { 13032c974781SVipul Pandya struct uld_ctx *ctx = handle; 13042c974781SVipul Pandya 13052c974781SVipul Pandya switch (control) { 13062c974781SVipul Pandya case CXGB4_CONTROL_DB_FULL: 13072c974781SVipul Pandya stop_queues(ctx); 13082c974781SVipul Pandya ctx->dev->rdev.stats.db_full++; 13092c974781SVipul Pandya break; 13102c974781SVipul Pandya case CXGB4_CONTROL_DB_EMPTY: 13112c974781SVipul Pandya resume_queues(ctx); 13122c974781SVipul Pandya mutex_lock(&ctx->dev->rdev.stats.lock); 13132c974781SVipul Pandya ctx->dev->rdev.stats.db_empty++; 13142c974781SVipul Pandya mutex_unlock(&ctx->dev->rdev.stats.lock); 13152c974781SVipul Pandya break; 13162c974781SVipul Pandya case CXGB4_CONTROL_DB_DROP: 1317422eea0aSVipul Pandya recover_queues(ctx); 13182c974781SVipul Pandya mutex_lock(&ctx->dev->rdev.stats.lock); 13192c974781SVipul Pandya ctx->dev->rdev.stats.db_drop++; 13202c974781SVipul Pandya mutex_unlock(&ctx->dev->rdev.stats.lock); 13212c974781SVipul Pandya break; 13222c974781SVipul Pandya default: 13232c974781SVipul Pandya printk(KERN_WARNING MOD "%s: unknown control cmd %u\n", 13242c974781SVipul Pandya pci_name(ctx->lldi.pdev), control); 13252c974781SVipul Pandya break; 13262c974781SVipul Pandya } 13272c974781SVipul Pandya return 0; 13282c974781SVipul Pandya } 13292c974781SVipul Pandya 1330cfdda9d7SSteve Wise static struct cxgb4_uld_info c4iw_uld_info = { 1331cfdda9d7SSteve Wise .name = DRV_NAME, 1332cfdda9d7SSteve Wise .add = c4iw_uld_add, 1333cfdda9d7SSteve Wise .rx_handler = c4iw_uld_rx_handler, 1334cfdda9d7SSteve Wise .state_change = c4iw_uld_state_change, 13352c974781SVipul Pandya .control = c4iw_uld_control, 1336cfdda9d7SSteve Wise }; 1337cfdda9d7SSteve Wise 1338cfdda9d7SSteve Wise static int __init c4iw_init_module(void) 1339cfdda9d7SSteve Wise { 1340cfdda9d7SSteve Wise int err; 1341cfdda9d7SSteve Wise 1342cfdda9d7SSteve Wise err = c4iw_cm_init(); 1343cfdda9d7SSteve Wise if (err) 1344cfdda9d7SSteve Wise return err; 1345cfdda9d7SSteve Wise 
1346cfdda9d7SSteve Wise c4iw_debugfs_root = debugfs_create_dir(DRV_NAME, NULL); 1347cfdda9d7SSteve Wise if (!c4iw_debugfs_root) 1348cfdda9d7SSteve Wise printk(KERN_WARNING MOD 1349cfdda9d7SSteve Wise "could not create debugfs entry, continuing\n"); 1350cfdda9d7SSteve Wise 13519eccfe10SSteve Wise if (ibnl_add_client(RDMA_NL_C4IW, RDMA_NL_IWPM_NUM_OPS, 13529eccfe10SSteve Wise c4iw_nl_cb_table)) 13539eccfe10SSteve Wise pr_err("%s[%u]: Failed to add netlink callback\n" 13549eccfe10SSteve Wise , __func__, __LINE__); 13559eccfe10SSteve Wise 1356cfdda9d7SSteve Wise cxgb4_register_uld(CXGB4_ULD_RDMA, &c4iw_uld_info); 1357cfdda9d7SSteve Wise 1358cfdda9d7SSteve Wise return 0; 1359cfdda9d7SSteve Wise } 1360cfdda9d7SSteve Wise 1361cfdda9d7SSteve Wise static void __exit c4iw_exit_module(void) 1362cfdda9d7SSteve Wise { 13632f25e9a5SSteve Wise struct uld_ctx *ctx, *tmp; 1364cfdda9d7SSteve Wise 1365cfdda9d7SSteve Wise mutex_lock(&dev_mutex); 13662f25e9a5SSteve Wise list_for_each_entry_safe(ctx, tmp, &uld_ctx_list, entry) { 13672f25e9a5SSteve Wise if (ctx->dev) 13682f25e9a5SSteve Wise c4iw_remove(ctx); 13692f25e9a5SSteve Wise kfree(ctx); 1370cfdda9d7SSteve Wise } 1371cfdda9d7SSteve Wise mutex_unlock(&dev_mutex); 1372fd388ce6SSteve Wise cxgb4_unregister_uld(CXGB4_ULD_RDMA); 13739eccfe10SSteve Wise ibnl_remove_client(RDMA_NL_C4IW); 1374cfdda9d7SSteve Wise c4iw_cm_term(); 1375cfdda9d7SSteve Wise debugfs_remove_recursive(c4iw_debugfs_root); 1376cfdda9d7SSteve Wise } 1377cfdda9d7SSteve Wise 1378cfdda9d7SSteve Wise module_init(c4iw_init_module); 1379cfdda9d7SSteve Wise module_exit(c4iw_exit_module); 1380