1cfdda9d7SSteve Wise /* 2cfdda9d7SSteve Wise * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved. 3cfdda9d7SSteve Wise * 4cfdda9d7SSteve Wise * This software is available to you under a choice of one of two 5cfdda9d7SSteve Wise * licenses. You may choose to be licensed under the terms of the GNU 6cfdda9d7SSteve Wise * General Public License (GPL) Version 2, available from the file 7cfdda9d7SSteve Wise * COPYING in the main directory of this source tree, or the 8cfdda9d7SSteve Wise * OpenIB.org BSD license below: 9cfdda9d7SSteve Wise * 10cfdda9d7SSteve Wise * Redistribution and use in source and binary forms, with or 11cfdda9d7SSteve Wise * without modification, are permitted provided that the following 12cfdda9d7SSteve Wise * conditions are met: 13cfdda9d7SSteve Wise * 14cfdda9d7SSteve Wise * - Redistributions of source code must retain the above 15cfdda9d7SSteve Wise * copyright notice, this list of conditions and the following 16cfdda9d7SSteve Wise * disclaimer. 17cfdda9d7SSteve Wise * 18cfdda9d7SSteve Wise * - Redistributions in binary form must reproduce the above 19cfdda9d7SSteve Wise * copyright notice, this list of conditions and the following 20cfdda9d7SSteve Wise * disclaimer in the documentation and/or other materials 21cfdda9d7SSteve Wise * provided with the distribution. 22cfdda9d7SSteve Wise * 23cfdda9d7SSteve Wise * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24cfdda9d7SSteve Wise * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25cfdda9d7SSteve Wise * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26cfdda9d7SSteve Wise * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27cfdda9d7SSteve Wise * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28cfdda9d7SSteve Wise * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29cfdda9d7SSteve Wise * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30cfdda9d7SSteve Wise * SOFTWARE. 
31cfdda9d7SSteve Wise */ 32cfdda9d7SSteve Wise #include <linux/module.h> 33cfdda9d7SSteve Wise #include <linux/moduleparam.h> 34cfdda9d7SSteve Wise #include <linux/debugfs.h> 35e572568fSVipul Pandya #include <linux/vmalloc.h> 36cfdda9d7SSteve Wise 37cfdda9d7SSteve Wise #include <rdma/ib_verbs.h> 38cfdda9d7SSteve Wise 39cfdda9d7SSteve Wise #include "iw_cxgb4.h" 40cfdda9d7SSteve Wise 41cfdda9d7SSteve Wise #define DRV_VERSION "0.1" 42cfdda9d7SSteve Wise 43cfdda9d7SSteve Wise MODULE_AUTHOR("Steve Wise"); 44f079af7aSVipul Pandya MODULE_DESCRIPTION("Chelsio T4/T5 RDMA Driver"); 45cfdda9d7SSteve Wise MODULE_LICENSE("Dual BSD/GPL"); 46cfdda9d7SSteve Wise MODULE_VERSION(DRV_VERSION); 47cfdda9d7SSteve Wise 4880ccdd60SVipul Pandya static int allow_db_fc_on_t5; 4980ccdd60SVipul Pandya module_param(allow_db_fc_on_t5, int, 0644); 5080ccdd60SVipul Pandya MODULE_PARM_DESC(allow_db_fc_on_t5, 5180ccdd60SVipul Pandya "Allow DB Flow Control on T5 (default = 0)"); 5280ccdd60SVipul Pandya 5380ccdd60SVipul Pandya static int allow_db_coalescing_on_t5; 5480ccdd60SVipul Pandya module_param(allow_db_coalescing_on_t5, int, 0644); 5580ccdd60SVipul Pandya MODULE_PARM_DESC(allow_db_coalescing_on_t5, 5680ccdd60SVipul Pandya "Allow DB Coalescing on T5 (default = 0)"); 5780ccdd60SVipul Pandya 582c974781SVipul Pandya struct uld_ctx { 592c974781SVipul Pandya struct list_head entry; 602c974781SVipul Pandya struct cxgb4_lld_info lldi; 612c974781SVipul Pandya struct c4iw_dev *dev; 622c974781SVipul Pandya }; 632c974781SVipul Pandya 642f25e9a5SSteve Wise static LIST_HEAD(uld_ctx_list); 65cfdda9d7SSteve Wise static DEFINE_MUTEX(dev_mutex); 66cfdda9d7SSteve Wise 6705eb2389SSteve Wise #define DB_FC_RESUME_SIZE 64 6805eb2389SSteve Wise #define DB_FC_RESUME_DELAY 1 6905eb2389SSteve Wise #define DB_FC_DRAIN_THRESH 0 7005eb2389SSteve Wise 71cfdda9d7SSteve Wise static struct dentry *c4iw_debugfs_root; 72cfdda9d7SSteve Wise 739e8d1fa3SSteve Wise struct c4iw_debugfs_data { 74cfdda9d7SSteve Wise struct c4iw_dev 
*devp; 75cfdda9d7SSteve Wise char *buf; 76cfdda9d7SSteve Wise int bufsize; 77cfdda9d7SSteve Wise int pos; 78cfdda9d7SSteve Wise }; 79cfdda9d7SSteve Wise 809e8d1fa3SSteve Wise static int count_idrs(int id, void *p, void *data) 81cfdda9d7SSteve Wise { 82cfdda9d7SSteve Wise int *countp = data; 83cfdda9d7SSteve Wise 84cfdda9d7SSteve Wise *countp = *countp + 1; 85cfdda9d7SSteve Wise return 0; 86cfdda9d7SSteve Wise } 87cfdda9d7SSteve Wise 889e8d1fa3SSteve Wise static ssize_t debugfs_read(struct file *file, char __user *buf, size_t count, 899e8d1fa3SSteve Wise loff_t *ppos) 909e8d1fa3SSteve Wise { 919e8d1fa3SSteve Wise struct c4iw_debugfs_data *d = file->private_data; 929e8d1fa3SSteve Wise 933160977aSSteve Wise return simple_read_from_buffer(buf, count, ppos, d->buf, d->pos); 949e8d1fa3SSteve Wise } 959e8d1fa3SSteve Wise 969e8d1fa3SSteve Wise static int dump_qp(int id, void *p, void *data) 97cfdda9d7SSteve Wise { 98cfdda9d7SSteve Wise struct c4iw_qp *qp = p; 999e8d1fa3SSteve Wise struct c4iw_debugfs_data *qpd = data; 100cfdda9d7SSteve Wise int space; 101cfdda9d7SSteve Wise int cc; 102cfdda9d7SSteve Wise 103cfdda9d7SSteve Wise if (id != qp->wq.sq.qid) 104cfdda9d7SSteve Wise return 0; 105cfdda9d7SSteve Wise 106cfdda9d7SSteve Wise space = qpd->bufsize - qpd->pos - 1; 107cfdda9d7SSteve Wise if (space == 0) 108cfdda9d7SSteve Wise return 1; 109cfdda9d7SSteve Wise 110830662f6SVipul Pandya if (qp->ep) { 111830662f6SVipul Pandya if (qp->ep->com.local_addr.ss_family == AF_INET) { 112830662f6SVipul Pandya struct sockaddr_in *lsin = (struct sockaddr_in *) 113830662f6SVipul Pandya &qp->ep->com.local_addr; 114830662f6SVipul Pandya struct sockaddr_in *rsin = (struct sockaddr_in *) 115830662f6SVipul Pandya &qp->ep->com.remote_addr; 116830662f6SVipul Pandya 117db5d040dSSteve Wise cc = snprintf(qpd->buf + qpd->pos, space, 118830662f6SVipul Pandya "rc qp sq id %u rq id %u state %u " 119830662f6SVipul Pandya "onchip %u ep tid %u state %u " 120830662f6SVipul Pandya "%pI4:%u->%pI4:%u\n", 
121830662f6SVipul Pandya qp->wq.sq.qid, qp->wq.rq.qid, 122830662f6SVipul Pandya (int)qp->attr.state, 123db5d040dSSteve Wise qp->wq.sq.flags & T4_SQ_ONCHIP, 124cfdda9d7SSteve Wise qp->ep->hwtid, (int)qp->ep->com.state, 125830662f6SVipul Pandya &lsin->sin_addr, ntohs(lsin->sin_port), 126830662f6SVipul Pandya &rsin->sin_addr, ntohs(rsin->sin_port)); 127830662f6SVipul Pandya } else { 128830662f6SVipul Pandya struct sockaddr_in6 *lsin6 = (struct sockaddr_in6 *) 129830662f6SVipul Pandya &qp->ep->com.local_addr; 130830662f6SVipul Pandya struct sockaddr_in6 *rsin6 = (struct sockaddr_in6 *) 131830662f6SVipul Pandya &qp->ep->com.remote_addr; 132830662f6SVipul Pandya 133830662f6SVipul Pandya cc = snprintf(qpd->buf + qpd->pos, space, 134830662f6SVipul Pandya "rc qp sq id %u rq id %u state %u " 135830662f6SVipul Pandya "onchip %u ep tid %u state %u " 136830662f6SVipul Pandya "%pI6:%u->%pI6:%u\n", 137830662f6SVipul Pandya qp->wq.sq.qid, qp->wq.rq.qid, 138830662f6SVipul Pandya (int)qp->attr.state, 139830662f6SVipul Pandya qp->wq.sq.flags & T4_SQ_ONCHIP, 140830662f6SVipul Pandya qp->ep->hwtid, (int)qp->ep->com.state, 141830662f6SVipul Pandya &lsin6->sin6_addr, 142830662f6SVipul Pandya ntohs(lsin6->sin6_port), 143830662f6SVipul Pandya &rsin6->sin6_addr, 144830662f6SVipul Pandya ntohs(rsin6->sin6_port)); 145830662f6SVipul Pandya } 146830662f6SVipul Pandya } else 147db5d040dSSteve Wise cc = snprintf(qpd->buf + qpd->pos, space, 148db5d040dSSteve Wise "qp sq id %u rq id %u state %u onchip %u\n", 149db5d040dSSteve Wise qp->wq.sq.qid, qp->wq.rq.qid, 150db5d040dSSteve Wise (int)qp->attr.state, 151db5d040dSSteve Wise qp->wq.sq.flags & T4_SQ_ONCHIP); 152cfdda9d7SSteve Wise if (cc < space) 153cfdda9d7SSteve Wise qpd->pos += cc; 154cfdda9d7SSteve Wise return 0; 155cfdda9d7SSteve Wise } 156cfdda9d7SSteve Wise 157cfdda9d7SSteve Wise static int qp_release(struct inode *inode, struct file *file) 158cfdda9d7SSteve Wise { 1599e8d1fa3SSteve Wise struct c4iw_debugfs_data *qpd = file->private_data; 
160cfdda9d7SSteve Wise if (!qpd) { 161cfdda9d7SSteve Wise printk(KERN_INFO "%s null qpd?\n", __func__); 162cfdda9d7SSteve Wise return 0; 163cfdda9d7SSteve Wise } 164d716a2a0SVipul Pandya vfree(qpd->buf); 165cfdda9d7SSteve Wise kfree(qpd); 166cfdda9d7SSteve Wise return 0; 167cfdda9d7SSteve Wise } 168cfdda9d7SSteve Wise 169cfdda9d7SSteve Wise static int qp_open(struct inode *inode, struct file *file) 170cfdda9d7SSteve Wise { 1719e8d1fa3SSteve Wise struct c4iw_debugfs_data *qpd; 172cfdda9d7SSteve Wise int ret = 0; 173cfdda9d7SSteve Wise int count = 1; 174cfdda9d7SSteve Wise 175cfdda9d7SSteve Wise qpd = kmalloc(sizeof *qpd, GFP_KERNEL); 176cfdda9d7SSteve Wise if (!qpd) { 177cfdda9d7SSteve Wise ret = -ENOMEM; 178cfdda9d7SSteve Wise goto out; 179cfdda9d7SSteve Wise } 180cfdda9d7SSteve Wise qpd->devp = inode->i_private; 181cfdda9d7SSteve Wise qpd->pos = 0; 182cfdda9d7SSteve Wise 183cfdda9d7SSteve Wise spin_lock_irq(&qpd->devp->lock); 1849e8d1fa3SSteve Wise idr_for_each(&qpd->devp->qpidr, count_idrs, &count); 185cfdda9d7SSteve Wise spin_unlock_irq(&qpd->devp->lock); 186cfdda9d7SSteve Wise 187cfdda9d7SSteve Wise qpd->bufsize = count * 128; 188d716a2a0SVipul Pandya qpd->buf = vmalloc(qpd->bufsize); 189cfdda9d7SSteve Wise if (!qpd->buf) { 190cfdda9d7SSteve Wise ret = -ENOMEM; 191cfdda9d7SSteve Wise goto err1; 192cfdda9d7SSteve Wise } 193cfdda9d7SSteve Wise 194cfdda9d7SSteve Wise spin_lock_irq(&qpd->devp->lock); 1959e8d1fa3SSteve Wise idr_for_each(&qpd->devp->qpidr, dump_qp, qpd); 196cfdda9d7SSteve Wise spin_unlock_irq(&qpd->devp->lock); 197cfdda9d7SSteve Wise 198cfdda9d7SSteve Wise qpd->buf[qpd->pos++] = 0; 199cfdda9d7SSteve Wise file->private_data = qpd; 200cfdda9d7SSteve Wise goto out; 201cfdda9d7SSteve Wise err1: 202cfdda9d7SSteve Wise kfree(qpd); 203cfdda9d7SSteve Wise out: 204cfdda9d7SSteve Wise return ret; 205cfdda9d7SSteve Wise } 206cfdda9d7SSteve Wise 207cfdda9d7SSteve Wise static const struct file_operations qp_debugfs_fops = { 208cfdda9d7SSteve Wise .owner = 
THIS_MODULE, 209cfdda9d7SSteve Wise .open = qp_open, 210cfdda9d7SSteve Wise .release = qp_release, 2119e8d1fa3SSteve Wise .read = debugfs_read, 2128bbac892SSteve Wise .llseek = default_llseek, 2139e8d1fa3SSteve Wise }; 2149e8d1fa3SSteve Wise 2159e8d1fa3SSteve Wise static int dump_stag(int id, void *p, void *data) 2169e8d1fa3SSteve Wise { 2179e8d1fa3SSteve Wise struct c4iw_debugfs_data *stagd = data; 2189e8d1fa3SSteve Wise int space; 2199e8d1fa3SSteve Wise int cc; 2209e8d1fa3SSteve Wise 2219e8d1fa3SSteve Wise space = stagd->bufsize - stagd->pos - 1; 2229e8d1fa3SSteve Wise if (space == 0) 2239e8d1fa3SSteve Wise return 1; 2249e8d1fa3SSteve Wise 2259e8d1fa3SSteve Wise cc = snprintf(stagd->buf + stagd->pos, space, "0x%x\n", id<<8); 2269e8d1fa3SSteve Wise if (cc < space) 2279e8d1fa3SSteve Wise stagd->pos += cc; 2289e8d1fa3SSteve Wise return 0; 2299e8d1fa3SSteve Wise } 2309e8d1fa3SSteve Wise 2319e8d1fa3SSteve Wise static int stag_release(struct inode *inode, struct file *file) 2329e8d1fa3SSteve Wise { 2339e8d1fa3SSteve Wise struct c4iw_debugfs_data *stagd = file->private_data; 2349e8d1fa3SSteve Wise if (!stagd) { 2359e8d1fa3SSteve Wise printk(KERN_INFO "%s null stagd?\n", __func__); 2369e8d1fa3SSteve Wise return 0; 2379e8d1fa3SSteve Wise } 2389e8d1fa3SSteve Wise kfree(stagd->buf); 2399e8d1fa3SSteve Wise kfree(stagd); 2409e8d1fa3SSteve Wise return 0; 2419e8d1fa3SSteve Wise } 2429e8d1fa3SSteve Wise 2439e8d1fa3SSteve Wise static int stag_open(struct inode *inode, struct file *file) 2449e8d1fa3SSteve Wise { 2459e8d1fa3SSteve Wise struct c4iw_debugfs_data *stagd; 2469e8d1fa3SSteve Wise int ret = 0; 2479e8d1fa3SSteve Wise int count = 1; 2489e8d1fa3SSteve Wise 2499e8d1fa3SSteve Wise stagd = kmalloc(sizeof *stagd, GFP_KERNEL); 2509e8d1fa3SSteve Wise if (!stagd) { 2519e8d1fa3SSteve Wise ret = -ENOMEM; 2529e8d1fa3SSteve Wise goto out; 2539e8d1fa3SSteve Wise } 2549e8d1fa3SSteve Wise stagd->devp = inode->i_private; 2559e8d1fa3SSteve Wise stagd->pos = 0; 2569e8d1fa3SSteve Wise 
2579e8d1fa3SSteve Wise spin_lock_irq(&stagd->devp->lock); 2589e8d1fa3SSteve Wise idr_for_each(&stagd->devp->mmidr, count_idrs, &count); 2599e8d1fa3SSteve Wise spin_unlock_irq(&stagd->devp->lock); 2609e8d1fa3SSteve Wise 2619e8d1fa3SSteve Wise stagd->bufsize = count * sizeof("0x12345678\n"); 2629e8d1fa3SSteve Wise stagd->buf = kmalloc(stagd->bufsize, GFP_KERNEL); 2639e8d1fa3SSteve Wise if (!stagd->buf) { 2649e8d1fa3SSteve Wise ret = -ENOMEM; 2659e8d1fa3SSteve Wise goto err1; 2669e8d1fa3SSteve Wise } 2679e8d1fa3SSteve Wise 2689e8d1fa3SSteve Wise spin_lock_irq(&stagd->devp->lock); 2699e8d1fa3SSteve Wise idr_for_each(&stagd->devp->mmidr, dump_stag, stagd); 2709e8d1fa3SSteve Wise spin_unlock_irq(&stagd->devp->lock); 2719e8d1fa3SSteve Wise 2729e8d1fa3SSteve Wise stagd->buf[stagd->pos++] = 0; 2739e8d1fa3SSteve Wise file->private_data = stagd; 2749e8d1fa3SSteve Wise goto out; 2759e8d1fa3SSteve Wise err1: 2769e8d1fa3SSteve Wise kfree(stagd); 2779e8d1fa3SSteve Wise out: 2789e8d1fa3SSteve Wise return ret; 2799e8d1fa3SSteve Wise } 2809e8d1fa3SSteve Wise 2819e8d1fa3SSteve Wise static const struct file_operations stag_debugfs_fops = { 2829e8d1fa3SSteve Wise .owner = THIS_MODULE, 2839e8d1fa3SSteve Wise .open = stag_open, 2849e8d1fa3SSteve Wise .release = stag_release, 2859e8d1fa3SSteve Wise .read = debugfs_read, 2868bbac892SSteve Wise .llseek = default_llseek, 287cfdda9d7SSteve Wise }; 288cfdda9d7SSteve Wise 28905eb2389SSteve Wise static char *db_state_str[] = {"NORMAL", "FLOW_CONTROL", "RECOVERY", "STOPPED"}; 290422eea0aSVipul Pandya 2918d81ef34SVipul Pandya static int stats_show(struct seq_file *seq, void *v) 2928d81ef34SVipul Pandya { 2938d81ef34SVipul Pandya struct c4iw_dev *dev = seq->private; 2948d81ef34SVipul Pandya 295ec3eead2SVipul Pandya seq_printf(seq, " Object: %10s %10s %10s %10s\n", "Total", "Current", 296ec3eead2SVipul Pandya "Max", "Fail"); 297ec3eead2SVipul Pandya seq_printf(seq, " PDID: %10llu %10llu %10llu %10llu\n", 2988d81ef34SVipul Pandya 
dev->rdev.stats.pd.total, dev->rdev.stats.pd.cur, 299ec3eead2SVipul Pandya dev->rdev.stats.pd.max, dev->rdev.stats.pd.fail); 300ec3eead2SVipul Pandya seq_printf(seq, " QID: %10llu %10llu %10llu %10llu\n", 3018d81ef34SVipul Pandya dev->rdev.stats.qid.total, dev->rdev.stats.qid.cur, 302ec3eead2SVipul Pandya dev->rdev.stats.qid.max, dev->rdev.stats.qid.fail); 303ec3eead2SVipul Pandya seq_printf(seq, " TPTMEM: %10llu %10llu %10llu %10llu\n", 3048d81ef34SVipul Pandya dev->rdev.stats.stag.total, dev->rdev.stats.stag.cur, 305ec3eead2SVipul Pandya dev->rdev.stats.stag.max, dev->rdev.stats.stag.fail); 306ec3eead2SVipul Pandya seq_printf(seq, " PBLMEM: %10llu %10llu %10llu %10llu\n", 3078d81ef34SVipul Pandya dev->rdev.stats.pbl.total, dev->rdev.stats.pbl.cur, 308ec3eead2SVipul Pandya dev->rdev.stats.pbl.max, dev->rdev.stats.pbl.fail); 309ec3eead2SVipul Pandya seq_printf(seq, " RQTMEM: %10llu %10llu %10llu %10llu\n", 3108d81ef34SVipul Pandya dev->rdev.stats.rqt.total, dev->rdev.stats.rqt.cur, 311ec3eead2SVipul Pandya dev->rdev.stats.rqt.max, dev->rdev.stats.rqt.fail); 312ec3eead2SVipul Pandya seq_printf(seq, " OCQPMEM: %10llu %10llu %10llu %10llu\n", 3138d81ef34SVipul Pandya dev->rdev.stats.ocqp.total, dev->rdev.stats.ocqp.cur, 314ec3eead2SVipul Pandya dev->rdev.stats.ocqp.max, dev->rdev.stats.ocqp.fail); 3152c974781SVipul Pandya seq_printf(seq, " DB FULL: %10llu\n", dev->rdev.stats.db_full); 3162c974781SVipul Pandya seq_printf(seq, " DB EMPTY: %10llu\n", dev->rdev.stats.db_empty); 3172c974781SVipul Pandya seq_printf(seq, " DB DROP: %10llu\n", dev->rdev.stats.db_drop); 31805eb2389SSteve Wise seq_printf(seq, " DB State: %s Transitions %llu FC Interruptions %llu\n", 319422eea0aSVipul Pandya db_state_str[dev->db_state], 32005eb2389SSteve Wise dev->rdev.stats.db_state_transitions, 32105eb2389SSteve Wise dev->rdev.stats.db_fc_interruptions); 3221cab775cSVipul Pandya seq_printf(seq, "TCAM_FULL: %10llu\n", dev->rdev.stats.tcam_full); 323793dad94SVipul Pandya seq_printf(seq, 
"ACT_OFLD_CONN_FAILS: %10llu\n", 324793dad94SVipul Pandya dev->rdev.stats.act_ofld_conn_fails); 325793dad94SVipul Pandya seq_printf(seq, "PAS_OFLD_CONN_FAILS: %10llu\n", 326793dad94SVipul Pandya dev->rdev.stats.pas_ofld_conn_fails); 3278d81ef34SVipul Pandya return 0; 3288d81ef34SVipul Pandya } 3298d81ef34SVipul Pandya 3308d81ef34SVipul Pandya static int stats_open(struct inode *inode, struct file *file) 3318d81ef34SVipul Pandya { 3328d81ef34SVipul Pandya return single_open(file, stats_show, inode->i_private); 3338d81ef34SVipul Pandya } 3348d81ef34SVipul Pandya 3358d81ef34SVipul Pandya static ssize_t stats_clear(struct file *file, const char __user *buf, 3368d81ef34SVipul Pandya size_t count, loff_t *pos) 3378d81ef34SVipul Pandya { 3388d81ef34SVipul Pandya struct c4iw_dev *dev = ((struct seq_file *)file->private_data)->private; 3398d81ef34SVipul Pandya 3408d81ef34SVipul Pandya mutex_lock(&dev->rdev.stats.lock); 3418d81ef34SVipul Pandya dev->rdev.stats.pd.max = 0; 342ec3eead2SVipul Pandya dev->rdev.stats.pd.fail = 0; 3438d81ef34SVipul Pandya dev->rdev.stats.qid.max = 0; 344ec3eead2SVipul Pandya dev->rdev.stats.qid.fail = 0; 3458d81ef34SVipul Pandya dev->rdev.stats.stag.max = 0; 346ec3eead2SVipul Pandya dev->rdev.stats.stag.fail = 0; 3478d81ef34SVipul Pandya dev->rdev.stats.pbl.max = 0; 348ec3eead2SVipul Pandya dev->rdev.stats.pbl.fail = 0; 3498d81ef34SVipul Pandya dev->rdev.stats.rqt.max = 0; 350ec3eead2SVipul Pandya dev->rdev.stats.rqt.fail = 0; 3518d81ef34SVipul Pandya dev->rdev.stats.ocqp.max = 0; 352ec3eead2SVipul Pandya dev->rdev.stats.ocqp.fail = 0; 3532c974781SVipul Pandya dev->rdev.stats.db_full = 0; 3542c974781SVipul Pandya dev->rdev.stats.db_empty = 0; 3552c974781SVipul Pandya dev->rdev.stats.db_drop = 0; 356422eea0aSVipul Pandya dev->rdev.stats.db_state_transitions = 0; 357793dad94SVipul Pandya dev->rdev.stats.tcam_full = 0; 358793dad94SVipul Pandya dev->rdev.stats.act_ofld_conn_fails = 0; 359793dad94SVipul Pandya dev->rdev.stats.pas_ofld_conn_fails = 0; 
3608d81ef34SVipul Pandya mutex_unlock(&dev->rdev.stats.lock); 3618d81ef34SVipul Pandya return count; 3628d81ef34SVipul Pandya } 3638d81ef34SVipul Pandya 3648d81ef34SVipul Pandya static const struct file_operations stats_debugfs_fops = { 3658d81ef34SVipul Pandya .owner = THIS_MODULE, 3668d81ef34SVipul Pandya .open = stats_open, 3678d81ef34SVipul Pandya .release = single_release, 3688d81ef34SVipul Pandya .read = seq_read, 3698d81ef34SVipul Pandya .llseek = seq_lseek, 3708d81ef34SVipul Pandya .write = stats_clear, 3718d81ef34SVipul Pandya }; 3728d81ef34SVipul Pandya 373793dad94SVipul Pandya static int dump_ep(int id, void *p, void *data) 374793dad94SVipul Pandya { 375793dad94SVipul Pandya struct c4iw_ep *ep = p; 376793dad94SVipul Pandya struct c4iw_debugfs_data *epd = data; 377793dad94SVipul Pandya int space; 378793dad94SVipul Pandya int cc; 379793dad94SVipul Pandya 380793dad94SVipul Pandya space = epd->bufsize - epd->pos - 1; 381793dad94SVipul Pandya if (space == 0) 382793dad94SVipul Pandya return 1; 383793dad94SVipul Pandya 384830662f6SVipul Pandya if (ep->com.local_addr.ss_family == AF_INET) { 385830662f6SVipul Pandya struct sockaddr_in *lsin = (struct sockaddr_in *) 386830662f6SVipul Pandya &ep->com.local_addr; 387830662f6SVipul Pandya struct sockaddr_in *rsin = (struct sockaddr_in *) 388830662f6SVipul Pandya &ep->com.remote_addr; 389830662f6SVipul Pandya 390793dad94SVipul Pandya cc = snprintf(epd->buf + epd->pos, space, 391830662f6SVipul Pandya "ep %p cm_id %p qp %p state %d flags 0x%lx " 392830662f6SVipul Pandya "history 0x%lx hwtid %d atid %d " 393830662f6SVipul Pandya "%pI4:%d <-> %pI4:%d\n", 394830662f6SVipul Pandya ep, ep->com.cm_id, ep->com.qp, 395830662f6SVipul Pandya (int)ep->com.state, ep->com.flags, 396830662f6SVipul Pandya ep->com.history, ep->hwtid, ep->atid, 397830662f6SVipul Pandya &lsin->sin_addr, ntohs(lsin->sin_port), 398830662f6SVipul Pandya &rsin->sin_addr, ntohs(rsin->sin_port)); 399830662f6SVipul Pandya } else { 400830662f6SVipul Pandya 
struct sockaddr_in6 *lsin6 = (struct sockaddr_in6 *) 401830662f6SVipul Pandya &ep->com.local_addr; 402830662f6SVipul Pandya struct sockaddr_in6 *rsin6 = (struct sockaddr_in6 *) 403830662f6SVipul Pandya &ep->com.remote_addr; 404830662f6SVipul Pandya 405830662f6SVipul Pandya cc = snprintf(epd->buf + epd->pos, space, 406830662f6SVipul Pandya "ep %p cm_id %p qp %p state %d flags 0x%lx " 407830662f6SVipul Pandya "history 0x%lx hwtid %d atid %d " 408830662f6SVipul Pandya "%pI6:%d <-> %pI6:%d\n", 409830662f6SVipul Pandya ep, ep->com.cm_id, ep->com.qp, 410830662f6SVipul Pandya (int)ep->com.state, ep->com.flags, 411830662f6SVipul Pandya ep->com.history, ep->hwtid, ep->atid, 412830662f6SVipul Pandya &lsin6->sin6_addr, ntohs(lsin6->sin6_port), 413830662f6SVipul Pandya &rsin6->sin6_addr, ntohs(rsin6->sin6_port)); 414830662f6SVipul Pandya } 415793dad94SVipul Pandya if (cc < space) 416793dad94SVipul Pandya epd->pos += cc; 417793dad94SVipul Pandya return 0; 418793dad94SVipul Pandya } 419793dad94SVipul Pandya 420793dad94SVipul Pandya static int dump_listen_ep(int id, void *p, void *data) 421793dad94SVipul Pandya { 422793dad94SVipul Pandya struct c4iw_listen_ep *ep = p; 423793dad94SVipul Pandya struct c4iw_debugfs_data *epd = data; 424793dad94SVipul Pandya int space; 425793dad94SVipul Pandya int cc; 426793dad94SVipul Pandya 427793dad94SVipul Pandya space = epd->bufsize - epd->pos - 1; 428793dad94SVipul Pandya if (space == 0) 429793dad94SVipul Pandya return 1; 430793dad94SVipul Pandya 431830662f6SVipul Pandya if (ep->com.local_addr.ss_family == AF_INET) { 432830662f6SVipul Pandya struct sockaddr_in *lsin = (struct sockaddr_in *) 433830662f6SVipul Pandya &ep->com.local_addr; 434830662f6SVipul Pandya 435793dad94SVipul Pandya cc = snprintf(epd->buf + epd->pos, space, 436830662f6SVipul Pandya "ep %p cm_id %p state %d flags 0x%lx stid %d " 437830662f6SVipul Pandya "backlog %d %pI4:%d\n", 438830662f6SVipul Pandya ep, ep->com.cm_id, (int)ep->com.state, 439793dad94SVipul Pandya 
ep->com.flags, ep->stid, ep->backlog, 440830662f6SVipul Pandya &lsin->sin_addr, ntohs(lsin->sin_port)); 441830662f6SVipul Pandya } else { 442830662f6SVipul Pandya struct sockaddr_in6 *lsin6 = (struct sockaddr_in6 *) 443830662f6SVipul Pandya &ep->com.local_addr; 444830662f6SVipul Pandya 445830662f6SVipul Pandya cc = snprintf(epd->buf + epd->pos, space, 446830662f6SVipul Pandya "ep %p cm_id %p state %d flags 0x%lx stid %d " 447830662f6SVipul Pandya "backlog %d %pI6:%d\n", 448830662f6SVipul Pandya ep, ep->com.cm_id, (int)ep->com.state, 449830662f6SVipul Pandya ep->com.flags, ep->stid, ep->backlog, 450830662f6SVipul Pandya &lsin6->sin6_addr, ntohs(lsin6->sin6_port)); 451830662f6SVipul Pandya } 452793dad94SVipul Pandya if (cc < space) 453793dad94SVipul Pandya epd->pos += cc; 454793dad94SVipul Pandya return 0; 455793dad94SVipul Pandya } 456793dad94SVipul Pandya 457793dad94SVipul Pandya static int ep_release(struct inode *inode, struct file *file) 458793dad94SVipul Pandya { 459793dad94SVipul Pandya struct c4iw_debugfs_data *epd = file->private_data; 460793dad94SVipul Pandya if (!epd) { 461793dad94SVipul Pandya pr_info("%s null qpd?\n", __func__); 462793dad94SVipul Pandya return 0; 463793dad94SVipul Pandya } 464793dad94SVipul Pandya vfree(epd->buf); 465793dad94SVipul Pandya kfree(epd); 466793dad94SVipul Pandya return 0; 467793dad94SVipul Pandya } 468793dad94SVipul Pandya 469793dad94SVipul Pandya static int ep_open(struct inode *inode, struct file *file) 470793dad94SVipul Pandya { 471793dad94SVipul Pandya struct c4iw_debugfs_data *epd; 472793dad94SVipul Pandya int ret = 0; 473793dad94SVipul Pandya int count = 1; 474793dad94SVipul Pandya 475793dad94SVipul Pandya epd = kmalloc(sizeof(*epd), GFP_KERNEL); 476793dad94SVipul Pandya if (!epd) { 477793dad94SVipul Pandya ret = -ENOMEM; 478793dad94SVipul Pandya goto out; 479793dad94SVipul Pandya } 480793dad94SVipul Pandya epd->devp = inode->i_private; 481793dad94SVipul Pandya epd->pos = 0; 482793dad94SVipul Pandya 483793dad94SVipul 
Pandya spin_lock_irq(&epd->devp->lock); 484793dad94SVipul Pandya idr_for_each(&epd->devp->hwtid_idr, count_idrs, &count); 485793dad94SVipul Pandya idr_for_each(&epd->devp->atid_idr, count_idrs, &count); 486793dad94SVipul Pandya idr_for_each(&epd->devp->stid_idr, count_idrs, &count); 487793dad94SVipul Pandya spin_unlock_irq(&epd->devp->lock); 488793dad94SVipul Pandya 489793dad94SVipul Pandya epd->bufsize = count * 160; 490793dad94SVipul Pandya epd->buf = vmalloc(epd->bufsize); 491793dad94SVipul Pandya if (!epd->buf) { 492793dad94SVipul Pandya ret = -ENOMEM; 493793dad94SVipul Pandya goto err1; 494793dad94SVipul Pandya } 495793dad94SVipul Pandya 496793dad94SVipul Pandya spin_lock_irq(&epd->devp->lock); 497793dad94SVipul Pandya idr_for_each(&epd->devp->hwtid_idr, dump_ep, epd); 498793dad94SVipul Pandya idr_for_each(&epd->devp->atid_idr, dump_ep, epd); 499793dad94SVipul Pandya idr_for_each(&epd->devp->stid_idr, dump_listen_ep, epd); 500793dad94SVipul Pandya spin_unlock_irq(&epd->devp->lock); 501793dad94SVipul Pandya 502793dad94SVipul Pandya file->private_data = epd; 503793dad94SVipul Pandya goto out; 504793dad94SVipul Pandya err1: 505793dad94SVipul Pandya kfree(epd); 506793dad94SVipul Pandya out: 507793dad94SVipul Pandya return ret; 508793dad94SVipul Pandya } 509793dad94SVipul Pandya 510793dad94SVipul Pandya static const struct file_operations ep_debugfs_fops = { 511793dad94SVipul Pandya .owner = THIS_MODULE, 512793dad94SVipul Pandya .open = ep_open, 513793dad94SVipul Pandya .release = ep_release, 514793dad94SVipul Pandya .read = debugfs_read, 515793dad94SVipul Pandya }; 516793dad94SVipul Pandya 517cfdda9d7SSteve Wise static int setup_debugfs(struct c4iw_dev *devp) 518cfdda9d7SSteve Wise { 519cfdda9d7SSteve Wise struct dentry *de; 520cfdda9d7SSteve Wise 521cfdda9d7SSteve Wise if (!devp->debugfs_root) 522cfdda9d7SSteve Wise return -1; 523cfdda9d7SSteve Wise 524cfdda9d7SSteve Wise de = debugfs_create_file("qps", S_IWUSR, devp->debugfs_root, 525cfdda9d7SSteve Wise (void 
*)devp, &qp_debugfs_fops); 526cfdda9d7SSteve Wise if (de && de->d_inode) 527cfdda9d7SSteve Wise de->d_inode->i_size = 4096; 5289e8d1fa3SSteve Wise 5299e8d1fa3SSteve Wise de = debugfs_create_file("stags", S_IWUSR, devp->debugfs_root, 5309e8d1fa3SSteve Wise (void *)devp, &stag_debugfs_fops); 5319e8d1fa3SSteve Wise if (de && de->d_inode) 5329e8d1fa3SSteve Wise de->d_inode->i_size = 4096; 5338d81ef34SVipul Pandya 5348d81ef34SVipul Pandya de = debugfs_create_file("stats", S_IWUSR, devp->debugfs_root, 5358d81ef34SVipul Pandya (void *)devp, &stats_debugfs_fops); 5368d81ef34SVipul Pandya if (de && de->d_inode) 5378d81ef34SVipul Pandya de->d_inode->i_size = 4096; 5388d81ef34SVipul Pandya 539793dad94SVipul Pandya de = debugfs_create_file("eps", S_IWUSR, devp->debugfs_root, 540793dad94SVipul Pandya (void *)devp, &ep_debugfs_fops); 541793dad94SVipul Pandya if (de && de->d_inode) 542793dad94SVipul Pandya de->d_inode->i_size = 4096; 543793dad94SVipul Pandya 544cfdda9d7SSteve Wise return 0; 545cfdda9d7SSteve Wise } 546cfdda9d7SSteve Wise 547cfdda9d7SSteve Wise void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev, 548cfdda9d7SSteve Wise struct c4iw_dev_ucontext *uctx) 549cfdda9d7SSteve Wise { 550cfdda9d7SSteve Wise struct list_head *pos, *nxt; 551cfdda9d7SSteve Wise struct c4iw_qid_list *entry; 552cfdda9d7SSteve Wise 553cfdda9d7SSteve Wise mutex_lock(&uctx->lock); 554cfdda9d7SSteve Wise list_for_each_safe(pos, nxt, &uctx->qpids) { 555cfdda9d7SSteve Wise entry = list_entry(pos, struct c4iw_qid_list, entry); 556cfdda9d7SSteve Wise list_del_init(&entry->entry); 5578d81ef34SVipul Pandya if (!(entry->qid & rdev->qpmask)) { 558ec3eead2SVipul Pandya c4iw_put_resource(&rdev->resource.qid_table, 559ec3eead2SVipul Pandya entry->qid); 5608d81ef34SVipul Pandya mutex_lock(&rdev->stats.lock); 5618d81ef34SVipul Pandya rdev->stats.qid.cur -= rdev->qpmask + 1; 5628d81ef34SVipul Pandya mutex_unlock(&rdev->stats.lock); 5638d81ef34SVipul Pandya } 564cfdda9d7SSteve Wise kfree(entry); 
565cfdda9d7SSteve Wise } 566cfdda9d7SSteve Wise 567cfdda9d7SSteve Wise list_for_each_safe(pos, nxt, &uctx->qpids) { 568cfdda9d7SSteve Wise entry = list_entry(pos, struct c4iw_qid_list, entry); 569cfdda9d7SSteve Wise list_del_init(&entry->entry); 570cfdda9d7SSteve Wise kfree(entry); 571cfdda9d7SSteve Wise } 572cfdda9d7SSteve Wise mutex_unlock(&uctx->lock); 573cfdda9d7SSteve Wise } 574cfdda9d7SSteve Wise 575cfdda9d7SSteve Wise void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev, 576cfdda9d7SSteve Wise struct c4iw_dev_ucontext *uctx) 577cfdda9d7SSteve Wise { 578cfdda9d7SSteve Wise INIT_LIST_HEAD(&uctx->qpids); 579cfdda9d7SSteve Wise INIT_LIST_HEAD(&uctx->cqids); 580cfdda9d7SSteve Wise mutex_init(&uctx->lock); 581cfdda9d7SSteve Wise } 582cfdda9d7SSteve Wise 583cfdda9d7SSteve Wise /* Caller takes care of locking if needed */ 584cfdda9d7SSteve Wise static int c4iw_rdev_open(struct c4iw_rdev *rdev) 585cfdda9d7SSteve Wise { 586cfdda9d7SSteve Wise int err; 587cfdda9d7SSteve Wise 588cfdda9d7SSteve Wise c4iw_init_dev_ucontext(rdev, &rdev->uctx); 589cfdda9d7SSteve Wise 590cfdda9d7SSteve Wise /* 591cfdda9d7SSteve Wise * qpshift is the number of bits to shift the qpid left in order 592cfdda9d7SSteve Wise * to get the correct address of the doorbell for that qp. 
593cfdda9d7SSteve Wise */ 594cfdda9d7SSteve Wise rdev->qpshift = PAGE_SHIFT - ilog2(rdev->lldi.udb_density); 595cfdda9d7SSteve Wise rdev->qpmask = rdev->lldi.udb_density - 1; 596cfdda9d7SSteve Wise rdev->cqshift = PAGE_SHIFT - ilog2(rdev->lldi.ucq_density); 597cfdda9d7SSteve Wise rdev->cqmask = rdev->lldi.ucq_density - 1; 598cfdda9d7SSteve Wise PDBG("%s dev %s stag start 0x%0x size 0x%0x num stags %d " 59993fb72e4SSteve Wise "pbl start 0x%0x size 0x%0x rq start 0x%0x size 0x%0x " 60093fb72e4SSteve Wise "qp qid start %u size %u cq qid start %u size %u\n", 601cfdda9d7SSteve Wise __func__, pci_name(rdev->lldi.pdev), rdev->lldi.vr->stag.start, 602cfdda9d7SSteve Wise rdev->lldi.vr->stag.size, c4iw_num_stags(rdev), 603cfdda9d7SSteve Wise rdev->lldi.vr->pbl.start, 604cfdda9d7SSteve Wise rdev->lldi.vr->pbl.size, rdev->lldi.vr->rq.start, 60593fb72e4SSteve Wise rdev->lldi.vr->rq.size, 60693fb72e4SSteve Wise rdev->lldi.vr->qp.start, 60793fb72e4SSteve Wise rdev->lldi.vr->qp.size, 60893fb72e4SSteve Wise rdev->lldi.vr->cq.start, 60993fb72e4SSteve Wise rdev->lldi.vr->cq.size); 610649fb5ecSBen Hutchings PDBG("udb len 0x%x udb base %llx db_reg %p gts_reg %p qpshift %lu " 611cfdda9d7SSteve Wise "qpmask 0x%x cqshift %lu cqmask 0x%x\n", 612cfdda9d7SSteve Wise (unsigned)pci_resource_len(rdev->lldi.pdev, 2), 613649fb5ecSBen Hutchings (u64)pci_resource_start(rdev->lldi.pdev, 2), 614cfdda9d7SSteve Wise rdev->lldi.db_reg, 615cfdda9d7SSteve Wise rdev->lldi.gts_reg, 616cfdda9d7SSteve Wise rdev->qpshift, rdev->qpmask, 617cfdda9d7SSteve Wise rdev->cqshift, rdev->cqmask); 618cfdda9d7SSteve Wise 619cfdda9d7SSteve Wise if (c4iw_num_stags(rdev) == 0) { 620cfdda9d7SSteve Wise err = -EINVAL; 621cfdda9d7SSteve Wise goto err1; 622cfdda9d7SSteve Wise } 623cfdda9d7SSteve Wise 6248d81ef34SVipul Pandya rdev->stats.pd.total = T4_MAX_NUM_PD; 6258d81ef34SVipul Pandya rdev->stats.stag.total = rdev->lldi.vr->stag.size; 6268d81ef34SVipul Pandya rdev->stats.pbl.total = rdev->lldi.vr->pbl.size; 6278d81ef34SVipul 
Pandya rdev->stats.rqt.total = rdev->lldi.vr->rq.size; 6288d81ef34SVipul Pandya rdev->stats.ocqp.total = rdev->lldi.vr->ocq.size; 6298d81ef34SVipul Pandya rdev->stats.qid.total = rdev->lldi.vr->qp.size; 6308d81ef34SVipul Pandya 631cfdda9d7SSteve Wise err = c4iw_init_resource(rdev, c4iw_num_stags(rdev), T4_MAX_NUM_PD); 632cfdda9d7SSteve Wise if (err) { 633cfdda9d7SSteve Wise printk(KERN_ERR MOD "error %d initializing resources\n", err); 634cfdda9d7SSteve Wise goto err1; 635cfdda9d7SSteve Wise } 636cfdda9d7SSteve Wise err = c4iw_pblpool_create(rdev); 637cfdda9d7SSteve Wise if (err) { 638cfdda9d7SSteve Wise printk(KERN_ERR MOD "error %d initializing pbl pool\n", err); 639cfdda9d7SSteve Wise goto err2; 640cfdda9d7SSteve Wise } 641cfdda9d7SSteve Wise err = c4iw_rqtpool_create(rdev); 642cfdda9d7SSteve Wise if (err) { 643cfdda9d7SSteve Wise printk(KERN_ERR MOD "error %d initializing rqt pool\n", err); 644cfdda9d7SSteve Wise goto err3; 645cfdda9d7SSteve Wise } 646c6d7b267SSteve Wise err = c4iw_ocqp_pool_create(rdev); 647c6d7b267SSteve Wise if (err) { 648c6d7b267SSteve Wise printk(KERN_ERR MOD "error %d initializing ocqp pool\n", err); 649c6d7b267SSteve Wise goto err4; 650c6d7b267SSteve Wise } 65105eb2389SSteve Wise rdev->status_page = (struct t4_dev_status_page *) 65205eb2389SSteve Wise __get_free_page(GFP_KERNEL); 65305eb2389SSteve Wise if (!rdev->status_page) { 65405eb2389SSteve Wise pr_err(MOD "error allocating status page\n"); 65505eb2389SSteve Wise goto err4; 65605eb2389SSteve Wise } 657cfdda9d7SSteve Wise return 0; 658c6d7b267SSteve Wise err4: 659c6d7b267SSteve Wise c4iw_rqtpool_destroy(rdev); 660cfdda9d7SSteve Wise err3: 661cfdda9d7SSteve Wise c4iw_pblpool_destroy(rdev); 662cfdda9d7SSteve Wise err2: 663cfdda9d7SSteve Wise c4iw_destroy_resource(&rdev->resource); 664cfdda9d7SSteve Wise err1: 665cfdda9d7SSteve Wise return err; 666cfdda9d7SSteve Wise } 667cfdda9d7SSteve Wise 668cfdda9d7SSteve Wise static void c4iw_rdev_close(struct c4iw_rdev *rdev) 669cfdda9d7SSteve 
Wise { 67005eb2389SSteve Wise free_page((unsigned long)rdev->status_page); 671cfdda9d7SSteve Wise c4iw_pblpool_destroy(rdev); 672cfdda9d7SSteve Wise c4iw_rqtpool_destroy(rdev); 673cfdda9d7SSteve Wise c4iw_destroy_resource(&rdev->resource); 674cfdda9d7SSteve Wise } 675cfdda9d7SSteve Wise 6769efe10a1SSteve Wise static void c4iw_dealloc(struct uld_ctx *ctx) 677cfdda9d7SSteve Wise { 6782f25e9a5SSteve Wise c4iw_rdev_close(&ctx->dev->rdev); 6792f25e9a5SSteve Wise idr_destroy(&ctx->dev->cqidr); 6802f25e9a5SSteve Wise idr_destroy(&ctx->dev->qpidr); 6812f25e9a5SSteve Wise idr_destroy(&ctx->dev->mmidr); 682793dad94SVipul Pandya idr_destroy(&ctx->dev->hwtid_idr); 683793dad94SVipul Pandya idr_destroy(&ctx->dev->stid_idr); 684793dad94SVipul Pandya idr_destroy(&ctx->dev->atid_idr); 685fa658a98SSteve Wise if (ctx->dev->rdev.bar2_kva) 686fa658a98SSteve Wise iounmap(ctx->dev->rdev.bar2_kva); 687fa658a98SSteve Wise if (ctx->dev->rdev.oc_mw_kva) 6882f25e9a5SSteve Wise iounmap(ctx->dev->rdev.oc_mw_kva); 6892f25e9a5SSteve Wise ib_dealloc_device(&ctx->dev->ibdev); 6902f25e9a5SSteve Wise ctx->dev = NULL; 691cfdda9d7SSteve Wise } 692cfdda9d7SSteve Wise 6939efe10a1SSteve Wise static void c4iw_remove(struct uld_ctx *ctx) 6949efe10a1SSteve Wise { 6959efe10a1SSteve Wise PDBG("%s c4iw_dev %p\n", __func__, ctx->dev); 6969efe10a1SSteve Wise c4iw_unregister_device(ctx->dev); 6979efe10a1SSteve Wise c4iw_dealloc(ctx); 6989efe10a1SSteve Wise } 6999efe10a1SSteve Wise 7009efe10a1SSteve Wise static int rdma_supported(const struct cxgb4_lld_info *infop) 7019efe10a1SSteve Wise { 7029efe10a1SSteve Wise return infop->vr->stag.size > 0 && infop->vr->pbl.size > 0 && 7039efe10a1SSteve Wise infop->vr->rq.size > 0 && infop->vr->qp.size > 0 && 704f079af7aSVipul Pandya infop->vr->cq.size > 0; 7059efe10a1SSteve Wise } 7069efe10a1SSteve Wise 707cfdda9d7SSteve Wise static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop) 708cfdda9d7SSteve Wise { 709cfdda9d7SSteve Wise struct c4iw_dev *devp; 
	int ret;

	/* Refuse adapters whose resource ranges cannot support RDMA. */
	if (!rdma_supported(infop)) {
		printk(KERN_INFO MOD "%s: RDMA not supported on this device.\n",
		       pci_name(infop->pdev));
		return ERR_PTR(-ENOSYS);
	}
	/* On-chip queues are optional; just note their absence. */
	if (!ocqp_supported(infop))
		pr_info("%s: On-Chip Queues not supported on this device.\n",
			pci_name(infop->pdev));

	devp = (struct c4iw_dev *)ib_alloc_device(sizeof(*devp));
	if (!devp) {
		printk(KERN_ERR MOD "Cannot allocate ib device\n");
		return ERR_PTR(-ENOMEM);
	}
	/* Snapshot the lower-level driver info; used throughout the rdev. */
	devp->rdev.lldi = *infop;

	/*
	 * For T5 devices, we map all of BAR2 with WC.
	 * For T4 devices with onchip qp mem, we map only that part
	 * of BAR2 with WC.
	 */
	devp->rdev.bar2_pa = pci_resource_start(devp->rdev.lldi.pdev, 2);
	if (is_t5(devp->rdev.lldi.adapter_type)) {
		devp->rdev.bar2_kva = ioremap_wc(devp->rdev.bar2_pa,
			pci_resource_len(devp->rdev.lldi.pdev, 2));
		if (!devp->rdev.bar2_kva) {
			pr_err(MOD "Unable to ioremap BAR2\n");
			ib_dealloc_device(&devp->ibdev);
			return ERR_PTR(-EINVAL);
		}
	} else if (ocqp_supported(infop)) {
		/* On-chip queue memory sits at the top of BAR2 on T4. */
		devp->rdev.oc_mw_pa =
			pci_resource_start(devp->rdev.lldi.pdev, 2) +
			pci_resource_len(devp->rdev.lldi.pdev, 2) -
			roundup_pow_of_two(devp->rdev.lldi.vr->ocq.size);
		devp->rdev.oc_mw_kva = ioremap_wc(devp->rdev.oc_mw_pa,
			devp->rdev.lldi.vr->ocq.size);
		if (!devp->rdev.oc_mw_kva) {
			pr_err(MOD "Unable to ioremap onchip mem\n");
			ib_dealloc_device(&devp->ibdev);
			return ERR_PTR(-EINVAL);
		}
	}

	PDBG(KERN_INFO MOD "ocq memory: "
	     "hw_start 0x%x size %u mw_pa 0x%lx mw_kva %p\n",
	     devp->rdev.lldi.vr->ocq.start, devp->rdev.lldi.vr->ocq.size,
	     devp->rdev.oc_mw_pa, devp->rdev.oc_mw_kva);

	ret = c4iw_rdev_open(&devp->rdev);
	if (ret) {
		printk(KERN_ERR MOD "Unable to open CXIO rdev err %d\n", ret);
		/*
		 * NOTE(review): the BAR2/ocqp iounmap is not done here;
		 * presumably the caller's teardown path covers it — verify.
		 */
		ib_dealloc_device(&devp->ibdev);
		return ERR_PTR(ret);
	}

	idr_init(&devp->cqidr);
	idr_init(&devp->qpidr);
	idr_init(&devp->mmidr);
	idr_init(&devp->hwtid_idr);
	idr_init(&devp->stid_idr);
	idr_init(&devp->atid_idr);
	spin_lock_init(&devp->lock);
	mutex_init(&devp->rdev.stats.lock);
	mutex_init(&devp->db_mutex);
	INIT_LIST_HEAD(&devp->db_fc_list);

	/* Per-device debugfs directory, named after the PCI device. */
	if (c4iw_debugfs_root) {
		devp->debugfs_root = debugfs_create_dir(
					pci_name(devp->rdev.lldi.pdev),
					c4iw_debugfs_root);
		setup_debugfs(devp);
	}
	return devp;
}

/*
 * cxgb4 ULD "add" callback: allocate a uld_ctx for this adapter and
 * put it on the global list.  The actual c4iw_dev is created later,
 * from the CXGB4_STATE_UP state change (see c4iw_uld_state_change()).
 */
static void *c4iw_uld_add(const struct cxgb4_lld_info *infop)
{
	struct uld_ctx *ctx;
	static int vers_printed;
	int i;

	/* Print the driver banner only once, on the first adapter. */
	if (!vers_printed++)
		pr_info("Chelsio T4/T5 RDMA Driver - version %s\n",
			DRV_VERSION);

	ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
	if (!ctx) {
		ctx = ERR_PTR(-ENOMEM);
		goto out;
	}
	ctx->lldi = *infop;

	PDBG("%s found device %s nchan %u nrxq %u ntxq %u nports %u\n",
	     __func__, pci_name(ctx->lldi.pdev),
	     ctx->lldi.nchan, ctx->lldi.nrxq,
	     ctx->lldi.ntxq, ctx->lldi.nports);

	mutex_lock(&dev_mutex);
	list_add_tail(&ctx->entry, &uld_ctx_list);
	mutex_unlock(&dev_mutex);

	for (i = 0; i < ctx->lldi.nrxq; i++)
		PDBG("rxqid[%u] %u\n", i, ctx->lldi.rxq_ids[i]);
out:
	return ctx;
}

/*
 * Build an skb holding a synthesized cpl_pass_accept_req from a
 * received CPL_RX_PKT gather list, leaving room for the larger CPL.
 * Returns NULL on allocation failure.
 */
static inline struct sk_buff *copy_gl_to_skb_pkt(const struct pkt_gl *gl,
						 const __be64 *rsp,
						 u32 pktshift)
{
	struct sk_buff *skb;

	/*
	 * Allocate space for cpl_pass_accept_req which will be synthesized by
	 * driver. Once the driver synthesizes the request the skb will go
	 * through the regular cpl_pass_accept_req processing.
	 * The math here assumes sizeof cpl_pass_accept_req >= sizeof
	 * cpl_rx_pkt.
	 */
	skb = alloc_skb(gl->tot_len + sizeof(struct cpl_pass_accept_req) +
			sizeof(struct rss_header) - pktshift, GFP_ATOMIC);
	if (unlikely(!skb))
		return NULL;

	__skb_put(skb, gl->tot_len + sizeof(struct cpl_pass_accept_req) +
		  sizeof(struct rss_header) - pktshift);

	/*
	 * This skb will contain:
	 *   rss_header from the rspq descriptor (1 flit)
	 *   cpl_rx_pkt struct from the rspq descriptor (2 flits)
	 *   space for the difference between the size of an
	 *      rx_pkt and pass_accept_req cpl (1 flit)
	 *   the packet data from the gl
	 */
	skb_copy_to_linear_data(skb, rsp,
				sizeof(struct cpl_pass_accept_req) +
				sizeof(struct rss_header));
	/* Append the packet payload after the synthesized CPL headers. */
	skb_copy_to_linear_data_offset(skb, sizeof(struct rss_header) +
				       sizeof(struct cpl_pass_accept_req),
				       gl->va + pktshift,
				       gl->tot_len - pktshift);
	return skb;
}

/*
 * Handle a raw CPL_RX_PKT delivered via the free list: wrap it in an
 * skb shaped like a pass_accept_req and dispatch to the CPL handler.
 * Returns 1 if the message was consumed, 0 otherwise.
 */
static inline int recv_rx_pkt(struct c4iw_dev *dev, const struct pkt_gl *gl,
			      const __be64 *rsp)
{
	/* The CPL opcode is the first byte of the RSS header. */
	unsigned int opcode = *(u8 *)rsp;
	struct sk_buff *skb;

	if (opcode != CPL_RX_PKT)
		goto out;

	skb = copy_gl_to_skb_pkt(gl , rsp, dev->rdev.lldi.sge_pktshift);
	if (skb == NULL)
		goto out;

	if (c4iw_handlers[opcode] == NULL) {
		pr_info("%s no handler opcode 0x%x...\n", __func__,
			opcode);
		kfree_skb(skb);
		goto out;
	}
	c4iw_handlers[opcode](dev, skb);
	return 1;
out:
	return 0;
}

/*
 * cxgb4 ULD rx callback.  Three delivery forms are handled:
 *  - gl == NULL: message is inline in the response descriptor;
 *  - gl == CXGB4_MSG_AN: async notification carrying a CQ qid;
 *  - otherwise: message plus a free-list gather list.
 * Returns 0 on success, -1 on skb allocation failure.
 */
static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp,
			       const struct pkt_gl *gl)
{
	struct uld_ctx *ctx = handle;
	struct c4iw_dev *dev = ctx->dev;
	struct sk_buff *skb;
	u8 opcode;

	if (gl == NULL) {
		/* omit RSS and rsp_ctrl at end of descriptor */
		unsigned int len = 64 - sizeof(struct rsp_ctrl) - 8;

		skb = alloc_skb(256, GFP_ATOMIC);
		if (!skb)
			goto nomem;
		__skb_put(skb, len);
		skb_copy_to_linear_data(skb, &rsp[1], len);
	} else if (gl == CXGB4_MSG_AN) {
		const struct rsp_ctrl *rc = (void *)rsp;

		u32 qid = be32_to_cpu(rc->pldbuflen_qid);
		c4iw_ev_handler(dev, qid);
		return 0;
	} else if (unlikely(*(u8 *)rsp != *(u8 *)gl->va)) {
		/* Opcode mismatch between RSS hdr and FL data: try RX_PKT. */
		if (recv_rx_pkt(dev, gl, rsp))
			return 0;

		pr_info("%s: unexpected FL contents at %p, " \
			"RSS %#llx, FL %#llx, len %u\n",
			pci_name(ctx->lldi.pdev), gl->va,
			(unsigned long long)be64_to_cpu(*rsp),
			(unsigned long long)be64_to_cpu(
				*(__force __be64 *)gl->va),
			gl->tot_len);

		return 0;
	} else {
		skb = cxgb4_pktgl_to_skb(gl, 128, 128);
		if (unlikely(!skb))
			goto nomem;
	}

	/* Dispatch by CPL opcode (first byte of the RSS header). */
	opcode = *(u8 *)rsp;
	if (c4iw_handlers[opcode]) {
		c4iw_handlers[opcode](dev, skb);
	} else {
		pr_info("%s no handler opcode 0x%x...\n", __func__,
			opcode);
		kfree_skb(skb);
	}

	return 0;
nomem:
	return -1;
}

/*
 * cxgb4 ULD state-change callback.  The c4iw_dev is created lazily on
 * the first UP transition and destroyed on DOWN/DETACH or fatal error.
 */
static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state)
{
	struct uld_ctx *ctx = handle;

	PDBG("%s new_state %u\n", __func__, new_state);
	switch (new_state) {
	case CXGB4_STATE_UP:
		printk(KERN_INFO MOD "%s: Up\n", pci_name(ctx->lldi.pdev));
		if (!ctx->dev) {
			int ret;

			ctx->dev = c4iw_alloc(&ctx->lldi);
			if (IS_ERR(ctx->dev)) {
				printk(KERN_ERR MOD
				       "%s: initialization failed: %ld\n",
				       pci_name(ctx->lldi.pdev),
				       PTR_ERR(ctx->dev));
				ctx->dev = NULL;
				break;
			}
			ret = c4iw_register_device(ctx->dev);
			if (ret) {
				printk(KERN_ERR MOD
				       "%s: RDMA registration failed: %d\n",
				       pci_name(ctx->lldi.pdev), ret);
				/* Not registered, so plain dealloc suffices. */
				c4iw_dealloc(ctx);
			}
		}
		break;
	case CXGB4_STATE_DOWN:
		printk(KERN_INFO MOD "%s: Down\n",
		       pci_name(ctx->lldi.pdev));
		if (ctx->dev)
			c4iw_remove(ctx);
		break;
	case CXGB4_STATE_START_RECOVERY:
		printk(KERN_INFO MOD "%s: Fatal Error\n",
		       pci_name(ctx->lldi.pdev));
		if (ctx->dev) {
			struct ib_event event;

			/* Tell consumers the device is gone, then tear down. */
			ctx->dev->rdev.flags |= T4_FATAL_ERROR;
			memset(&event, 0, sizeof event);
			event.event = IB_EVENT_DEVICE_FATAL;
			event.device = &ctx->dev->ibdev;
			ib_dispatch_event(&event);
			c4iw_remove(ctx);
		}
		break;
	case CXGB4_STATE_DETACH:
		printk(KERN_INFO MOD "%s: Detach\n",
		       pci_name(ctx->lldi.pdev));
		if (ctx->dev)
			c4iw_remove(ctx);
		break;
	}
	return 0;
}

/* idr_for_each() callback: turn off the user doorbell for one QP. */
static int disable_qp_db(int id, void *p, void *data)
{
	struct c4iw_qp *qp = p;

	t4_disable_wq_db(&qp->wq);
	return 0;
}

/*
 * Enter the STOPPED doorbell state.  With a status page, user processes
 * are told to stop ringing via db_off; otherwise every QP's doorbell is
 * disabled individually under the device lock.
 */
static void stop_queues(struct uld_ctx *ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->dev->lock, flags);
	ctx->dev->rdev.stats.db_state_transitions++;
	ctx->dev->db_state = STOPPED;
	if (ctx->dev->rdev.flags & T4_STATUS_PAGE_DISABLED)
		idr_for_each(&ctx->dev->qpidr, disable_qp_db, NULL);
	else
		ctx->dev->rdev.status_page->db_off = 1;
	spin_unlock_irqrestore(&ctx->dev->lock, flags);
}

/* idr_for_each() callback: re-enable the user doorbell for one QP. */
static int enable_qp_db(int id, void *p, void *data)
{
	struct c4iw_qp *qp = p;

	t4_enable_wq_db(&qp->wq);
	return 0;
}

/* Replay any accumulated SQ/RQ doorbell increments for one RC QP. */
static void resume_rc_qp(struct c4iw_qp *qp)
{
	spin_lock(&qp->lock);
	t4_ring_sq_db(&qp->wq, qp->wq.sq.wq_pidx_inc,
		      is_t5(qp->rhp->rdev.lldi.adapter_type), NULL);
	qp->wq.sq.wq_pidx_inc = 0;
	t4_ring_rq_db(&qp->wq, qp->wq.rq.wq_pidx_inc,
		      is_t5(qp->rhp->rdev.lldi.adapter_type), NULL);
	qp->wq.rq.wq_pidx_inc = 0;
	spin_unlock(&qp->lock);
}

/*
 * Resume up to DB_FC_RESUME_SIZE QPs from the flow-control list.
 * Caller holds the device lock (the list is device-lock protected).
 */
static void resume_a_chunk(struct uld_ctx *ctx)
{
	int i;
	struct c4iw_qp *qp;

	for (i = 0; i < DB_FC_RESUME_SIZE; i++) {
		qp = list_first_entry(&ctx->dev->db_fc_list, struct c4iw_qp,
				      db_fc_entry);
		list_del_init(&qp->db_fc_entry);
		resume_rc_qp(qp);
		if (list_empty(&ctx->dev->db_fc_list))
			break;
	}
}

/*
 * Leave the STOPPED doorbell state.  Drains the db_fc_list in chunks,
 * throttled against the hardware doorbell FIFO fill level, dropping the
 * device lock between chunks so forward progress elsewhere is possible.
 * Ends in NORMAL unless another state transition interrupts us.
 */
static void resume_queues(struct uld_ctx *ctx)
{
	spin_lock_irq(&ctx->dev->lock);
	if (ctx->dev->db_state != STOPPED)
		goto out;
	ctx->dev->db_state = FLOW_CONTROL;
	while (1) {
		if (list_empty(&ctx->dev->db_fc_list)) {
			/* Nothing left to resume: go back to NORMAL. */
			WARN_ON(ctx->dev->db_state != FLOW_CONTROL);
			ctx->dev->db_state = NORMAL;
			ctx->dev->rdev.stats.db_state_transitions++;
			if (ctx->dev->rdev.flags & T4_STATUS_PAGE_DISABLED) {
				idr_for_each(&ctx->dev->qpidr, enable_qp_db,
					     NULL);
			} else {
				ctx->dev->rdev.status_page->db_off = 0;
			}
			break;
		} else {
			/* Only push more DBs while the FIFO has headroom. */
			if (cxgb4_dbfifo_count(ctx->dev->rdev.lldi.ports[0], 1)
			    < (ctx->dev->rdev.lldi.dbfifo_int_thresh <<
			       DB_FC_DRAIN_THRESH)) {
				resume_a_chunk(ctx);
			}
			if (!list_empty(&ctx->dev->db_fc_list)) {
				/* Drop the lock while we sleep between chunks. */
				spin_unlock_irq(&ctx->dev->lock);
				if (DB_FC_RESUME_DELAY) {
					set_current_state(TASK_UNINTERRUPTIBLE);
					schedule_timeout(DB_FC_RESUME_DELAY);
				}
				spin_lock_irq(&ctx->dev->lock);
				/* State may have changed while unlocked. */
				if (ctx->dev->db_state != FLOW_CONTROL)
					break;
			}
		}
	}
out:
	if (ctx->dev->db_state != NORMAL)
		ctx->dev->rdev.stats.db_fc_interruptions++;
	spin_unlock_irq(&ctx->dev->lock);
}

/* Fixed-capacity snapshot of QPs taken under the device lock. */
struct qp_list {
	unsigned idx;
	struct c4iw_qp **qps;
};

/* idr_for_each() callback: take a reference and record the QP. */
static int add_and_ref_qp(int id, void *p, void *data)
{
	struct qp_list *qp_listp = data;
	struct c4iw_qp *qp = p;

	c4iw_qp_add_ref(&qp->ibqp);
	qp_listp->qps[qp_listp->idx++] = qp;
	return 0;
}
1111422eea0aSVipul Pandya 1112422eea0aSVipul Pandya static int count_qps(int id, void *p, void *data) 1113422eea0aSVipul Pandya { 1114422eea0aSVipul Pandya unsigned *countp = data; 1115422eea0aSVipul Pandya (*countp)++; 1116422eea0aSVipul Pandya return 0; 1117422eea0aSVipul Pandya } 1118422eea0aSVipul Pandya 111905eb2389SSteve Wise static void deref_qps(struct qp_list *qp_list) 1120422eea0aSVipul Pandya { 1121422eea0aSVipul Pandya int idx; 1122422eea0aSVipul Pandya 112305eb2389SSteve Wise for (idx = 0; idx < qp_list->idx; idx++) 112405eb2389SSteve Wise c4iw_qp_rem_ref(&qp_list->qps[idx]->ibqp); 1125422eea0aSVipul Pandya } 1126422eea0aSVipul Pandya 1127422eea0aSVipul Pandya static void recover_lost_dbs(struct uld_ctx *ctx, struct qp_list *qp_list) 1128422eea0aSVipul Pandya { 1129422eea0aSVipul Pandya int idx; 1130422eea0aSVipul Pandya int ret; 1131422eea0aSVipul Pandya 1132422eea0aSVipul Pandya for (idx = 0; idx < qp_list->idx; idx++) { 1133422eea0aSVipul Pandya struct c4iw_qp *qp = qp_list->qps[idx]; 1134422eea0aSVipul Pandya 113505eb2389SSteve Wise spin_lock_irq(&qp->rhp->lock); 113605eb2389SSteve Wise spin_lock(&qp->lock); 1137422eea0aSVipul Pandya ret = cxgb4_sync_txq_pidx(qp->rhp->rdev.lldi.ports[0], 1138422eea0aSVipul Pandya qp->wq.sq.qid, 1139422eea0aSVipul Pandya t4_sq_host_wq_pidx(&qp->wq), 1140422eea0aSVipul Pandya t4_sq_wq_size(&qp->wq)); 1141422eea0aSVipul Pandya if (ret) { 114205eb2389SSteve Wise pr_err(KERN_ERR MOD "%s: Fatal error - " 1143422eea0aSVipul Pandya "DB overflow recovery failed - " 1144422eea0aSVipul Pandya "error syncing SQ qid %u\n", 1145422eea0aSVipul Pandya pci_name(ctx->lldi.pdev), qp->wq.sq.qid); 114605eb2389SSteve Wise spin_unlock(&qp->lock); 114705eb2389SSteve Wise spin_unlock_irq(&qp->rhp->lock); 1148422eea0aSVipul Pandya return; 1149422eea0aSVipul Pandya } 115005eb2389SSteve Wise qp->wq.sq.wq_pidx_inc = 0; 1151422eea0aSVipul Pandya 1152422eea0aSVipul Pandya ret = cxgb4_sync_txq_pidx(qp->rhp->rdev.lldi.ports[0], 1153422eea0aSVipul 
Pandya qp->wq.rq.qid, 1154422eea0aSVipul Pandya t4_rq_host_wq_pidx(&qp->wq), 1155422eea0aSVipul Pandya t4_rq_wq_size(&qp->wq)); 1156422eea0aSVipul Pandya 1157422eea0aSVipul Pandya if (ret) { 115805eb2389SSteve Wise pr_err(KERN_ERR MOD "%s: Fatal error - " 1159422eea0aSVipul Pandya "DB overflow recovery failed - " 1160422eea0aSVipul Pandya "error syncing RQ qid %u\n", 1161422eea0aSVipul Pandya pci_name(ctx->lldi.pdev), qp->wq.rq.qid); 116205eb2389SSteve Wise spin_unlock(&qp->lock); 116305eb2389SSteve Wise spin_unlock_irq(&qp->rhp->lock); 1164422eea0aSVipul Pandya return; 1165422eea0aSVipul Pandya } 116605eb2389SSteve Wise qp->wq.rq.wq_pidx_inc = 0; 116705eb2389SSteve Wise spin_unlock(&qp->lock); 116805eb2389SSteve Wise spin_unlock_irq(&qp->rhp->lock); 1169422eea0aSVipul Pandya 1170422eea0aSVipul Pandya /* Wait for the dbfifo to drain */ 1171422eea0aSVipul Pandya while (cxgb4_dbfifo_count(qp->rhp->rdev.lldi.ports[0], 1) > 0) { 1172422eea0aSVipul Pandya set_current_state(TASK_UNINTERRUPTIBLE); 1173422eea0aSVipul Pandya schedule_timeout(usecs_to_jiffies(10)); 1174422eea0aSVipul Pandya } 1175422eea0aSVipul Pandya } 1176422eea0aSVipul Pandya } 1177422eea0aSVipul Pandya 1178422eea0aSVipul Pandya static void recover_queues(struct uld_ctx *ctx) 1179422eea0aSVipul Pandya { 1180422eea0aSVipul Pandya int count = 0; 1181422eea0aSVipul Pandya struct qp_list qp_list; 1182422eea0aSVipul Pandya int ret; 1183422eea0aSVipul Pandya 1184422eea0aSVipul Pandya /* slow everybody down */ 1185422eea0aSVipul Pandya set_current_state(TASK_UNINTERRUPTIBLE); 1186422eea0aSVipul Pandya schedule_timeout(usecs_to_jiffies(1000)); 1187422eea0aSVipul Pandya 1188422eea0aSVipul Pandya /* flush the SGE contexts */ 1189422eea0aSVipul Pandya ret = cxgb4_flush_eq_cache(ctx->dev->rdev.lldi.ports[0]); 1190422eea0aSVipul Pandya if (ret) { 1191422eea0aSVipul Pandya printk(KERN_ERR MOD "%s: Fatal error - DB overflow recovery failed\n", 1192422eea0aSVipul Pandya pci_name(ctx->lldi.pdev)); 119305eb2389SSteve Wise 
return; 1194422eea0aSVipul Pandya } 1195422eea0aSVipul Pandya 1196422eea0aSVipul Pandya /* Count active queues so we can build a list of queues to recover */ 1197422eea0aSVipul Pandya spin_lock_irq(&ctx->dev->lock); 119805eb2389SSteve Wise WARN_ON(ctx->dev->db_state != STOPPED); 119905eb2389SSteve Wise ctx->dev->db_state = RECOVERY; 1200422eea0aSVipul Pandya idr_for_each(&ctx->dev->qpidr, count_qps, &count); 1201422eea0aSVipul Pandya 1202422eea0aSVipul Pandya qp_list.qps = kzalloc(count * sizeof *qp_list.qps, GFP_ATOMIC); 1203422eea0aSVipul Pandya if (!qp_list.qps) { 1204422eea0aSVipul Pandya printk(KERN_ERR MOD "%s: Fatal error - DB overflow recovery failed\n", 1205422eea0aSVipul Pandya pci_name(ctx->lldi.pdev)); 1206422eea0aSVipul Pandya spin_unlock_irq(&ctx->dev->lock); 120705eb2389SSteve Wise return; 1208422eea0aSVipul Pandya } 1209422eea0aSVipul Pandya qp_list.idx = 0; 1210422eea0aSVipul Pandya 1211422eea0aSVipul Pandya /* add and ref each qp so it doesn't get freed */ 1212422eea0aSVipul Pandya idr_for_each(&ctx->dev->qpidr, add_and_ref_qp, &qp_list); 1213422eea0aSVipul Pandya 1214422eea0aSVipul Pandya spin_unlock_irq(&ctx->dev->lock); 1215422eea0aSVipul Pandya 1216422eea0aSVipul Pandya /* now traverse the list in a safe context to recover the db state*/ 1217422eea0aSVipul Pandya recover_lost_dbs(ctx, &qp_list); 1218422eea0aSVipul Pandya 1219422eea0aSVipul Pandya /* we're almost done! deref the qps and clean up */ 122005eb2389SSteve Wise deref_qps(&qp_list); 1221422eea0aSVipul Pandya kfree(qp_list.qps); 1222422eea0aSVipul Pandya 1223422eea0aSVipul Pandya spin_lock_irq(&ctx->dev->lock); 122405eb2389SSteve Wise WARN_ON(ctx->dev->db_state != RECOVERY); 122505eb2389SSteve Wise ctx->dev->db_state = STOPPED; 12262c974781SVipul Pandya spin_unlock_irq(&ctx->dev->lock); 12272c974781SVipul Pandya } 12282c974781SVipul Pandya 12292c974781SVipul Pandya static int c4iw_uld_control(void *handle, enum cxgb4_control control, ...) 
{
	struct uld_ctx *ctx = handle;

	/* Doorbell flow-control notifications from the LLD. */
	switch (control) {
	case CXGB4_CONTROL_DB_FULL:
		stop_queues(ctx);
		ctx->dev->rdev.stats.db_full++;
		break;
	case CXGB4_CONTROL_DB_EMPTY:
		resume_queues(ctx);
		mutex_lock(&ctx->dev->rdev.stats.lock);
		ctx->dev->rdev.stats.db_empty++;
		mutex_unlock(&ctx->dev->rdev.stats.lock);
		break;
	case CXGB4_CONTROL_DB_DROP:
		recover_queues(ctx);
		mutex_lock(&ctx->dev->rdev.stats.lock);
		ctx->dev->rdev.stats.db_drop++;
		mutex_unlock(&ctx->dev->rdev.stats.lock);
		break;
	default:
		printk(KERN_WARNING MOD "%s: unknown control cmd %u\n",
		       pci_name(ctx->lldi.pdev), control);
		break;
	}
	return 0;
}

/* Callbacks registered with the cxgb4 LLD for the RDMA ULD slot. */
static struct cxgb4_uld_info c4iw_uld_info = {
	.name = DRV_NAME,
	.add = c4iw_uld_add,
	.rx_handler = c4iw_uld_rx_handler,
	.state_change = c4iw_uld_state_change,
	.control = c4iw_uld_control,
};

/*
 * Module init: bring up the CM layer, create the debugfs root (failure
 * is non-fatal), and register as the cxgb4 RDMA upper-layer driver.
 */
static int __init c4iw_init_module(void)
{
	int err;

	err = c4iw_cm_init();
	if (err)
		return err;

	c4iw_debugfs_root = debugfs_create_dir(DRV_NAME, NULL);
	if (!c4iw_debugfs_root)
		printk(KERN_WARNING MOD
		       "could not create debugfs entry, continuing\n");

	cxgb4_register_uld(CXGB4_ULD_RDMA, &c4iw_uld_info);

	return 0;
}

/*
 * Module exit: remove every remaining device context, unregister from
 * cxgb4, shut down the CM layer and delete the debugfs tree.
 */
static void __exit c4iw_exit_module(void)
{
	struct uld_ctx *ctx, *tmp;

	mutex_lock(&dev_mutex);
	list_for_each_entry_safe(ctx, tmp, &uld_ctx_list, entry) {
		if (ctx->dev)
			c4iw_remove(ctx);
		kfree(ctx);
	}
	mutex_unlock(&dev_mutex);
	cxgb4_unregister_uld(CXGB4_ULD_RDMA);
	c4iw_cm_term();
	debugfs_remove_recursive(c4iw_debugfs_root);
}

module_init(c4iw_init_module);
module_exit(c4iw_exit_module);