/*
 * cxgb4_uld.c: Chelsio Upper Layer Driver Interface for T4/T5/T6 SGE management
 *
 * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Written by: Atul Gupta (atul.gupta@chelsio.com)
 * Written by: Hariprasad Shenai (hariprasad@chelsio.com)
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/list.h>
#include <linux/skbuff.h>
#include <linux/pci.h>

#include "cxgb4.h"
#include "cxgb4_uld.h"
#include "t4_regs.h"
#include "t4fw_api.h"
#include "t4_msg.h"

#define for_each_uldrxq(m, i) for (i = 0; i < ((m)->nrxq + (m)->nciq); i++)

static int get_msix_idx_from_bmap(struct adapter *adap)
{
	struct uld_msix_bmap *bmap = &adap->msix_bmap_ulds;
	unsigned long flags;
	unsigned int msix_idx;

	spin_lock_irqsave(&bmap->lock, flags);
	msix_idx = find_first_zero_bit(bmap->msix_bmap, bmap->mapsize);
	if (msix_idx < bmap->mapsize) {
		__set_bit(msix_idx, bmap->msix_bmap);
	} else {
		spin_unlock_irqrestore(&bmap->lock, flags);
		return -ENOSPC;
	}

	spin_unlock_irqrestore(&bmap->lock, flags);
	return msix_idx;
}

static void free_msix_idx_in_bmap(struct adapter *adap, unsigned int msix_idx)
{
	struct uld_msix_bmap *bmap = &adap->msix_bmap_ulds;
	unsigned long flags;

	spin_lock_irqsave(&bmap->lock, flags);
	__clear_bit(msix_idx, bmap->msix_bmap);
	spin_unlock_irqrestore(&bmap->lock, flags);
}

/* Flush the aggregated lro sessions */
static void uldrx_flush_handler(struct sge_rspq *q)
{
	struct adapter *adap = q->adap;

	if (adap->uld[q->uld].lro_flush)
		adap->uld[q->uld].lro_flush(&q->lro_mgr);
}

/**
 * uldrx_handler - response queue handler for ULD queues
 * @q: the response queue that received the packet
 * @rsp: the response queue descriptor holding the offload message
 * @gl: the gather list of packet fragments
 *
 * Deliver an ingress offload packet to a ULD. All processing is done by
 * the ULD, we just maintain statistics.
 */
static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
			 const struct pkt_gl *gl)
{
	struct adapter *adap = q->adap;
	struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);
	int ret;

	/* FW can send CPLs encapsulated in a CPL_FW4_MSG */
	if (((const struct rss_header *)rsp)->opcode == CPL_FW4_MSG &&
	    ((const struct cpl_fw4_msg *)(rsp + 1))->type == FW_TYPE_RSSCPL)
		rsp += 2;

	if (q->flush_handler)
		ret = adap->uld[q->uld].lro_rx_handler(adap->uld[q->uld].handle,
						       rsp, gl, &q->lro_mgr,
						       &q->napi);
	else
		ret = adap->uld[q->uld].rx_handler(adap->uld[q->uld].handle,
						   rsp, gl);

	if (ret) {
		rxq->stats.nomem++;
		return -1;
	}

	if (!gl)
		rxq->stats.imm++;
	else if (gl == CXGB4_MSG_AN)
		rxq->stats.an++;
	else
		rxq->stats.pkts++;
	return 0;
}

static int alloc_uld_rxqs(struct adapter *adap,
			  struct sge_uld_rxq_info *rxq_info,
			  unsigned int nq, unsigned int offset, bool lro)
{
	struct sge *s = &adap->sge;
	struct sge_ofld_rxq *q = rxq_info->uldrxq + offset;
	unsigned short *ids = rxq_info->rspq_id + offset;
	unsigned int per_chan = nq / adap->params.nports;
	unsigned int bmap_idx = 0;
	int i, err, msi_idx;

	if (adap->flags & USING_MSIX)
		msi_idx = 1;
	else
		msi_idx = -((int)s->intrq.abs_id + 1);

	for (i = 0; i < nq; i++, q++) {
		if (msi_idx >= 0) {
			bmap_idx = get_msix_idx_from_bmap(adap);
			msi_idx = adap->msix_info_ulds[bmap_idx].idx;
		}
		err = t4_sge_alloc_rxq(adap, &q->rspq, false,
				       adap->port[i / per_chan],
				       msi_idx,
				       q->fl.size ? &q->fl : NULL,
				       uldrx_handler,
				       lro ? uldrx_flush_handler : NULL,
				       0);
		if (err)
			goto freeout;
		if (msi_idx >= 0)
			rxq_info->msix_tbl[i + offset] = bmap_idx;
		memset(&q->stats, 0, sizeof(q->stats));
		if (ids)
			ids[i] = q->rspq.abs_id;
	}
	return 0;
freeout:
	q = rxq_info->uldrxq + offset;
	for ( ; i; i--, q++) {
		if (q->rspq.desc)
			free_rspq_fl(adap, &q->rspq,
				     q->fl.size ? &q->fl : NULL);
	}

	/* We need to free rxq also in case of ciq allocation failure */
	if (offset) {
		i = rxq_info->nrxq;
		q = rxq_info->uldrxq;
		for ( ; i; i--, q++) {
			if (q->rspq.desc)
				free_rspq_fl(adap, &q->rspq,
					     q->fl.size ? &q->fl : NULL);
		}
	}
	return err;
}
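
/*
 * Bring up the SGE response queues of a ULD: the data rxqs first, then any
 * completion IQs (ciqs) immediately after them in the same uldrxq array.
 * For the RDMA ULD on a fully initialised adapter, the firmware is also told
 * (via the DMAQ_EQ_CMPLIQID_CTRL parameter below) to route control queue
 * completions for each port to the corresponding ULD response queue.
 */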
static int
setup_sge_queues_uld(struct adapter *adap, unsigned int uld_type, bool lro)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	int i, ret = 0;

	if (adap->flags & USING_MSIX) {
		rxq_info->msix_tbl = kcalloc((rxq_info->nrxq + rxq_info->nciq),
					     sizeof(unsigned short),
					     GFP_KERNEL);
		if (!rxq_info->msix_tbl)
			return -ENOMEM;
	}

	ret = !(!alloc_uld_rxqs(adap, rxq_info, rxq_info->nrxq, 0, lro) &&
		!alloc_uld_rxqs(adap, rxq_info, rxq_info->nciq,
				rxq_info->nrxq, lro));

	/* Tell uP to route control queue completions to rdma rspq */
	if (adap->flags & FULL_INIT_DONE &&
	    !ret && uld_type == CXGB4_ULD_RDMA) {
		struct sge *s = &adap->sge;
		unsigned int cmplqid;
		u32 param, cmdop;

		cmdop = FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL;
		for_each_port(adap, i) {
			cmplqid = rxq_info->uldrxq[i].rspq.cntxt_id;
			param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
				 FW_PARAMS_PARAM_X_V(cmdop) |
				 FW_PARAMS_PARAM_YZ_V(s->ctrlq[i].q.cntxt_id));
			ret = t4_set_params(adap, adap->mbox, adap->pf,
					    0, 1, &param, &cmplqid);
		}
	}
	return ret;
}

static void t4_free_uld_rxqs(struct adapter *adap, int n,
			     struct sge_ofld_rxq *q)
{
	for ( ; n; n--, q++) {
		if (q->rspq.desc)
			free_rspq_fl(adap, &q->rspq,
				     q->fl.size ? &q->fl : NULL);
	}
}

static void free_sge_queues_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];

	if (adap->flags & FULL_INIT_DONE && uld_type == CXGB4_ULD_RDMA) {
		struct sge *s = &adap->sge;
		u32 param, cmdop, cmplqid = 0;
		int i;

		cmdop = FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL;
		for_each_port(adap, i) {
			param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
				 FW_PARAMS_PARAM_X_V(cmdop) |
				 FW_PARAMS_PARAM_YZ_V(s->ctrlq[i].q.cntxt_id));
			t4_set_params(adap, adap->mbox, adap->pf,
				      0, 1, &param, &cmplqid);
		}
	}

	if (rxq_info->nciq)
		t4_free_uld_rxqs(adap, rxq_info->nciq,
				 rxq_info->uldrxq + rxq_info->nrxq);
	t4_free_uld_rxqs(adap, rxq_info->nrxq, rxq_info->uldrxq);
	if (adap->flags & USING_MSIX)
		kfree(rxq_info->msix_tbl);
}
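
/*
 * Work out how many data rxqs and (optionally) completion IQs a ULD gets and
 * allocate the bookkeeping for them.  The rxq count is rounded up to a
 * multiple of the number of ports so queues spread evenly across ports.
 * Illustrative example (numbers are hypothetical): with 4 ports, 6 online
 * CPUs, no per-ULD MSI-X cap in play and uld_info->nrxq = 10, the rxq count
 * becomes roundup(min(10, 6), 4) = 8.  The ciq count is rounded down to a
 * multiple of the port count but never below one queue per port.
 */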
static int cfg_queues_uld(struct adapter *adap, unsigned int uld_type,
			  const struct cxgb4_uld_info *uld_info)
{
	struct sge *s = &adap->sge;
	struct sge_uld_rxq_info *rxq_info;
	int i, nrxq, ciq_size;

	rxq_info = kzalloc(sizeof(*rxq_info), GFP_KERNEL);
	if (!rxq_info)
		return -ENOMEM;

	if (adap->flags & USING_MSIX && uld_info->nrxq > s->nqs_per_uld) {
		i = s->nqs_per_uld;
		rxq_info->nrxq = roundup(i, adap->params.nports);
	} else {
		i = min_t(int, uld_info->nrxq,
			  num_online_cpus());
		rxq_info->nrxq = roundup(i, adap->params.nports);
	}
	if (!uld_info->ciq) {
		rxq_info->nciq = 0;
	} else {
		if (adap->flags & USING_MSIX)
			rxq_info->nciq = min_t(int, s->nqs_per_uld,
					       num_online_cpus());
		else
			rxq_info->nciq = min_t(int, MAX_OFLD_QSETS,
					       num_online_cpus());
		rxq_info->nciq = ((rxq_info->nciq / adap->params.nports) *
				  adap->params.nports);
		rxq_info->nciq = max_t(int, rxq_info->nciq,
				       adap->params.nports);
	}

	nrxq = rxq_info->nrxq + rxq_info->nciq; /* total rxq's */
	rxq_info->uldrxq = kcalloc(nrxq, sizeof(struct sge_ofld_rxq),
				   GFP_KERNEL);
	if (!rxq_info->uldrxq) {
		kfree(rxq_info);
		return -ENOMEM;
	}

	rxq_info->rspq_id = kcalloc(nrxq, sizeof(unsigned short), GFP_KERNEL);
	if (!rxq_info->rspq_id) {
		kfree(rxq_info->uldrxq);
		kfree(rxq_info);
		return -ENOMEM;
	}

	for (i = 0; i < rxq_info->nrxq; i++) {
		struct sge_ofld_rxq *r = &rxq_info->uldrxq[i];

		init_rspq(adap, &r->rspq, 5, 1, uld_info->rxq_size, 64);
		r->rspq.uld = uld_type;
		r->fl.size = 72;
	}

	ciq_size = 64 + adap->vres.cq.size + adap->tids.nftids;
	if (ciq_size > SGE_MAX_IQ_SIZE) {
		dev_warn(adap->pdev_dev, "CIQ size too small for available IQs\n");
		ciq_size = SGE_MAX_IQ_SIZE;
	}

	for (i = rxq_info->nrxq; i < nrxq; i++) {
		struct sge_ofld_rxq *r = &rxq_info->uldrxq[i];

		init_rspq(adap, &r->rspq, 5, 1, ciq_size, 64);
		r->rspq.uld = uld_type;
	}

	memcpy(rxq_info->name, uld_info->name, IFNAMSIZ);
	adap->sge.uld_rxq_info[uld_type] = rxq_info;

	return 0;
}

static void free_queues_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];

	kfree(rxq_info->rspq_id);
	kfree(rxq_info->uldrxq);
	kfree(rxq_info);
}

static int
request_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	int err = 0;
	unsigned int idx, bmap_idx;

	for_each_uldrxq(rxq_info, idx) {
		bmap_idx = rxq_info->msix_tbl[idx];
		err = request_irq(adap->msix_info_ulds[bmap_idx].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info_ulds[bmap_idx].desc,
				  &rxq_info->uldrxq[idx].rspq);
		if (err)
			goto unwind;
	}
	return 0;
unwind:
	while (idx-- > 0) {
		bmap_idx = rxq_info->msix_tbl[idx];
		free_msix_idx_in_bmap(adap, bmap_idx);
		free_irq(adap->msix_info_ulds[bmap_idx].vec,
			 &rxq_info->uldrxq[idx].rspq);
	}
	return err;
}

static void
free_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	unsigned int idx, bmap_idx;

	for_each_uldrxq(rxq_info, idx) {
		bmap_idx = rxq_info->msix_tbl[idx];

		free_msix_idx_in_bmap(adap, bmap_idx);
		free_irq(adap->msix_info_ulds[bmap_idx].vec,
			 &rxq_info->uldrxq[idx].rspq);
	}
}

static void name_msix_vecs_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	int n = sizeof(adap->msix_info_ulds[0].desc);
	unsigned int idx, bmap_idx;

	for_each_uldrxq(rxq_info, idx) {
		bmap_idx = rxq_info->msix_tbl[idx];

		snprintf(adap->msix_info_ulds[bmap_idx].desc, n, "%s-%s%d",
			 adap->port[0]->name, rxq_info->name, idx);
	}
}

static void enable_rx(struct adapter *adap, struct sge_rspq *q)
{
	if (!q)
		return;

	if (q->handler) {
		cxgb_busy_poll_init_lock(q);
		napi_enable(&q->napi);
	}
	/* 0-increment GTS to start the timer and enable interrupts */
	t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
		     SEINTARM_V(q->intr_params) |
		     INGRESSQID_V(q->cntxt_id));
}

static void quiesce_rx(struct adapter *adap, struct sge_rspq *q)
{
	if (q && q->handler) {
		napi_disable(&q->napi);
		local_bh_disable();
		while (!cxgb_poll_lock_napi(q))
			mdelay(1);
		local_bh_enable();
	}
}
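
/*
 * enable_rx_uld()/quiesce_rx_uld() below apply enable_rx()/quiesce_rx() to
 * every response queue owned by a ULD (data rxqs and ciqs alike) using the
 * for_each_uldrxq() iterator.
 */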
static void enable_rx_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	int idx;

	for_each_uldrxq(rxq_info, idx)
		enable_rx(adap, &rxq_info->uldrxq[idx].rspq);
}

static void quiesce_rx_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	int idx;

	for_each_uldrxq(rxq_info, idx)
		quiesce_rx(adap, &rxq_info->uldrxq[idx].rspq);
}

static void uld_queue_init(struct adapter *adap, unsigned int uld_type,
			   struct cxgb4_lld_info *lli)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];

	lli->rxq_ids = rxq_info->rspq_id;
	lli->nrxq = rxq_info->nrxq;
	lli->ciq_ids = rxq_info->rspq_id + rxq_info->nrxq;
	lli->nciq = rxq_info->nciq;
}

int t4_uld_mem_alloc(struct adapter *adap)
{
	struct sge *s = &adap->sge;

	adap->uld = kcalloc(CXGB4_ULD_MAX, sizeof(*adap->uld), GFP_KERNEL);
	if (!adap->uld)
		return -ENOMEM;

	s->uld_rxq_info = kzalloc(CXGB4_ULD_MAX *
				  sizeof(struct sge_uld_rxq_info *),
				  GFP_KERNEL);
	if (!s->uld_rxq_info)
		goto err_uld;

	return 0;
err_uld:
	kfree(adap->uld);
	return -ENOMEM;
}

void t4_uld_mem_free(struct adapter *adap)
{
	struct sge *s = &adap->sge;

	kfree(s->uld_rxq_info);
	kfree(adap->uld);
}

void t4_uld_clean_up(struct adapter *adap)
{
	struct sge_uld_rxq_info *rxq_info;
	unsigned int i;

	if (!adap->uld)
		return;
	for (i = 0; i < CXGB4_ULD_MAX; i++) {
		if (!adap->uld[i].handle)
			continue;
		rxq_info = adap->sge.uld_rxq_info[i];
		if (adap->flags & FULL_INIT_DONE)
			quiesce_rx_uld(adap, i);
		if (adap->flags & USING_MSIX)
			free_msix_queue_irqs_uld(adap, i);
		free_sge_queues_uld(adap, i);
		free_queues_uld(adap, i);
	}
}

static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld)
{
	int i;

	lld->pdev = adap->pdev;
	lld->pf = adap->pf;
	lld->l2t = adap->l2t;
	lld->tids = &adap->tids;
	lld->ports = adap->port;
	lld->vr = &adap->vres;
	lld->mtus = adap->params.mtus;
	lld->ntxq = adap->sge.ofldqsets;
	lld->nchan = adap->params.nports;
	lld->nports = adap->params.nports;
	lld->wr_cred = adap->params.ofldq_wr_cred;
	lld->iscsi_iolen = MAXRXDATA_G(t4_read_reg(adap, TP_PARA_REG2_A));
	lld->iscsi_tagmask = t4_read_reg(adap, ULP_RX_ISCSI_TAGMASK_A);
	lld->iscsi_pgsz_order = t4_read_reg(adap, ULP_RX_ISCSI_PSZ_A);
	lld->iscsi_llimit = t4_read_reg(adap, ULP_RX_ISCSI_LLIMIT_A);
	lld->iscsi_ppm = &adap->iscsi_ppm;
	lld->adapter_type = adap->params.chip;
	lld->cclk_ps = 1000000000 / adap->params.vpd.cclk;
	lld->udb_density = 1 << adap->params.sge.eq_qpp;
	lld->ucq_density = 1 << adap->params.sge.iq_qpp;
	lld->filt_mode = adap->params.tp.vlan_pri_map;
	/* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
	for (i = 0; i < NCHAN; i++)
		lld->tx_modq[i] = i;
	lld->gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS_A);
	lld->db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL_A);
	lld->fw_vers = adap->params.fw_vers;
	lld->dbfifo_int_thresh = dbfifo_int_thresh;
	lld->sge_ingpadboundary = adap->sge.fl_align;
	lld->sge_egrstatuspagesize = adap->sge.stat_len;
	lld->sge_pktshift = adap->sge.pktshift;
	lld->enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
	lld->max_ordird_qp = adap->params.max_ordird_qp;
	lld->max_ird_adapter = adap->params.max_ird_adapter;
	lld->ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;
	lld->nodeid = dev_to_node(adap->pdev_dev);
	lld->fr_nsmr_tpte_wr_support = adap->params.fr_nsmr_tpte_wr_support;
}
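
/*
 * Hand a configured adapter to a ULD: build the lower-level driver info
 * (including this ULD's rxq/ciq ids), call the ULD's ->add() method to obtain
 * its per-adapter handle, and, if the adapter is already fully initialised,
 * immediately report CXGB4_STATE_UP through ->state_change().
 */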
static void uld_attach(struct adapter *adap, unsigned int uld)
{
	void *handle;
	struct cxgb4_lld_info lli;

	uld_init(adap, &lli);
	uld_queue_init(adap, uld, &lli);

	handle = adap->uld[uld].add(&lli);
	if (IS_ERR(handle)) {
		dev_warn(adap->pdev_dev,
			 "could not attach to the %s driver, error %ld\n",
			 adap->uld[uld].name, PTR_ERR(handle));
		return;
	}

	adap->uld[uld].handle = handle;
	t4_register_netevent_notifier();

	if (adap->flags & FULL_INIT_DONE)
		adap->uld[uld].state_change(handle, CXGB4_STATE_UP);
}

/**
 * cxgb4_register_uld - register an upper-layer driver
 * @type: the ULD type
 * @p: the ULD methods
 *
 * Registers an upper-layer driver with this driver and notifies the ULD
 * about any presently available devices that support its type. Returns
 * %-EBUSY if a ULD of the same type is already registered.
 */
int cxgb4_register_uld(enum cxgb4_uld type,
		       const struct cxgb4_uld_info *p)
{
	int ret = 0;
	unsigned int adap_idx = 0;
	struct adapter *adap;

	if (type >= CXGB4_ULD_MAX)
		return -EINVAL;

	mutex_lock(&uld_mutex);
	list_for_each_entry(adap, &adapter_list, list_node) {
		if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
		    (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
			continue;
		if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
			continue;
		ret = cfg_queues_uld(adap, type, p);
		if (ret)
			goto out;
		ret = setup_sge_queues_uld(adap, type, p->lro);
		if (ret)
			goto free_queues;
		if (adap->flags & USING_MSIX) {
			name_msix_vecs_uld(adap, type);
			ret = request_msix_queue_irqs_uld(adap, type);
			if (ret)
				goto free_rxq;
		}
		if (adap->flags & FULL_INIT_DONE)
			enable_rx_uld(adap, type);
		if (adap->uld[type].add) {
			ret = -EBUSY;
			goto free_irq;
		}
		adap->uld[type] = *p;
		uld_attach(adap, type);
		adap_idx++;
	}
	mutex_unlock(&uld_mutex);
	return 0;

free_irq:
	if (adap->flags & FULL_INIT_DONE)
		quiesce_rx_uld(adap, type);
	if (adap->flags & USING_MSIX)
		free_msix_queue_irqs_uld(adap, type);
free_rxq:
	free_sge_queues_uld(adap, type);
free_queues:
	free_queues_uld(adap, type);
out:
	list_for_each_entry(adap, &adapter_list, list_node) {
		if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
		    (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
			continue;
		if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
			continue;
		if (!adap_idx)
			break;
		adap->uld[type].handle = NULL;
		adap->uld[type].add = NULL;
		if (adap->flags & FULL_INIT_DONE)
			quiesce_rx_uld(adap, type);
		if (adap->flags & USING_MSIX)
			free_msix_queue_irqs_uld(adap, type);
		free_sge_queues_uld(adap, type);
		free_queues_uld(adap, type);
		adap_idx--;
	}
	mutex_unlock(&uld_mutex);
	return ret;
}
EXPORT_SYMBOL(cxgb4_register_uld);

/**
 * cxgb4_unregister_uld - unregister an upper-layer driver
 * @type: the ULD type
 *
 * Unregisters an existing upper-layer driver.
 */
int cxgb4_unregister_uld(enum cxgb4_uld type)
{
	struct adapter *adap;

	if (type >= CXGB4_ULD_MAX)
		return -EINVAL;

	mutex_lock(&uld_mutex);
	list_for_each_entry(adap, &adapter_list, list_node) {
		if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
		    (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
			continue;
		if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
			continue;
		adap->uld[type].handle = NULL;
		adap->uld[type].add = NULL;
		if (adap->flags & FULL_INIT_DONE)
			quiesce_rx_uld(adap, type);
		if (adap->flags & USING_MSIX)
			free_msix_queue_irqs_uld(adap, type);
		free_sge_queues_uld(adap, type);
		free_queues_uld(adap, type);
	}
	mutex_unlock(&uld_mutex);

	return 0;
}
EXPORT_SYMBOL(cxgb4_unregister_uld);
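
/*
 * Minimal usage sketch (illustrative only, not part of this driver): an
 * upper-layer driver fills in a cxgb4_uld_info with the callbacks invoked
 * above and registers it once at module load.  Only fields referenced in
 * this file are shown; the "foo" names and the ULD slot are placeholders,
 * and the exact callback prototypes follow how they are called here.
 *
 *	static void *foo_uld_add(const struct cxgb4_lld_info *lli)
 *	{
 *		return foo_alloc_adapter_state(lli);   becomes uld[].handle
 *	}
 *
 *	static const struct cxgb4_uld_info foo_uld_info = {
 *		.name		= "foo",
 *		.nrxq		= 8,	   capped/rounded in cfg_queues_uld()
 *		.rxq_size	= 1024,
 *		.ciq		= true,
 *		.lro		= false,
 *		.add		= foo_uld_add,
 *		.rx_handler	= foo_uld_rx_handler,
 *		.state_change	= foo_uld_state_change,
 *	};
 *
 *	ret = cxgb4_register_uld(CXGB4_ULD_FOO, &foo_uld_info);
 *	...
 *	cxgb4_unregister_uld(CXGB4_ULD_FOO);
 */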