/*
 * cxgb4_uld.c: Chelsio Upper Layer Driver Interface for T4/T5/T6 SGE management
 *
 * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Written by: Atul Gupta (atul.gupta@chelsio.com)
 * Written by: Hariprasad Shenai (hariprasad@chelsio.com)
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/list.h>
#include <linux/skbuff.h>
#include <linux/pci.h>

#include "cxgb4.h"
#include "cxgb4_uld.h"
#include "t4_regs.h"
#include "t4fw_api.h"
#include "t4_msg.h"

#define for_each_uldrxq(m, i) for (i = 0; i < ((m)->nrxq + (m)->nciq); i++)

static int get_msix_idx_from_bmap(struct adapter *adap)
{
	struct uld_msix_bmap *bmap = &adap->msix_bmap_ulds;
	unsigned long flags;
	unsigned int msix_idx;

	spin_lock_irqsave(&bmap->lock, flags);
	msix_idx = find_first_zero_bit(bmap->msix_bmap, bmap->mapsize);
	if (msix_idx < bmap->mapsize) {
		__set_bit(msix_idx, bmap->msix_bmap);
	} else {
		spin_unlock_irqrestore(&bmap->lock, flags);
		return -ENOSPC;
	}

	spin_unlock_irqrestore(&bmap->lock, flags);
	return msix_idx;
}

static void free_msix_idx_in_bmap(struct adapter *adap, unsigned int msix_idx)
{
	struct uld_msix_bmap *bmap = &adap->msix_bmap_ulds;
	unsigned long flags;

	spin_lock_irqsave(&bmap->lock, flags);
	__clear_bit(msix_idx, bmap->msix_bmap);
	spin_unlock_irqrestore(&bmap->lock, flags);
}

/* Flush the aggregated lro sessions */
static void uldrx_flush_handler(struct sge_rspq *q)
{
	struct adapter *adap = q->adap;

	if (adap->uld[q->uld].lro_flush)
		adap->uld[q->uld].lro_flush(&q->lro_mgr);
}

/**
 * uldrx_handler - response queue handler for ULD queues
 * @q: the response queue that received the packet
 * @rsp: the response queue descriptor holding the offload message
 * @gl: the gather list of packet fragments
 *
 * Deliver an ingress offload packet to a ULD. All processing is done by
 * the ULD, we just maintain statistics.
 */
static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
			 const struct pkt_gl *gl)
{
	struct adapter *adap = q->adap;
	struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);
	int ret;

	/* FW can send CPLs encapsulated in a CPL_FW4_MSG */
	if (((const struct rss_header *)rsp)->opcode == CPL_FW4_MSG &&
	    ((const struct cpl_fw4_msg *)(rsp + 1))->type == FW_TYPE_RSSCPL)
		rsp += 2;

	if (q->flush_handler)
		ret = adap->uld[q->uld].lro_rx_handler(adap->uld[q->uld].handle,
						       rsp, gl, &q->lro_mgr,
						       &q->napi);
	else
		ret = adap->uld[q->uld].rx_handler(adap->uld[q->uld].handle,
						   rsp, gl);

	if (ret) {
		rxq->stats.nomem++;
		return -1;
	}

	if (!gl)
		rxq->stats.imm++;
	else if (gl == CXGB4_MSG_AN)
		rxq->stats.an++;
	else
		rxq->stats.pkts++;
	return 0;
}

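/* Allocate the ULD's offload RX and concentrator response queues, spreading
 * them evenly across the adapter's ports. When MSI-X is in use, each queue
 * is bound to a vector taken from the ULD MSI-X bitmap. On failure, any
 * response queues allocated so far are freed again.
 */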
static int alloc_uld_rxqs(struct adapter *adap,
			  struct sge_uld_rxq_info *rxq_info, bool lro)
{
	struct sge *s = &adap->sge;
	unsigned int nq = rxq_info->nrxq + rxq_info->nciq;
	struct sge_ofld_rxq *q = rxq_info->uldrxq;
	unsigned short *ids = rxq_info->rspq_id;
	unsigned int bmap_idx = 0;
	unsigned int per_chan;
	int i, err, msi_idx, que_idx = 0;

	per_chan = rxq_info->nrxq / adap->params.nports;

	if (adap->flags & CXGB4_USING_MSIX)
		msi_idx = 1;
	else
		msi_idx = -((int)s->intrq.abs_id + 1);

	for (i = 0; i < nq; i++, q++) {
		if (i == rxq_info->nrxq) {
			/* start allocation of concentrator queues */
			per_chan = rxq_info->nciq / adap->params.nports;
			que_idx = 0;
		}

		if (msi_idx >= 0) {
			bmap_idx = get_msix_idx_from_bmap(adap);
			msi_idx = adap->msix_info_ulds[bmap_idx].idx;
		}
		err = t4_sge_alloc_rxq(adap, &q->rspq, false,
				       adap->port[que_idx++ / per_chan],
				       msi_idx,
				       q->fl.size ? &q->fl : NULL,
				       uldrx_handler,
				       lro ? uldrx_flush_handler : NULL,
				       0);
		if (err)
			goto freeout;
		if (msi_idx >= 0)
			rxq_info->msix_tbl[i] = bmap_idx;
		memset(&q->stats, 0, sizeof(q->stats));
		if (ids)
			ids[i] = q->rspq.abs_id;
	}
	return 0;
freeout:
	q = rxq_info->uldrxq;
	for ( ; i; i--, q++) {
		if (q->rspq.desc)
			free_rspq_fl(adap, &q->rspq,
				     q->fl.size ? &q->fl : NULL);
	}
	return err;
}

static int
setup_sge_queues_uld(struct adapter *adap, unsigned int uld_type, bool lro)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	int i, ret = 0;

	if (adap->flags & CXGB4_USING_MSIX) {
		rxq_info->msix_tbl = kcalloc((rxq_info->nrxq + rxq_info->nciq),
					     sizeof(unsigned short),
					     GFP_KERNEL);
		if (!rxq_info->msix_tbl)
			return -ENOMEM;
	}

	ret = !(!alloc_uld_rxqs(adap, rxq_info, lro));

	/* Tell uP to route control queue completions to rdma rspq */
	if (adap->flags & CXGB4_FULL_INIT_DONE &&
	    !ret && uld_type == CXGB4_ULD_RDMA) {
		struct sge *s = &adap->sge;
		unsigned int cmplqid;
		u32 param, cmdop;

		cmdop = FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL;
		for_each_port(adap, i) {
			cmplqid = rxq_info->uldrxq[i].rspq.cntxt_id;
			param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
				 FW_PARAMS_PARAM_X_V(cmdop) |
				 FW_PARAMS_PARAM_YZ_V(s->ctrlq[i].q.cntxt_id));
			ret = t4_set_params(adap, adap->mbox, adap->pf,
					    0, 1, &param, &cmplqid);
		}
	}
	return ret;
}

static void t4_free_uld_rxqs(struct adapter *adap, int n,
			     struct sge_ofld_rxq *q)
{
	for ( ; n; n--, q++) {
		if (q->rspq.desc)
			free_rspq_fl(adap, &q->rspq,
				     q->fl.size ? &q->fl : NULL);
	}
}

static void free_sge_queues_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];

	if (adap->flags & CXGB4_FULL_INIT_DONE && uld_type == CXGB4_ULD_RDMA) {
		struct sge *s = &adap->sge;
		u32 param, cmdop, cmplqid = 0;
		int i;

		cmdop = FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL;
		for_each_port(adap, i) {
			param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
				 FW_PARAMS_PARAM_X_V(cmdop) |
				 FW_PARAMS_PARAM_YZ_V(s->ctrlq[i].q.cntxt_id));
			t4_set_params(adap, adap->mbox, adap->pf,
				      0, 1, &param, &cmplqid);
		}
	}

	if (rxq_info->nciq)
		t4_free_uld_rxqs(adap, rxq_info->nciq,
				 rxq_info->uldrxq + rxq_info->nrxq);
	t4_free_uld_rxqs(adap, rxq_info->nrxq, rxq_info->uldrxq);
	if (adap->flags & CXGB4_USING_MSIX)
		kfree(rxq_info->msix_tbl);
}

static int cfg_queues_uld(struct adapter *adap, unsigned int uld_type,
			  const struct cxgb4_uld_info *uld_info)
{
	struct sge *s = &adap->sge;
	struct sge_uld_rxq_info *rxq_info;
	int i, nrxq, ciq_size;

	rxq_info = kzalloc(sizeof(*rxq_info), GFP_KERNEL);
	if (!rxq_info)
		return -ENOMEM;

	if (adap->flags & CXGB4_USING_MSIX && uld_info->nrxq > s->nqs_per_uld) {
		i = s->nqs_per_uld;
		rxq_info->nrxq = roundup(i, adap->params.nports);
	} else {
		i = min_t(int, uld_info->nrxq,
			  num_online_cpus());
		rxq_info->nrxq = roundup(i, adap->params.nports);
	}
	if (!uld_info->ciq) {
		rxq_info->nciq = 0;
	} else {
		if (adap->flags & CXGB4_USING_MSIX)
			rxq_info->nciq = min_t(int, s->nqs_per_uld,
					       num_online_cpus());
		else
			rxq_info->nciq = min_t(int, MAX_OFLD_QSETS,
					       num_online_cpus());
		rxq_info->nciq = ((rxq_info->nciq / adap->params.nports) *
				  adap->params.nports);
		rxq_info->nciq = max_t(int, rxq_info->nciq,
				       adap->params.nports);
	}

	nrxq = rxq_info->nrxq + rxq_info->nciq; /* total rxq's */
	rxq_info->uldrxq = kcalloc(nrxq, sizeof(struct sge_ofld_rxq),
				   GFP_KERNEL);
	if (!rxq_info->uldrxq) {
		kfree(rxq_info);
		return -ENOMEM;
	}

	rxq_info->rspq_id = kcalloc(nrxq, sizeof(unsigned short), GFP_KERNEL);
	if (!rxq_info->rspq_id) {
		kfree(rxq_info->uldrxq);
		kfree(rxq_info);
		return -ENOMEM;
	}

	for (i = 0; i < rxq_info->nrxq; i++) {
		struct sge_ofld_rxq *r = &rxq_info->uldrxq[i];

		init_rspq(adap, &r->rspq, 5, 1, uld_info->rxq_size, 64);
		r->rspq.uld = uld_type;
		r->fl.size = 72;
	}

	ciq_size = 64 + adap->vres.cq.size + adap->tids.nftids;
	if (ciq_size > SGE_MAX_IQ_SIZE) {
		dev_warn(adap->pdev_dev, "CIQ size too small for available IQs\n");
		ciq_size = SGE_MAX_IQ_SIZE;
	}

	for (i = rxq_info->nrxq; i < nrxq; i++) {
		struct sge_ofld_rxq *r = &rxq_info->uldrxq[i];

		init_rspq(adap, &r->rspq, 5, 1, ciq_size, 64);
		r->rspq.uld = uld_type;
	}

	memcpy(rxq_info->name, uld_info->name, IFNAMSIZ);
	adap->sge.uld_rxq_info[uld_type] = rxq_info;

	return 0;
}

static void free_queues_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];

	adap->sge.uld_rxq_info[uld_type] = NULL;
	kfree(rxq_info->rspq_id);
	kfree(rxq_info->uldrxq);
	kfree(rxq_info);
}

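/* Request an MSI-X interrupt for each of the ULD's response queues and set
 * its CPU affinity; if any request fails, the IRQs and bitmap entries
 * claimed so far are released again.
 */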
static int
request_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	struct uld_msix_info *minfo;
	int err = 0;
	unsigned int idx, bmap_idx;

	for_each_uldrxq(rxq_info, idx) {
		bmap_idx = rxq_info->msix_tbl[idx];
		minfo = &adap->msix_info_ulds[bmap_idx];
		err = request_irq(minfo->vec,
				  t4_sge_intr_msix, 0,
				  minfo->desc,
				  &rxq_info->uldrxq[idx].rspq);
		if (err)
			goto unwind;

		cxgb4_set_msix_aff(adap, minfo->vec,
				   &minfo->aff_mask, idx);
	}
	return 0;

unwind:
	while (idx-- > 0) {
		bmap_idx = rxq_info->msix_tbl[idx];
		minfo = &adap->msix_info_ulds[bmap_idx];
		cxgb4_clear_msix_aff(minfo->vec, minfo->aff_mask);
		free_msix_idx_in_bmap(adap, bmap_idx);
		free_irq(minfo->vec, &rxq_info->uldrxq[idx].rspq);
	}
	return err;
}

static void
free_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	struct uld_msix_info *minfo;
	unsigned int idx, bmap_idx;

	for_each_uldrxq(rxq_info, idx) {
		bmap_idx = rxq_info->msix_tbl[idx];
		minfo = &adap->msix_info_ulds[bmap_idx];

		cxgb4_clear_msix_aff(minfo->vec, minfo->aff_mask);
		free_msix_idx_in_bmap(adap, bmap_idx);
		free_irq(minfo->vec, &rxq_info->uldrxq[idx].rspq);
	}
}

static void name_msix_vecs_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	int n = sizeof(adap->msix_info_ulds[0].desc);
	unsigned int idx, bmap_idx;

	for_each_uldrxq(rxq_info, idx) {
		bmap_idx = rxq_info->msix_tbl[idx];

		snprintf(adap->msix_info_ulds[bmap_idx].desc, n, "%s-%s%d",
			 adap->port[0]->name, rxq_info->name, idx);
	}
}

static void enable_rx(struct adapter *adap, struct sge_rspq *q)
{
	if (!q)
		return;

	if (q->handler)
		napi_enable(&q->napi);

	/* 0-increment GTS to start the timer and enable interrupts */
	t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
		     SEINTARM_V(q->intr_params) |
		     INGRESSQID_V(q->cntxt_id));
}

static void quiesce_rx(struct adapter *adap, struct sge_rspq *q)
{
	if (q && q->handler)
		napi_disable(&q->napi);
}

static void enable_rx_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	int idx;

	for_each_uldrxq(rxq_info, idx)
		enable_rx(adap, &rxq_info->uldrxq[idx].rspq);
}

static void quiesce_rx_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	int idx;

	for_each_uldrxq(rxq_info, idx)
		quiesce_rx(adap, &rxq_info->uldrxq[idx].rspq);
}

static void
free_sge_txq_uld(struct adapter *adap, struct sge_uld_txq_info *txq_info)
{
	int nq = txq_info->ntxq;
	int i;

	for (i = 0; i < nq; i++) {
		struct sge_uld_txq *txq = &txq_info->uldtxq[i];

		if (txq && txq->q.desc) {
			tasklet_kill(&txq->qresume_tsk);
			t4_ofld_eq_free(adap, adap->mbox, adap->pf, 0,
					txq->q.cntxt_id);
			free_tx_desc(adap, &txq->q, txq->q.in_use, false);
			kfree(txq->q.sdesc);
			__skb_queue_purge(&txq->sendq);
			free_txq(adap, &txq->q);
		}
	}
}

static int
alloc_sge_txq_uld(struct adapter *adap, struct sge_uld_txq_info *txq_info,
		  unsigned int uld_type)
{
	struct sge *s = &adap->sge;
	int nq = txq_info->ntxq;
	int i, j, err;

	j = nq / adap->params.nports;
	for (i = 0; i < nq; i++) {
		struct sge_uld_txq *txq = &txq_info->uldtxq[i];

		txq->q.size = 1024;
		err = t4_sge_alloc_uld_txq(adap, txq, adap->port[i / j],
					   s->fw_evtq.cntxt_id, uld_type);
		if (err)
			goto freeout;
	}
	return 0;
freeout:
	free_sge_txq_uld(adap, txq_info);
	return err;
}

static void
release_sge_txq_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_txq_info *txq_info = NULL;
	int tx_uld_type = TX_ULD(uld_type);

	txq_info = adap->sge.uld_txq_info[tx_uld_type];

	if (txq_info && atomic_dec_and_test(&txq_info->users)) {
		free_sge_txq_uld(adap, txq_info);
		kfree(txq_info->uldtxq);
		kfree(txq_info);
		adap->sge.uld_txq_info[tx_uld_type] = NULL;
	}
}

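/* Allocate the offload TX queue set for a ULD, or take a reference on the
 * set already allocated for its TX type: OFLD TX queues are shared between
 * ULDs of the same TX type and reference counted via txq_info->users.
 * Crypto ULDs size their queue count from the firmware-provided crypto
 * resources rather than from the ULD's requested ntxq.
 */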
static int
setup_sge_txq_uld(struct adapter *adap, unsigned int uld_type,
		  const struct cxgb4_uld_info *uld_info)
{
	struct sge_uld_txq_info *txq_info = NULL;
	int tx_uld_type, i;

	tx_uld_type = TX_ULD(uld_type);
	txq_info = adap->sge.uld_txq_info[tx_uld_type];

	if ((tx_uld_type == CXGB4_TX_OFLD) && txq_info &&
	    (atomic_inc_return(&txq_info->users) > 1))
		return 0;

	txq_info = kzalloc(sizeof(*txq_info), GFP_KERNEL);
	if (!txq_info)
		return -ENOMEM;
	if (uld_type == CXGB4_ULD_CRYPTO) {
		i = min_t(int, adap->vres.ncrypto_fc,
			  num_online_cpus());
		txq_info->ntxq = rounddown(i, adap->params.nports);
		if (txq_info->ntxq <= 0) {
			dev_warn(adap->pdev_dev, "Crypto Tx Queues can't be zero\n");
			kfree(txq_info);
			return -EINVAL;
		}

	} else {
		i = min_t(int, uld_info->ntxq, num_online_cpus());
		txq_info->ntxq = roundup(i, adap->params.nports);
	}
	txq_info->uldtxq = kcalloc(txq_info->ntxq, sizeof(struct sge_uld_txq),
				   GFP_KERNEL);
	if (!txq_info->uldtxq) {
		kfree(txq_info);
		return -ENOMEM;
	}

	if (alloc_sge_txq_uld(adap, txq_info, tx_uld_type)) {
		kfree(txq_info->uldtxq);
		kfree(txq_info);
		return -ENOMEM;
	}

	atomic_inc(&txq_info->users);
	adap->sge.uld_txq_info[tx_uld_type] = txq_info;
	return 0;
}

static void uld_queue_init(struct adapter *adap, unsigned int uld_type,
			   struct cxgb4_lld_info *lli)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	int tx_uld_type = TX_ULD(uld_type);
	struct sge_uld_txq_info *txq_info = adap->sge.uld_txq_info[tx_uld_type];

	lli->rxq_ids = rxq_info->rspq_id;
	lli->nrxq = rxq_info->nrxq;
	lli->ciq_ids = rxq_info->rspq_id + rxq_info->nrxq;
	lli->nciq = rxq_info->nciq;
	lli->ntxq = txq_info->ntxq;
}

int t4_uld_mem_alloc(struct adapter *adap)
{
	struct sge *s = &adap->sge;

	adap->uld = kcalloc(CXGB4_ULD_MAX, sizeof(*adap->uld), GFP_KERNEL);
	if (!adap->uld)
		return -ENOMEM;

	s->uld_rxq_info = kcalloc(CXGB4_ULD_MAX,
				  sizeof(struct sge_uld_rxq_info *),
				  GFP_KERNEL);
	if (!s->uld_rxq_info)
		goto err_uld;

	s->uld_txq_info = kcalloc(CXGB4_TX_MAX,
				  sizeof(struct sge_uld_txq_info *),
				  GFP_KERNEL);
	if (!s->uld_txq_info)
		goto err_uld_rx;
	return 0;

err_uld_rx:
	kfree(s->uld_rxq_info);
err_uld:
	kfree(adap->uld);
	return -ENOMEM;
}

void t4_uld_mem_free(struct adapter *adap)
{
	struct sge *s = &adap->sge;

	kfree(s->uld_txq_info);
	kfree(s->uld_rxq_info);
	kfree(adap->uld);
}

/* This function should be called with uld_mutex taken. */
static void cxgb4_shutdown_uld_adapter(struct adapter *adap, enum cxgb4_uld type)
{
	if (adap->uld[type].handle) {
		adap->uld[type].handle = NULL;
		adap->uld[type].add = NULL;
		release_sge_txq_uld(adap, type);

		if (adap->flags & CXGB4_FULL_INIT_DONE)
			quiesce_rx_uld(adap, type);

		if (adap->flags & CXGB4_USING_MSIX)
			free_msix_queue_irqs_uld(adap, type);

		free_sge_queues_uld(adap, type);
		free_queues_uld(adap, type);
	}
}

void t4_uld_clean_up(struct adapter *adap)
{
	unsigned int i;

	mutex_lock(&uld_mutex);
	for (i = 0; i < CXGB4_ULD_MAX; i++) {
		if (!adap->uld[i].handle)
			continue;

		cxgb4_shutdown_uld_adapter(adap, i);
	}
	mutex_unlock(&uld_mutex);
}

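/* Fill in the cxgb4_lld_info block handed to a ULD at attach time: adapter
 * resources (TIDs, L2T, virtual resource ranges), hardware and firmware
 * parameters, and the register addresses the ULD needs for doorbell and
 * GTS updates.
 */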
static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld)
{
	int i;

	lld->pdev = adap->pdev;
	lld->pf = adap->pf;
	lld->l2t = adap->l2t;
	lld->tids = &adap->tids;
	lld->ports = adap->port;
	lld->vr = &adap->vres;
	lld->mtus = adap->params.mtus;
	lld->nchan = adap->params.nports;
	lld->nports = adap->params.nports;
	lld->wr_cred = adap->params.ofldq_wr_cred;
	lld->crypto = adap->params.crypto;
	lld->iscsi_iolen = MAXRXDATA_G(t4_read_reg(adap, TP_PARA_REG2_A));
	lld->iscsi_tagmask = t4_read_reg(adap, ULP_RX_ISCSI_TAGMASK_A);
	lld->iscsi_pgsz_order = t4_read_reg(adap, ULP_RX_ISCSI_PSZ_A);
	lld->iscsi_llimit = t4_read_reg(adap, ULP_RX_ISCSI_LLIMIT_A);
	lld->iscsi_ppm = &adap->iscsi_ppm;
	lld->adapter_type = adap->params.chip;
	lld->cclk_ps = 1000000000 / adap->params.vpd.cclk;
	lld->udb_density = 1 << adap->params.sge.eq_qpp;
	lld->ucq_density = 1 << adap->params.sge.iq_qpp;
	lld->sge_host_page_size = 1 << (adap->params.sge.hps + 10);
	lld->filt_mode = adap->params.tp.vlan_pri_map;
	/* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
	for (i = 0; i < NCHAN; i++)
		lld->tx_modq[i] = i;
	lld->gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS_A);
	lld->db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL_A);
	lld->fw_vers = adap->params.fw_vers;
	lld->dbfifo_int_thresh = dbfifo_int_thresh;
	lld->sge_ingpadboundary = adap->sge.fl_align;
	lld->sge_egrstatuspagesize = adap->sge.stat_len;
	lld->sge_pktshift = adap->sge.pktshift;
	lld->ulp_crypto = adap->params.crypto;
	lld->enable_fw_ofld_conn = adap->flags & CXGB4_FW_OFLD_CONN;
	lld->max_ordird_qp = adap->params.max_ordird_qp;
	lld->max_ird_adapter = adap->params.max_ird_adapter;
	lld->ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;
	lld->nodeid = dev_to_node(adap->pdev_dev);
	lld->fr_nsmr_tpte_wr_support = adap->params.fr_nsmr_tpte_wr_support;
	lld->write_w_imm_support = adap->params.write_w_imm_support;
	lld->write_cmpl_support = adap->params.write_cmpl_support;
}

static void uld_attach(struct adapter *adap, unsigned int uld)
{
	void *handle;
	struct cxgb4_lld_info lli;

	uld_init(adap, &lli);
	uld_queue_init(adap, uld, &lli);

	handle = adap->uld[uld].add(&lli);
	if (IS_ERR(handle)) {
		dev_warn(adap->pdev_dev,
			 "could not attach to the %s driver, error %ld\n",
			 adap->uld[uld].name, PTR_ERR(handle));
		return;
	}

	adap->uld[uld].handle = handle;
	t4_register_netevent_notifier();

	if (adap->flags & CXGB4_FULL_INIT_DONE)
		adap->uld[uld].state_change(handle, CXGB4_STATE_UP);
}

/**
 * cxgb4_register_uld - register an upper-layer driver
 * @type: the ULD type
 * @p: the ULD methods
 *
 * Registers an upper-layer driver with this driver and notifies the ULD
 * about any presently available devices that support its type.
 * Registration failures on individual adapters (including adapters on
 * which a ULD of this type is already attached) are logged, and the
 * remaining adapters are still processed.
 */
void cxgb4_register_uld(enum cxgb4_uld type,
			const struct cxgb4_uld_info *p)
{
	int ret = 0;
	struct adapter *adap;

	if (type >= CXGB4_ULD_MAX)
		return;

	mutex_lock(&uld_mutex);
	list_for_each_entry(adap, &adapter_list, list_node) {
		if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
		    (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
			continue;
		if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
			continue;
		ret = cfg_queues_uld(adap, type, p);
		if (ret)
			goto out;
		ret = setup_sge_queues_uld(adap, type, p->lro);
		if (ret)
			goto free_queues;
		if (adap->flags & CXGB4_USING_MSIX) {
			name_msix_vecs_uld(adap, type);
			ret = request_msix_queue_irqs_uld(adap, type);
			if (ret)
				goto free_rxq;
		}
		if (adap->flags & CXGB4_FULL_INIT_DONE)
			enable_rx_uld(adap, type);
		if (adap->uld[type].add)
			goto free_irq;
		ret = setup_sge_txq_uld(adap, type, p);
		if (ret)
			goto free_irq;
		adap->uld[type] = *p;
		uld_attach(adap, type);
		continue;
free_irq:
		if (adap->flags & CXGB4_FULL_INIT_DONE)
			quiesce_rx_uld(adap, type);
		if (adap->flags & CXGB4_USING_MSIX)
			free_msix_queue_irqs_uld(adap, type);
free_rxq:
		free_sge_queues_uld(adap, type);
free_queues:
		free_queues_uld(adap, type);
out:
		dev_warn(adap->pdev_dev,
			 "ULD registration failed for uld type %d\n", type);
	}
	mutex_unlock(&uld_mutex);
	return;
}
EXPORT_SYMBOL(cxgb4_register_uld);

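/* Illustrative sketch only (not part of this driver): a ULD built on this
 * interface would typically fill in a struct cxgb4_uld_info with its queue
 * sizing and callbacks and register it once at module init. The callback
 * names below are hypothetical; see cxgb4_uld.h for the exact prototypes.
 *
 *	static const struct cxgb4_uld_info my_uld_info = {
 *		.name		= "my_uld",		// copied into rxq_info->name
 *		.nrxq		= 8,			// requested RX queues, see cfg_queues_uld()
 *		.ntxq		= 8,			// requested TX queues, see setup_sge_txq_uld()
 *		.rxq_size	= 1024,			// response queue size
 *		.add		= my_uld_add,		// returns the ULD handle, called from uld_attach()
 *		.rx_handler	= my_uld_rx_handler,	// invoked from uldrx_handler()
 *		.state_change	= my_uld_state_change,	// e.g. notified with CXGB4_STATE_UP
 *	};
 *
 *	cxgb4_register_uld(CXGB4_ULD_RDMA, &my_uld_info);
 */
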
/**
 * cxgb4_unregister_uld - unregister an upper-layer driver
 * @type: the ULD type
 *
 * Unregisters an existing upper-layer driver.
 */
int cxgb4_unregister_uld(enum cxgb4_uld type)
{
	struct adapter *adap;

	if (type >= CXGB4_ULD_MAX)
		return -EINVAL;

	mutex_lock(&uld_mutex);
	list_for_each_entry(adap, &adapter_list, list_node) {
		if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
		    (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
			continue;
		if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
			continue;

		cxgb4_shutdown_uld_adapter(adap, type);
	}
	mutex_unlock(&uld_mutex);

	return 0;
}
EXPORT_SYMBOL(cxgb4_unregister_uld);