/*
 * cxgb4_uld.c: Chelsio Upper Layer Driver Interface for T4/T5/T6 SGE management
 *
 * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Written by: Atul Gupta (atul.gupta@chelsio.com)
 * Written by: Hariprasad Shenai (hariprasad@chelsio.com)
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/list.h>
#include <linux/skbuff.h>
#include <linux/pci.h>

#include "cxgb4.h"
#include "cxgb4_uld.h"
#include "t4_regs.h"
#include "t4fw_api.h"
#include "t4_msg.h"

#define for_each_uldrxq(m, i) for (i = 0; i < ((m)->nrxq + (m)->nciq); i++)

/* Flush the aggregated lro sessions */
static void uldrx_flush_handler(struct sge_rspq *q)
{
	struct adapter *adap = q->adap;

	if (adap->uld[q->uld].lro_flush)
		adap->uld[q->uld].lro_flush(&q->lro_mgr);
}

/**
 * uldrx_handler - response queue handler for ULD queues
 * @q: the response queue that received the packet
 * @rsp: the response queue descriptor holding the offload message
 * @gl: the gather list of packet fragments
 *
 * Deliver an ingress offload packet to a ULD.  All processing is done by
 * the ULD, we just maintain statistics.
 */
static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
			 const struct pkt_gl *gl)
{
	struct adapter *adap = q->adap;
	struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);
	int ret;

	/* FW can send CPLs encapsulated in a CPL_FW4_MSG */
	if (((const struct rss_header *)rsp)->opcode == CPL_FW4_MSG &&
	    ((const struct cpl_fw4_msg *)(rsp + 1))->type == FW_TYPE_RSSCPL)
		rsp += 2;

	if (q->flush_handler)
		ret = adap->uld[q->uld].lro_rx_handler(adap->uld[q->uld].handle,
						       rsp, gl, &q->lro_mgr,
						       &q->napi);
	else
		ret = adap->uld[q->uld].rx_handler(adap->uld[q->uld].handle,
						   rsp, gl);

	if (ret) {
		rxq->stats.nomem++;
		return -1;
	}

	if (!gl)
		rxq->stats.imm++;
	else if (gl == CXGB4_MSG_AN)
		rxq->stats.an++;
	else
		rxq->stats.pkts++;
	return 0;
}
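/* Allocate the ULD ingress queues: rxq_info->nrxq ordinary offload rxqs
 * followed by rxq_info->nciq concentrator queues, spread evenly across the
 * ports.  With MSI-X, a vector is taken from the bitmap for each queue;
 * on failure everything allocated so far is freed again.
 */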
static int alloc_uld_rxqs(struct adapter *adap,
			  struct sge_uld_rxq_info *rxq_info, bool lro)
{
	unsigned int nq = rxq_info->nrxq + rxq_info->nciq;
	struct sge_ofld_rxq *q = rxq_info->uldrxq;
	unsigned short *ids = rxq_info->rspq_id;
	int i, err, msi_idx, que_idx = 0;
	struct sge *s = &adap->sge;
	unsigned int per_chan;

	per_chan = rxq_info->nrxq / adap->params.nports;

	if (adap->flags & CXGB4_USING_MSIX)
		msi_idx = 1;
	else
		msi_idx = -((int)s->intrq.abs_id + 1);

	for (i = 0; i < nq; i++, q++) {
		if (i == rxq_info->nrxq) {
			/* start allocation of concentrator queues */
			per_chan = rxq_info->nciq / adap->params.nports;
			que_idx = 0;
		}

		if (msi_idx >= 0) {
			msi_idx = cxgb4_get_msix_idx_from_bmap(adap);
			if (msi_idx < 0) {
				err = -ENOSPC;
				goto freeout;
			}

			snprintf(adap->msix_info[msi_idx].desc,
				 sizeof(adap->msix_info[msi_idx].desc),
				 "%s-%s%d",
				 adap->port[0]->name, rxq_info->name, i);

			q->msix = &adap->msix_info[msi_idx];
		}
		err = t4_sge_alloc_rxq(adap, &q->rspq, false,
				       adap->port[que_idx++ / per_chan],
				       msi_idx,
				       q->fl.size ? &q->fl : NULL,
				       uldrx_handler,
				       lro ? uldrx_flush_handler : NULL,
				       0);
		if (err)
			goto freeout;

		memset(&q->stats, 0, sizeof(q->stats));
		if (ids)
			ids[i] = q->rspq.abs_id;
	}
	return 0;
freeout:
	q = rxq_info->uldrxq;
	for ( ; i; i--, q++) {
		if (q->rspq.desc)
			free_rspq_fl(adap, &q->rspq,
				     q->fl.size ? &q->fl : NULL);
		if (q->msix)
			cxgb4_free_msix_idx_in_bmap(adap, q->msix->idx);
	}
	return err;
}
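/* Allocate the rx queues for a ULD and, for RDMA on a fully initialised
 * adapter, tell the firmware to route control-queue completions to the
 * RDMA response queues.
 */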
static int
setup_sge_queues_uld(struct adapter *adap, unsigned int uld_type, bool lro)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	int i, ret;

	ret = alloc_uld_rxqs(adap, rxq_info, lro);
	if (ret)
		return ret;

	/* Tell uP to route control queue completions to rdma rspq */
	if (adap->flags & CXGB4_FULL_INIT_DONE && uld_type == CXGB4_ULD_RDMA) {
		struct sge *s = &adap->sge;
		unsigned int cmplqid;
		u32 param, cmdop;

		cmdop = FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL;
		for_each_port(adap, i) {
			cmplqid = rxq_info->uldrxq[i].rspq.cntxt_id;
			param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
				 FW_PARAMS_PARAM_X_V(cmdop) |
				 FW_PARAMS_PARAM_YZ_V(s->ctrlq[i].q.cntxt_id));
			ret = t4_set_params(adap, adap->mbox, adap->pf,
					    0, 1, &param, &cmplqid);
		}
	}
	return ret;
}

static void t4_free_uld_rxqs(struct adapter *adap, int n,
			     struct sge_ofld_rxq *q)
{
	for ( ; n; n--, q++) {
		if (q->rspq.desc)
			free_rspq_fl(adap, &q->rspq,
				     q->fl.size ? &q->fl : NULL);
	}
}

static void free_sge_queues_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];

	if (adap->flags & CXGB4_FULL_INIT_DONE && uld_type == CXGB4_ULD_RDMA) {
		struct sge *s = &adap->sge;
		u32 param, cmdop, cmplqid = 0;
		int i;

		cmdop = FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL;
		for_each_port(adap, i) {
			param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
				 FW_PARAMS_PARAM_X_V(cmdop) |
				 FW_PARAMS_PARAM_YZ_V(s->ctrlq[i].q.cntxt_id));
			t4_set_params(adap, adap->mbox, adap->pf,
				      0, 1, &param, &cmplqid);
		}
	}

	if (rxq_info->nciq)
		t4_free_uld_rxqs(adap, rxq_info->nciq,
				 rxq_info->uldrxq + rxq_info->nrxq);
	t4_free_uld_rxqs(adap, rxq_info->nrxq, rxq_info->uldrxq);
}
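/* Work out how many ordinary and concentrator ingress queues a ULD gets,
 * based on what the ULD asked for, the number of online CPUs, the number of
 * ports and whether MSI-X is in use, then allocate the queue array and the
 * response queue ID array.
 */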
static int cfg_queues_uld(struct adapter *adap, unsigned int uld_type,
			  const struct cxgb4_uld_info *uld_info)
{
	struct sge *s = &adap->sge;
	struct sge_uld_rxq_info *rxq_info;
	int i, nrxq, ciq_size;

	rxq_info = kzalloc(sizeof(*rxq_info), GFP_KERNEL);
	if (!rxq_info)
		return -ENOMEM;

	if (adap->flags & CXGB4_USING_MSIX && uld_info->nrxq > s->nqs_per_uld) {
		i = s->nqs_per_uld;
		rxq_info->nrxq = roundup(i, adap->params.nports);
	} else {
		i = min_t(int, uld_info->nrxq,
			  num_online_cpus());
		rxq_info->nrxq = roundup(i, adap->params.nports);
	}
	if (!uld_info->ciq) {
		rxq_info->nciq = 0;
	} else {
		if (adap->flags & CXGB4_USING_MSIX)
			rxq_info->nciq = min_t(int, s->nqs_per_uld,
					       num_online_cpus());
		else
			rxq_info->nciq = min_t(int, MAX_OFLD_QSETS,
					       num_online_cpus());
		rxq_info->nciq = ((rxq_info->nciq / adap->params.nports) *
				  adap->params.nports);
		rxq_info->nciq = max_t(int, rxq_info->nciq,
				       adap->params.nports);
	}

	nrxq = rxq_info->nrxq + rxq_info->nciq; /* total rxq's */
	rxq_info->uldrxq = kcalloc(nrxq, sizeof(struct sge_ofld_rxq),
				   GFP_KERNEL);
	if (!rxq_info->uldrxq) {
		kfree(rxq_info);
		return -ENOMEM;
	}

	rxq_info->rspq_id = kcalloc(nrxq, sizeof(unsigned short), GFP_KERNEL);
	if (!rxq_info->rspq_id) {
		kfree(rxq_info->uldrxq);
		kfree(rxq_info);
		return -ENOMEM;
	}

	for (i = 0; i < rxq_info->nrxq; i++) {
		struct sge_ofld_rxq *r = &rxq_info->uldrxq[i];

		init_rspq(adap, &r->rspq, 5, 1, uld_info->rxq_size, 64);
		r->rspq.uld = uld_type;
		r->fl.size = 72;
	}

	ciq_size = 64 + adap->vres.cq.size + adap->tids.nftids;
	if (ciq_size > SGE_MAX_IQ_SIZE) {
		dev_warn(adap->pdev_dev, "CIQ size too small for available IQs\n");
		ciq_size = SGE_MAX_IQ_SIZE;
	}

	for (i = rxq_info->nrxq; i < nrxq; i++) {
		struct sge_ofld_rxq *r = &rxq_info->uldrxq[i];

		init_rspq(adap, &r->rspq, 5, 1, ciq_size, 64);
		r->rspq.uld = uld_type;
	}

	memcpy(rxq_info->name, uld_info->name, IFNAMSIZ);
	adap->sge.uld_rxq_info[uld_type] = rxq_info;

	return 0;
}

static void free_queues_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];

	adap->sge.uld_rxq_info[uld_type] = NULL;
	kfree(rxq_info->rspq_id);
	kfree(rxq_info->uldrxq);
	kfree(rxq_info);
}

static int
request_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	struct msix_info *minfo;
	unsigned int idx;
	int err = 0;

	for_each_uldrxq(rxq_info, idx) {
		minfo = rxq_info->uldrxq[idx].msix;
		err = request_irq(minfo->vec,
				  t4_sge_intr_msix, 0,
				  minfo->desc,
				  &rxq_info->uldrxq[idx].rspq);
		if (err)
			goto unwind;

		cxgb4_set_msix_aff(adap, minfo->vec,
				   &minfo->aff_mask, idx);
	}
	return 0;

unwind:
	while (idx-- > 0) {
		minfo = rxq_info->uldrxq[idx].msix;
		cxgb4_clear_msix_aff(minfo->vec, minfo->aff_mask);
		cxgb4_free_msix_idx_in_bmap(adap, minfo->idx);
		free_irq(minfo->vec, &rxq_info->uldrxq[idx].rspq);
	}
	return err;
}

static void
free_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	struct msix_info *minfo;
	unsigned int idx;

	for_each_uldrxq(rxq_info, idx) {
		minfo = rxq_info->uldrxq[idx].msix;
		cxgb4_clear_msix_aff(minfo->vec, minfo->aff_mask);
		cxgb4_free_msix_idx_in_bmap(adap, minfo->idx);
		free_irq(minfo->vec, &rxq_info->uldrxq[idx].rspq);
	}
}

static void enable_rx_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	int idx;

	for_each_uldrxq(rxq_info, idx) {
		struct sge_rspq *q = &rxq_info->uldrxq[idx].rspq;

		if (!q)
			continue;

		cxgb4_enable_rx(adap, q);
	}
}

static void quiesce_rx_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	int idx;

	for_each_uldrxq(rxq_info, idx) {
		struct sge_rspq *q = &rxq_info->uldrxq[idx].rspq;

		if (!q)
			continue;

		cxgb4_quiesce_rx(q);
	}
}

static void
free_sge_txq_uld(struct adapter *adap, struct sge_uld_txq_info *txq_info)
{
	int nq = txq_info->ntxq;
	int i;

	for (i = 0; i < nq; i++) {
		struct sge_uld_txq *txq = &txq_info->uldtxq[i];

		if (txq && txq->q.desc) {
			tasklet_kill(&txq->qresume_tsk);
			t4_ofld_eq_free(adap, adap->mbox, adap->pf, 0,
					txq->q.cntxt_id);
			free_tx_desc(adap, &txq->q, txq->q.in_use, false);
			kfree(txq->q.sdesc);
			__skb_queue_purge(&txq->sendq);
			free_txq(adap, &txq->q);
		}
	}
}
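/* Allocate the ULD transmit queues: ntxq offload txqs of 1024 descriptors
 * each, spread across the ports and bound to the firmware event queue.
 * On failure, any queues already allocated are freed.
 */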
static int
alloc_sge_txq_uld(struct adapter *adap, struct sge_uld_txq_info *txq_info,
		  unsigned int uld_type)
{
	struct sge *s = &adap->sge;
	int nq = txq_info->ntxq;
	int i, j, err;

	j = nq / adap->params.nports;
	for (i = 0; i < nq; i++) {
		struct sge_uld_txq *txq = &txq_info->uldtxq[i];

		txq->q.size = 1024;
		err = t4_sge_alloc_uld_txq(adap, txq, adap->port[i / j],
					   s->fw_evtq.cntxt_id, uld_type);
		if (err)
			goto freeout;
	}
	return 0;
freeout:
	free_sge_txq_uld(adap, txq_info);
	return err;
}

static void
release_sge_txq_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_txq_info *txq_info = NULL;
	int tx_uld_type = TX_ULD(uld_type);

	txq_info = adap->sge.uld_txq_info[tx_uld_type];

	if (txq_info && atomic_dec_and_test(&txq_info->users)) {
		free_sge_txq_uld(adap, txq_info);
		kfree(txq_info->uldtxq);
		kfree(txq_info);
		adap->sge.uld_txq_info[tx_uld_type] = NULL;
	}
}

static int
setup_sge_txq_uld(struct adapter *adap, unsigned int uld_type,
		  const struct cxgb4_uld_info *uld_info)
{
	struct sge_uld_txq_info *txq_info = NULL;
	int tx_uld_type, i;

	tx_uld_type = TX_ULD(uld_type);
	txq_info = adap->sge.uld_txq_info[tx_uld_type];

	if ((tx_uld_type == CXGB4_TX_OFLD) && txq_info &&
	    (atomic_inc_return(&txq_info->users) > 1))
		return 0;

	txq_info = kzalloc(sizeof(*txq_info), GFP_KERNEL);
	if (!txq_info)
		return -ENOMEM;
	if (uld_type == CXGB4_ULD_CRYPTO) {
		i = min_t(int, adap->vres.ncrypto_fc,
			  num_online_cpus());
		txq_info->ntxq = rounddown(i, adap->params.nports);
		if (txq_info->ntxq <= 0) {
			dev_warn(adap->pdev_dev, "Crypto Tx Queues can't be zero\n");
			kfree(txq_info);
			return -EINVAL;
		}

	} else {
		i = min_t(int, uld_info->ntxq, num_online_cpus());
		txq_info->ntxq = roundup(i, adap->params.nports);
	}
	txq_info->uldtxq = kcalloc(txq_info->ntxq, sizeof(struct sge_uld_txq),
				   GFP_KERNEL);
	if (!txq_info->uldtxq) {
		kfree(txq_info);
		return -ENOMEM;
	}

	if (alloc_sge_txq_uld(adap, txq_info, tx_uld_type)) {
		kfree(txq_info->uldtxq);
		kfree(txq_info);
		return -ENOMEM;
	}

	atomic_inc(&txq_info->users);
	adap->sge.uld_txq_info[tx_uld_type] = txq_info;
	return 0;
}

static void uld_queue_init(struct adapter *adap, unsigned int uld_type,
			   struct cxgb4_lld_info *lli)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	int tx_uld_type = TX_ULD(uld_type);
	struct sge_uld_txq_info *txq_info = adap->sge.uld_txq_info[tx_uld_type];

	lli->rxq_ids = rxq_info->rspq_id;
	lli->nrxq = rxq_info->nrxq;
	lli->ciq_ids = rxq_info->rspq_id + rxq_info->nrxq;
	lli->nciq = rxq_info->nciq;
	lli->ntxq = txq_info->ntxq;
}
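/* Allocate (and free, below) the per-adapter ULD bookkeeping: the uld info
 * array plus the per-type rx and tx queue info pointer arrays.
 */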
int t4_uld_mem_alloc(struct adapter *adap)
{
	struct sge *s = &adap->sge;

	adap->uld = kcalloc(CXGB4_ULD_MAX, sizeof(*adap->uld), GFP_KERNEL);
	if (!adap->uld)
		return -ENOMEM;

	s->uld_rxq_info = kcalloc(CXGB4_ULD_MAX,
				  sizeof(struct sge_uld_rxq_info *),
				  GFP_KERNEL);
	if (!s->uld_rxq_info)
		goto err_uld;

	s->uld_txq_info = kcalloc(CXGB4_TX_MAX,
				  sizeof(struct sge_uld_txq_info *),
				  GFP_KERNEL);
	if (!s->uld_txq_info)
		goto err_uld_rx;
	return 0;

err_uld_rx:
	kfree(s->uld_rxq_info);
err_uld:
	kfree(adap->uld);
	return -ENOMEM;
}

void t4_uld_mem_free(struct adapter *adap)
{
	struct sge *s = &adap->sge;

	kfree(s->uld_txq_info);
	kfree(s->uld_rxq_info);
	kfree(adap->uld);
}

/* This function should be called with uld_mutex taken. */
static void cxgb4_shutdown_uld_adapter(struct adapter *adap, enum cxgb4_uld type)
{
	if (adap->uld[type].handle) {
		adap->uld[type].handle = NULL;
		adap->uld[type].add = NULL;
		release_sge_txq_uld(adap, type);

		if (adap->flags & CXGB4_FULL_INIT_DONE)
			quiesce_rx_uld(adap, type);

		if (adap->flags & CXGB4_USING_MSIX)
			free_msix_queue_irqs_uld(adap, type);

		free_sge_queues_uld(adap, type);
		free_queues_uld(adap, type);
	}
}

void t4_uld_clean_up(struct adapter *adap)
{
	unsigned int i;

	mutex_lock(&uld_mutex);
	for (i = 0; i < CXGB4_ULD_MAX; i++) {
		if (!adap->uld[i].handle)
			continue;

		cxgb4_shutdown_uld_adapter(adap, i);
	}
	mutex_unlock(&uld_mutex);
}

static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld)
{
	int i;

	lld->pdev = adap->pdev;
	lld->pf = adap->pf;
	lld->l2t = adap->l2t;
	lld->tids = &adap->tids;
	lld->ports = adap->port;
	lld->vr = &adap->vres;
	lld->mtus = adap->params.mtus;
	lld->nchan = adap->params.nports;
	lld->nports = adap->params.nports;
	lld->wr_cred = adap->params.ofldq_wr_cred;
	lld->crypto = adap->params.crypto;
	lld->iscsi_iolen = MAXRXDATA_G(t4_read_reg(adap, TP_PARA_REG2_A));
	lld->iscsi_tagmask = t4_read_reg(adap, ULP_RX_ISCSI_TAGMASK_A);
	lld->iscsi_pgsz_order = t4_read_reg(adap, ULP_RX_ISCSI_PSZ_A);
	lld->iscsi_llimit = t4_read_reg(adap, ULP_RX_ISCSI_LLIMIT_A);
	lld->iscsi_ppm = &adap->iscsi_ppm;
	lld->adapter_type = adap->params.chip;
	lld->cclk_ps = 1000000000 / adap->params.vpd.cclk;
	lld->udb_density = 1 << adap->params.sge.eq_qpp;
	lld->ucq_density = 1 << adap->params.sge.iq_qpp;
	lld->sge_host_page_size = 1 << (adap->params.sge.hps + 10);
	lld->filt_mode = adap->params.tp.vlan_pri_map;
	/* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
	for (i = 0; i < NCHAN; i++)
		lld->tx_modq[i] = i;
	lld->gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS_A);
	lld->db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL_A);
	lld->fw_vers = adap->params.fw_vers;
	lld->dbfifo_int_thresh = dbfifo_int_thresh;
	lld->sge_ingpadboundary = adap->sge.fl_align;
	lld->sge_egrstatuspagesize = adap->sge.stat_len;
	lld->sge_pktshift = adap->sge.pktshift;
	lld->ulp_crypto = adap->params.crypto;
	lld->enable_fw_ofld_conn = adap->flags & CXGB4_FW_OFLD_CONN;
	lld->max_ordird_qp = adap->params.max_ordird_qp;
	lld->max_ird_adapter = adap->params.max_ird_adapter;
	lld->ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;
	lld->nodeid = dev_to_node(adap->pdev_dev);
	lld->fr_nsmr_tpte_wr_support = adap->params.fr_nsmr_tpte_wr_support;
	lld->write_w_imm_support = adap->params.write_w_imm_support;
	lld->write_cmpl_support = adap->params.write_cmpl_support;
}
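/* Hand the lower-layer driver info to the ULD's ->add() method, record the
 * handle it returns, and tell the ULD the adapter is up if initialisation
 * has already completed.
 */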
static int uld_attach(struct adapter *adap, unsigned int uld)
{
	struct cxgb4_lld_info lli;
	void *handle;

	uld_init(adap, &lli);
	uld_queue_init(adap, uld, &lli);

	handle = adap->uld[uld].add(&lli);
	if (IS_ERR(handle)) {
		dev_warn(adap->pdev_dev,
			 "could not attach to the %s driver, error %ld\n",
			 adap->uld[uld].name, PTR_ERR(handle));
		return PTR_ERR(handle);
	}

	adap->uld[uld].handle = handle;
	t4_register_netevent_notifier();

	if (adap->flags & CXGB4_FULL_INIT_DONE)
		adap->uld[uld].state_change(handle, CXGB4_STATE_UP);

	return 0;
}

#ifdef CONFIG_CHELSIO_TLS_DEVICE
static bool cxgb4_uld_in_use(struct adapter *adap)
{
	const struct tid_info *t = &adap->tids;

	return (atomic_read(&t->conns_in_use) || t->stids_in_use);
}

/* cxgb4_set_ktls_feature: request FW to enable/disable ktls settings.
 * @adap: adapter info
 * @enable: 1 to enable / 0 to disable ktls settings.
 */
int cxgb4_set_ktls_feature(struct adapter *adap, bool enable)
{
	int ret = 0;
	u32 params =
		FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
		FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_KTLS_HW) |
		FW_PARAMS_PARAM_Y_V(enable) |
		FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_KTLS_HW_USER_ENABLE);

	if (enable) {
		if (!refcount_read(&adap->chcr_ktls.ktls_refcount)) {
			/* If ULD connections are up at this point, another
			 * ULD is already active; return failure.
			 */
			if (cxgb4_uld_in_use(adap)) {
				dev_warn(adap->pdev_dev,
					 "ULD connections (tid/stid) active. Can't enable kTLS\n");
				return -EINVAL;
			}
			ret = t4_set_params(adap, adap->mbox, adap->pf,
					    0, 1, &params, &params);
			if (ret)
				return ret;
			refcount_set(&adap->chcr_ktls.ktls_refcount, 1);
			pr_info("kTLS has been enabled. Restrictions placed on ULD support\n");
		} else {
			/* ktls settings already up, just increment refcount. */
			refcount_inc(&adap->chcr_ktls.ktls_refcount);
		}
	} else {
		/* return failure if refcount is already 0. */
		if (!refcount_read(&adap->chcr_ktls.ktls_refcount))
			return -EINVAL;
		/* decrement refcount and test; if 0, disable the ktls feature,
		 * else return command success.
		 */
		if (refcount_dec_and_test(&adap->chcr_ktls.ktls_refcount)) {
			ret = t4_set_params(adap, adap->mbox, adap->pf,
					    0, 1, &params, &params);
			if (ret)
				return ret;
			pr_info("kTLS is disabled. Restrictions on ULD support removed\n");
		}
	}

	return ret;
}
#endif
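/* Bring up everything a ULD needs on one adapter: rx queues, MSI-X irqs,
 * tx queues, then attach the ULD itself.  Each step is unwound in reverse
 * order if a later step fails, and a warning is logged on failure.
 */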
static void cxgb4_uld_alloc_resources(struct adapter *adap,
				      enum cxgb4_uld type,
				      const struct cxgb4_uld_info *p)
{
	int ret = 0;

	if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
	    (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
		return;
	if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
		return;
	ret = cfg_queues_uld(adap, type, p);
	if (ret)
		goto out;
	ret = setup_sge_queues_uld(adap, type, p->lro);
	if (ret)
		goto free_queues;
	if (adap->flags & CXGB4_USING_MSIX) {
		ret = request_msix_queue_irqs_uld(adap, type);
		if (ret)
			goto free_rxq;
	}
	if (adap->flags & CXGB4_FULL_INIT_DONE)
		enable_rx_uld(adap, type);
	if (adap->uld[type].add)
		goto free_irq;
	ret = setup_sge_txq_uld(adap, type, p);
	if (ret)
		goto free_irq;
	adap->uld[type] = *p;
	ret = uld_attach(adap, type);
	if (ret)
		goto free_txq;
	return;
free_txq:
	release_sge_txq_uld(adap, type);
free_irq:
	if (adap->flags & CXGB4_FULL_INIT_DONE)
		quiesce_rx_uld(adap, type);
	if (adap->flags & CXGB4_USING_MSIX)
		free_msix_queue_irqs_uld(adap, type);
free_rxq:
	free_sge_queues_uld(adap, type);
free_queues:
	free_queues_uld(adap, type);
out:
	dev_warn(adap->pdev_dev,
		 "ULD registration failed for uld type %d\n", type);
}
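/* Called once an adapter is ready for ULDs: add it to the adapter list and
 * allocate resources on it for every ULD that has already registered.
 */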
void cxgb4_uld_enable(struct adapter *adap)
{
	struct cxgb4_uld_list *uld_entry;

	mutex_lock(&uld_mutex);
	list_add_tail(&adap->list_node, &adapter_list);
	list_for_each_entry(uld_entry, &uld_list, list_node)
		cxgb4_uld_alloc_resources(adap, uld_entry->uld_type,
					  &uld_entry->uld_info);
	mutex_unlock(&uld_mutex);
}

/* cxgb4_register_uld - register an upper-layer driver
 * @type: the ULD type
 * @p: the ULD methods
 *
 * Registers an upper-layer driver with this driver and notifies the ULD
 * about any presently available devices that support its type.
 */
void cxgb4_register_uld(enum cxgb4_uld type,
			const struct cxgb4_uld_info *p)
{
	struct cxgb4_uld_list *uld_entry;
	struct adapter *adap;

	if (type >= CXGB4_ULD_MAX)
		return;

	uld_entry = kzalloc(sizeof(*uld_entry), GFP_KERNEL);
	if (!uld_entry)
		return;

	memcpy(&uld_entry->uld_info, p, sizeof(struct cxgb4_uld_info));
	mutex_lock(&uld_mutex);
	list_for_each_entry(adap, &adapter_list, list_node)
		cxgb4_uld_alloc_resources(adap, type, p);

	uld_entry->uld_type = type;
	list_add_tail(&uld_entry->list_node, &uld_list);
	mutex_unlock(&uld_mutex);
	return;
}
EXPORT_SYMBOL(cxgb4_register_uld);

/**
 * cxgb4_unregister_uld - unregister an upper-layer driver
 * @type: the ULD type
 *
 * Unregisters an existing upper-layer driver.
 */
int cxgb4_unregister_uld(enum cxgb4_uld type)
{
	struct cxgb4_uld_list *uld_entry, *tmp;
	struct adapter *adap;

	if (type >= CXGB4_ULD_MAX)
		return -EINVAL;

	mutex_lock(&uld_mutex);
	list_for_each_entry(adap, &adapter_list, list_node) {
		if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
		    (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
			continue;
		if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
			continue;

		cxgb4_shutdown_uld_adapter(adap, type);
	}

	list_for_each_entry_safe(uld_entry, tmp, &uld_list, list_node) {
		if (uld_entry->uld_type == type) {
			list_del(&uld_entry->list_node);
			kfree(uld_entry);
		}
	}
	mutex_unlock(&uld_mutex);

	return 0;
}
EXPORT_SYMBOL(cxgb4_unregister_uld);