/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_user_verbs.h>
#include "mlx5_ib.h"
#include "user.h"

/* not supported currently */
static int wq_signature;

enum {
	MLX5_IB_ACK_REQ_FREQ	= 8,
};

enum {
	MLX5_IB_DEFAULT_SCHED_QUEUE	= 0x83,
	MLX5_IB_DEFAULT_QP0_SCHED_QUEUE	= 0x3f,
	MLX5_IB_LINK_TYPE_IB		= 0,
	MLX5_IB_LINK_TYPE_ETH		= 1
};

enum {
	MLX5_IB_SQ_STRIDE	= 6,
	MLX5_IB_CACHE_LINE_SIZE	= 64,
};

static const u32 mlx5_ib_opcode[] = {
	[IB_WR_SEND]				= MLX5_OPCODE_SEND,
	[IB_WR_LSO]				= MLX5_OPCODE_LSO,
	[IB_WR_SEND_WITH_IMM]			= MLX5_OPCODE_SEND_IMM,
	[IB_WR_RDMA_WRITE]			= MLX5_OPCODE_RDMA_WRITE,
	[IB_WR_RDMA_WRITE_WITH_IMM]		= MLX5_OPCODE_RDMA_WRITE_IMM,
	[IB_WR_RDMA_READ]			= MLX5_OPCODE_RDMA_READ,
	[IB_WR_ATOMIC_CMP_AND_SWP]		= MLX5_OPCODE_ATOMIC_CS,
	[IB_WR_ATOMIC_FETCH_AND_ADD]		= MLX5_OPCODE_ATOMIC_FA,
	[IB_WR_SEND_WITH_INV]			= MLX5_OPCODE_SEND_INVAL,
	[IB_WR_LOCAL_INV]			= MLX5_OPCODE_UMR,
	[IB_WR_REG_MR]				= MLX5_OPCODE_UMR,
	[IB_WR_MASKED_ATOMIC_CMP_AND_SWP]	= MLX5_OPCODE_ATOMIC_MASKED_CS,
	[IB_WR_MASKED_ATOMIC_FETCH_AND_ADD]	= MLX5_OPCODE_ATOMIC_MASKED_FA,
	[MLX5_IB_WR_UMR]			= MLX5_OPCODE_UMR,
};

struct mlx5_wqe_eth_pad {
	u8 rsvd0[16];
};

static int is_qp0(enum ib_qp_type qp_type)
{
	return qp_type == IB_QPT_SMI;
}

static int is_sqp(enum ib_qp_type qp_type)
{
	return is_qp0(qp_type) || is_qp1(qp_type);
}

static void *get_wqe(struct mlx5_ib_qp *qp, int offset)
{
	return mlx5_buf_offset(&qp->buf, offset);
}

static void *get_recv_wqe(struct mlx5_ib_qp *qp, int n)
{
	return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift));
}

void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n)
{
	return get_wqe(qp, qp->sq.offset + (n << MLX5_IB_SQ_STRIDE));
}

/**
 * mlx5_ib_read_user_wqe() - Copy a user-space WQE to kernel space.
 *
 * @qp: QP to copy from.
 * @send: copy from the send queue when non-zero, use the receive queue
 *	  otherwise.
 * @wqe_index: index to start copying from. For send work queues, the
 *	       wqe_index is in units of MLX5_SEND_WQE_BB.
 *	       For receive work queues, it is the index of the work queue
 *	       element in the queue.
 * @buffer: destination buffer.
 * @length: maximum number of bytes to copy.
 *
 * Copies at least a single WQE, but may copy more data.
 *
 * Return: the number of bytes copied, or an error code.
 */
int mlx5_ib_read_user_wqe(struct mlx5_ib_qp *qp, int send, int wqe_index,
			  void *buffer, u32 length,
			  struct mlx5_ib_qp_base *base)
{
	struct ib_device *ibdev = qp->ibqp.device;
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_ib_wq *wq = send ? &qp->sq : &qp->rq;
	size_t offset;
	size_t wq_end;
	struct ib_umem *umem = base->ubuffer.umem;
	u32 first_copy_length;
	int wqe_length;
	int ret;

	if (wq->wqe_cnt == 0) {
		mlx5_ib_dbg(dev, "mlx5_ib_read_user_wqe for a QP with wqe_cnt == 0. qp_type: 0x%x\n",
			    qp->ibqp.qp_type);
		return -EINVAL;
	}

	offset = wq->offset + ((wqe_index % wq->wqe_cnt) << wq->wqe_shift);
	wq_end = wq->offset + (wq->wqe_cnt << wq->wqe_shift);

	if (send && length < sizeof(struct mlx5_wqe_ctrl_seg))
		return -EINVAL;

	if (offset > umem->length ||
	    (send && offset + sizeof(struct mlx5_wqe_ctrl_seg) > umem->length))
		return -EINVAL;

	first_copy_length = min_t(u32, offset + length, wq_end) - offset;
	ret = ib_umem_copy_from(buffer, umem, offset, first_copy_length);
	if (ret)
		return ret;

	if (send) {
		struct mlx5_wqe_ctrl_seg *ctrl = buffer;
		int ds = be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_DS_MASK;

		wqe_length = ds * MLX5_WQE_DS_UNITS;
	} else {
		wqe_length = 1 << wq->wqe_shift;
	}

	if (wqe_length <= first_copy_length)
		return first_copy_length;

	ret = ib_umem_copy_from(buffer + first_copy_length, umem, wq->offset,
				wqe_length - first_copy_length);
	if (ret)
		return ret;

	return wqe_length;
}

static void mlx5_ib_qp_event(struct mlx5_core_qp *qp, int type)
{
	struct ib_qp *ibqp = &to_mibqp(qp)->ibqp;
	struct ib_event event;

	if (type == MLX5_EVENT_TYPE_PATH_MIG) {
		/* This event is only valid for trans_qps */
		to_mibqp(qp)->port = to_mibqp(qp)->trans_qp.alt_port;
	}

	if (ibqp->event_handler) {
		event.device     = ibqp->device;
		event.element.qp = ibqp;
		switch (type) {
		case MLX5_EVENT_TYPE_PATH_MIG:
			event.event = IB_EVENT_PATH_MIG;
			break;
		case MLX5_EVENT_TYPE_COMM_EST:
			event.event = IB_EVENT_COMM_EST;
			break;
		case MLX5_EVENT_TYPE_SQ_DRAINED:
			event.event = IB_EVENT_SQ_DRAINED;
			break;
		case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
			event.event = IB_EVENT_QP_LAST_WQE_REACHED;
			break;
		case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
			event.event = IB_EVENT_QP_FATAL;
			break;
		case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
			event.event = IB_EVENT_PATH_MIG_ERR;
			break;
		case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
			event.event = IB_EVENT_QP_REQ_ERR;
			break;
		case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
			event.event = IB_EVENT_QP_ACCESS_ERR;
			break;
		default:
			pr_warn("mlx5_ib: Unexpected event type %d on QP %06x\n", type, qp->qpn);
			return;
		}

		ibqp->event_handler(&event, ibqp->qp_context);
	}
}

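/*
 * Size the receive queue: either take the WQE count and stride requested by
 * user space verbatim, or derive them from the requested max_recv_wr and
 * max_recv_sge, rounding the stride up to a power of two and validating it
 * against the device's max_wqe_sz_rq capability.
 */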
static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
		       int has_rq, struct mlx5_ib_qp *qp, struct mlx5_ib_create_qp *ucmd)
{
	int wqe_size;
	int wq_size;

	/* Sanity check RQ size before proceeding */
	if (cap->max_recv_wr > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz)))
		return -EINVAL;

	if (!has_rq) {
		qp->rq.max_gs = 0;
		qp->rq.wqe_cnt = 0;
		qp->rq.wqe_shift = 0;
	} else {
		if (ucmd) {
			qp->rq.wqe_cnt = ucmd->rq_wqe_count;
			qp->rq.wqe_shift = ucmd->rq_wqe_shift;
			qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig;
			qp->rq.max_post = qp->rq.wqe_cnt;
		} else {
			wqe_size = qp->wq_sig ? sizeof(struct mlx5_wqe_signature_seg) : 0;
			wqe_size += cap->max_recv_sge * sizeof(struct mlx5_wqe_data_seg);
			wqe_size = roundup_pow_of_two(wqe_size);
			wq_size = roundup_pow_of_two(cap->max_recv_wr) * wqe_size;
			wq_size = max_t(int, wq_size, MLX5_SEND_WQE_BB);
			qp->rq.wqe_cnt = wq_size / wqe_size;
			if (wqe_size > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq)) {
				mlx5_ib_dbg(dev, "wqe_size %d, max %d\n",
					    wqe_size,
					    MLX5_CAP_GEN(dev->mdev,
							 max_wqe_sz_rq));
				return -EINVAL;
			}
			qp->rq.wqe_shift = ilog2(wqe_size);
			qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig;
			qp->rq.max_post = qp->rq.wqe_cnt;
		}
	}

	return 0;
}

static int sq_overhead(struct ib_qp_init_attr *attr)
{
	int size = 0;

	switch (attr->qp_type) {
	case IB_QPT_XRC_INI:
		size += sizeof(struct mlx5_wqe_xrc_seg);
		/* fall through */
	case IB_QPT_RC:
		size += sizeof(struct mlx5_wqe_ctrl_seg) +
			max(sizeof(struct mlx5_wqe_atomic_seg) +
			    sizeof(struct mlx5_wqe_raddr_seg),
			    sizeof(struct mlx5_wqe_umr_ctrl_seg) +
			    sizeof(struct mlx5_mkey_seg));
		break;

	case IB_QPT_XRC_TGT:
		return 0;

	case IB_QPT_UC:
		size += sizeof(struct mlx5_wqe_ctrl_seg) +
			max(sizeof(struct mlx5_wqe_raddr_seg),
			    sizeof(struct mlx5_wqe_umr_ctrl_seg) +
			    sizeof(struct mlx5_mkey_seg));
		break;

	case IB_QPT_UD:
		if (attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO)
			size += sizeof(struct mlx5_wqe_eth_pad) +
				sizeof(struct mlx5_wqe_eth_seg);
		/* fall through */
	case IB_QPT_SMI:
	case MLX5_IB_QPT_HW_GSI:
		size += sizeof(struct mlx5_wqe_ctrl_seg) +
			sizeof(struct mlx5_wqe_datagram_seg);
		break;

	case MLX5_IB_QPT_REG_UMR:
		size += sizeof(struct mlx5_wqe_ctrl_seg) +
			sizeof(struct mlx5_wqe_umr_ctrl_seg) +
			sizeof(struct mlx5_mkey_seg);
		break;

	default:
		return -EINVAL;
	}

	return size;
}

static int calc_send_wqe(struct ib_qp_init_attr *attr)
{
	int inl_size = 0;
	int size;

	size = sq_overhead(attr);
	if (size < 0)
		return size;

	if (attr->cap.max_inline_data) {
		inl_size = size + sizeof(struct mlx5_wqe_inline_seg) +
			   attr->cap.max_inline_data;
	}

	size += attr->cap.max_send_sge * sizeof(struct mlx5_wqe_data_seg);
	if (attr->create_flags & IB_QP_CREATE_SIGNATURE_EN &&
	    ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB) < MLX5_SIG_WQE_SIZE)
		return MLX5_SIG_WQE_SIZE;
	else
		return ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB);
}

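/*
 * Size the send queue: the per-WQE size comes from calc_send_wqe(), the
 * overall queue size is rounded up to a power of two and expressed in
 * MLX5_SEND_WQE_BB basic blocks, and both are validated against the
 * device's max_wqe_sz_sq and log_max_qp_sz capabilities.
 */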
static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
			struct mlx5_ib_qp *qp)
{
	int wqe_size;
	int wq_size;

	if (!attr->cap.max_send_wr)
		return 0;

	wqe_size = calc_send_wqe(attr);
	mlx5_ib_dbg(dev, "wqe_size %d\n", wqe_size);
	if (wqe_size < 0)
		return wqe_size;

	if (wqe_size > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)) {
		mlx5_ib_dbg(dev, "wqe_size(%d) > max_sq_desc_sz(%d)\n",
			    wqe_size, MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq));
		return -EINVAL;
	}

	qp->max_inline_data = wqe_size - sq_overhead(attr) -
			      sizeof(struct mlx5_wqe_inline_seg);
	attr->cap.max_inline_data = qp->max_inline_data;

	if (attr->create_flags & IB_QP_CREATE_SIGNATURE_EN)
		qp->signature_en = true;

	wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size);
	qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB;
	if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) {
		mlx5_ib_dbg(dev, "wqe count(%d) exceeds limits(%d)\n",
			    qp->sq.wqe_cnt,
			    1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz));
		return -ENOMEM;
	}
	qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
	qp->sq.max_gs = attr->cap.max_send_sge;
	qp->sq.max_post = wq_size / wqe_size;
	attr->cap.max_send_wr = qp->sq.max_post;

	return wq_size;
}

static int set_user_buf_size(struct mlx5_ib_dev *dev,
			     struct mlx5_ib_qp *qp,
			     struct mlx5_ib_create_qp *ucmd,
			     struct mlx5_ib_qp_base *base,
			     struct ib_qp_init_attr *attr)
{
	int desc_sz = 1 << qp->sq.wqe_shift;

	if (desc_sz > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)) {
		mlx5_ib_warn(dev, "desc_sz %d, max_sq_desc_sz %d\n",
			     desc_sz, MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq));
		return -EINVAL;
	}

	if (ucmd->sq_wqe_count && ((1 << ilog2(ucmd->sq_wqe_count)) != ucmd->sq_wqe_count)) {
		mlx5_ib_warn(dev, "sq_wqe_count %d is not a power of two\n",
			     ucmd->sq_wqe_count);
		return -EINVAL;
	}

	qp->sq.wqe_cnt = ucmd->sq_wqe_count;

	if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) {
		mlx5_ib_warn(dev, "wqe_cnt %d, max_wqes %d\n",
			     qp->sq.wqe_cnt,
			     1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz));
		return -EINVAL;
	}

	if (attr->qp_type == IB_QPT_RAW_PACKET) {
		base->ubuffer.buf_size = qp->rq.wqe_cnt << qp->rq.wqe_shift;
		qp->raw_packet_qp.sq.ubuffer.buf_size = qp->sq.wqe_cnt << 6;
	} else {
		base->ubuffer.buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
					 (qp->sq.wqe_cnt << 6);
	}

	return 0;
}

static int qp_has_rq(struct ib_qp_init_attr *attr)
{
	if (attr->qp_type == IB_QPT_XRC_INI ||
	    attr->qp_type == IB_QPT_XRC_TGT || attr->srq ||
	    attr->qp_type == MLX5_IB_QPT_REG_UMR ||
	    !attr->cap.max_recv_wr)
		return 0;

	return 1;
}

static int first_med_uuar(void)
{
	return 1;
}

static int next_uuar(int n)
{
	n++;

	while (((n % 4) & 2))
		n++;

	return n;
}

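/*
 * UUAR (micro UAR) bookkeeping: index 0 is the shared UUAR handed out for
 * the LOW latency class, index 2 is reserved for fast-path QPs, and the
 * remaining usable indices are split into a medium-class region starting at
 * first_med_uuar() followed by a high-class region whose size is derived
 * from num_low_latency_uuars.
 */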
static int num_med_uuar(struct mlx5_uuar_info *uuari)
{
	int n;

	n = uuari->num_uars * MLX5_NON_FP_BF_REGS_PER_PAGE -
		uuari->num_low_latency_uuars - 1;

	return n >= 0 ? n : 0;
}

static int max_uuari(struct mlx5_uuar_info *uuari)
{
	return uuari->num_uars * 4;
}

static int first_hi_uuar(struct mlx5_uuar_info *uuari)
{
	int med;
	int i;
	int t;

	med = num_med_uuar(uuari);
	for (t = 0, i = first_med_uuar();; i = next_uuar(i)) {
		t++;
		if (t == med)
			return next_uuar(i);
	}

	return 0;
}

static int alloc_high_class_uuar(struct mlx5_uuar_info *uuari)
{
	int i;

	for (i = first_hi_uuar(uuari); i < max_uuari(uuari); i = next_uuar(i)) {
		if (!test_bit(i, uuari->bitmap)) {
			set_bit(i, uuari->bitmap);
			uuari->count[i]++;
			return i;
		}
	}

	return -ENOMEM;
}

static int alloc_med_class_uuar(struct mlx5_uuar_info *uuari)
{
	int minidx = first_med_uuar();
	int i;

	for (i = first_med_uuar(); i < first_hi_uuar(uuari); i = next_uuar(i)) {
		if (uuari->count[i] < uuari->count[minidx])
			minidx = i;
	}

	uuari->count[minidx]++;
	return minidx;
}

static int alloc_uuar(struct mlx5_uuar_info *uuari,
		      enum mlx5_ib_latency_class lat)
{
	int uuarn = -EINVAL;

	mutex_lock(&uuari->lock);
	switch (lat) {
	case MLX5_IB_LATENCY_CLASS_LOW:
		uuarn = 0;
		uuari->count[uuarn]++;
		break;

	case MLX5_IB_LATENCY_CLASS_MEDIUM:
		if (uuari->ver < 2)
			uuarn = -ENOMEM;
		else
			uuarn = alloc_med_class_uuar(uuari);
		break;

	case MLX5_IB_LATENCY_CLASS_HIGH:
		if (uuari->ver < 2)
			uuarn = -ENOMEM;
		else
			uuarn = alloc_high_class_uuar(uuari);
		break;

	case MLX5_IB_LATENCY_CLASS_FAST_PATH:
		uuarn = 2;
		break;
	}
	mutex_unlock(&uuari->lock);

	return uuarn;
}

static void free_med_class_uuar(struct mlx5_uuar_info *uuari, int uuarn)
{
	clear_bit(uuarn, uuari->bitmap);
	--uuari->count[uuarn];
}

static void free_high_class_uuar(struct mlx5_uuar_info *uuari, int uuarn)
{
	clear_bit(uuarn, uuari->bitmap);
	--uuari->count[uuarn];
}

static void free_uuar(struct mlx5_uuar_info *uuari, int uuarn)
{
	int nuuars = uuari->num_uars * MLX5_BF_REGS_PER_PAGE;
	int high_uuar = nuuars - uuari->num_low_latency_uuars;

	mutex_lock(&uuari->lock);
	if (uuarn == 0) {
		--uuari->count[uuarn];
		goto out;
	}

	if (uuarn < high_uuar) {
		free_med_class_uuar(uuari, uuarn);
		goto out;
	}

	free_high_class_uuar(uuari, uuarn);

out:
	mutex_unlock(&uuari->lock);
}

static enum mlx5_qp_state to_mlx5_state(enum ib_qp_state state)
{
	switch (state) {
	case IB_QPS_RESET:	return MLX5_QP_STATE_RST;
	case IB_QPS_INIT:	return MLX5_QP_STATE_INIT;
	case IB_QPS_RTR:	return MLX5_QP_STATE_RTR;
	case IB_QPS_RTS:	return MLX5_QP_STATE_RTS;
	case IB_QPS_SQD:	return MLX5_QP_STATE_SQD;
	case IB_QPS_SQE:	return MLX5_QP_STATE_SQER;
	case IB_QPS_ERR:	return MLX5_QP_STATE_ERR;
	default:		return -1;
	}
}

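/*
 * Map an IB QP type to the mlx5 transport service type programmed into the
 * QP context; types the device cannot serve return -EINVAL.
 */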
static int to_mlx5_st(enum ib_qp_type type)
{
	switch (type) {
	case IB_QPT_RC:			return MLX5_QP_ST_RC;
	case IB_QPT_UC:			return MLX5_QP_ST_UC;
	case IB_QPT_UD:			return MLX5_QP_ST_UD;
	case MLX5_IB_QPT_REG_UMR:	return MLX5_QP_ST_REG_UMR;
	case IB_QPT_XRC_INI:
	case IB_QPT_XRC_TGT:		return MLX5_QP_ST_XRC;
	case IB_QPT_SMI:		return MLX5_QP_ST_QP0;
	case MLX5_IB_QPT_HW_GSI:	return MLX5_QP_ST_QP1;
	case IB_QPT_RAW_IPV6:		return MLX5_QP_ST_RAW_IPV6;
	case IB_QPT_RAW_PACKET:
	case IB_QPT_RAW_ETHERTYPE:	return MLX5_QP_ST_RAW_ETHERTYPE;
	case IB_QPT_MAX:
	default:			return -EINVAL;
	}
}

static int uuarn_to_uar_index(struct mlx5_uuar_info *uuari, int uuarn)
{
	return uuari->uars[uuarn / MLX5_BF_REGS_PER_PAGE].index;
}

static int mlx5_ib_umem_get(struct mlx5_ib_dev *dev,
			    struct ib_pd *pd,
			    unsigned long addr, size_t size,
			    struct ib_umem **umem,
			    int *npages, int *page_shift, int *ncont,
			    u32 *offset)
{
	int err;

	*umem = ib_umem_get(pd->uobject->context, addr, size, 0, 0);
	if (IS_ERR(*umem)) {
		mlx5_ib_dbg(dev, "umem_get failed\n");
		return PTR_ERR(*umem);
	}

	mlx5_ib_cont_pages(*umem, addr, npages, page_shift, ncont, NULL);

	err = mlx5_ib_get_buf_offset(addr, *page_shift, offset);
	if (err) {
		mlx5_ib_warn(dev, "bad offset\n");
		goto err_umem;
	}

	mlx5_ib_dbg(dev, "addr 0x%lx, size %zu, npages %d, page_shift %d, ncont %d, offset %d\n",
		    addr, size, *npages, *page_shift, *ncont, *offset);

	return 0;

err_umem:
	ib_umem_release(*umem);
	*umem = NULL;

	return err;
}

static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
			  struct mlx5_ib_qp *qp, struct ib_udata *udata,
			  struct ib_qp_init_attr *attr,
			  struct mlx5_create_qp_mbox_in **in,
			  struct mlx5_ib_create_qp_resp *resp, int *inlen,
			  struct mlx5_ib_qp_base *base)
{
	struct mlx5_ib_ucontext *context;
	struct mlx5_ib_create_qp ucmd;
	struct mlx5_ib_ubuffer *ubuffer = &base->ubuffer;
	int page_shift = 0;
	int uar_index;
	int npages;
	u32 offset = 0;
	int uuarn;
	int ncont = 0;
	int err;

	err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
	if (err) {
		mlx5_ib_dbg(dev, "copy failed\n");
		return err;
	}

	context = to_mucontext(pd->uobject->context);
	/*
	 * TBD: should come from the verbs when we have the API
	 */
	if (qp->flags & MLX5_IB_QP_CROSS_CHANNEL)
		/* In CROSS_CHANNEL CQ and QP must use the same UAR */
		uuarn = MLX5_CROSS_CHANNEL_UUAR;
	else {
		uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_HIGH);
		if (uuarn < 0) {
			mlx5_ib_dbg(dev, "failed to allocate low latency UUAR\n");
			mlx5_ib_dbg(dev, "reverting to medium latency\n");
			uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_MEDIUM);
			if (uuarn < 0) {
				mlx5_ib_dbg(dev, "failed to allocate medium latency UUAR\n");
				mlx5_ib_dbg(dev, "reverting to high latency\n");
				uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_LOW);
				if (uuarn < 0) {
					mlx5_ib_warn(dev, "uuar allocation failed\n");
					return uuarn;
				}
			}
		}
	}

	uar_index = uuarn_to_uar_index(&context->uuari, uuarn);
	mlx5_ib_dbg(dev, "uuarn 0x%x, uar_index 0x%x\n", uuarn, uar_index);

	qp->rq.offset = 0;
	qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
	qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;

	err = set_user_buf_size(dev, qp, &ucmd, base, attr);
	if (err)
		goto err_uuar;

	if (ucmd.buf_addr && ubuffer->buf_size) {
		ubuffer->buf_addr = ucmd.buf_addr;
		err = mlx5_ib_umem_get(dev, pd, ubuffer->buf_addr,
				       ubuffer->buf_size,
				       &ubuffer->umem, &npages, &page_shift,
				       &ncont, &offset);
		if (err)
			goto err_uuar;
	} else {
		ubuffer->umem = NULL;
	}

	*inlen = sizeof(**in) + sizeof(*(*in)->pas) * ncont;
	*in = mlx5_vzalloc(*inlen);
	if (!*in) {
		err = -ENOMEM;
		goto err_umem;
	}
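	/*
	 * Translate the pinned user buffer into the firmware page list and
	 * record the page size, offset and doorbell details in the QP context.
	 */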
	if (ubuffer->umem)
		mlx5_ib_populate_pas(dev, ubuffer->umem, page_shift,
				     (*in)->pas, 0);
	(*in)->ctx.log_pg_sz_remote_qpn =
		cpu_to_be32((page_shift - MLX5_ADAPTER_PAGE_SHIFT) << 24);
	(*in)->ctx.params2 = cpu_to_be32(offset << 6);

	(*in)->ctx.qp_counter_set_usr_page = cpu_to_be32(uar_index);
	resp->uuar_index = uuarn;
	qp->uuarn = uuarn;

	err = mlx5_ib_db_map_user(context, ucmd.db_addr, &qp->db);
	if (err) {
		mlx5_ib_dbg(dev, "map failed\n");
		goto err_free;
	}

	err = ib_copy_to_udata(udata, resp, sizeof(*resp));
	if (err) {
		mlx5_ib_dbg(dev, "copy failed\n");
		goto err_unmap;
	}
	qp->create_type = MLX5_QP_USER;

	return 0;

err_unmap:
	mlx5_ib_db_unmap_user(context, &qp->db);

err_free:
	kvfree(*in);

err_umem:
	if (ubuffer->umem)
		ib_umem_release(ubuffer->umem);

err_uuar:
	free_uuar(&context->uuari, uuarn);
	return err;
}

static void destroy_qp_user(struct ib_pd *pd, struct mlx5_ib_qp *qp,
			    struct mlx5_ib_qp_base *base)
{
	struct mlx5_ib_ucontext *context;

	context = to_mucontext(pd->uobject->context);
	mlx5_ib_db_unmap_user(context, &qp->db);
	if (base->ubuffer.umem)
		ib_umem_release(base->ubuffer.umem);
	free_uuar(&context->uuari, qp->uuarn);
}

static int create_kernel_qp(struct mlx5_ib_dev *dev,
			    struct ib_qp_init_attr *init_attr,
			    struct mlx5_ib_qp *qp,
			    struct mlx5_create_qp_mbox_in **in, int *inlen,
			    struct mlx5_ib_qp_base *base)
{
	enum mlx5_ib_latency_class lc = MLX5_IB_LATENCY_CLASS_LOW;
	struct mlx5_uuar_info *uuari;
	int uar_index;
	int uuarn;
	int err;

	uuari = &dev->mdev->priv.uuari;
	if (init_attr->create_flags & ~(IB_QP_CREATE_SIGNATURE_EN |
					IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK |
					IB_QP_CREATE_IPOIB_UD_LSO |
					mlx5_ib_create_qp_sqpn_qp1()))
		return -EINVAL;

	if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR)
		lc = MLX5_IB_LATENCY_CLASS_FAST_PATH;

	uuarn = alloc_uuar(uuari, lc);
	if (uuarn < 0) {
		mlx5_ib_dbg(dev, "failed to allocate UUAR\n");
		return -ENOMEM;
	}

	qp->bf = &uuari->bfs[uuarn];
	uar_index = qp->bf->uar->index;

	err = calc_sq_size(dev, init_attr, qp);
	if (err < 0) {
		mlx5_ib_dbg(dev, "err %d\n", err);
		goto err_uuar;
	}

	qp->rq.offset = 0;
	qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
	base->ubuffer.buf_size = err + (qp->rq.wqe_cnt << qp->rq.wqe_shift);

	err = mlx5_buf_alloc(dev->mdev, base->ubuffer.buf_size, &qp->buf);
	if (err) {
		mlx5_ib_dbg(dev, "err %d\n", err);
		goto err_uuar;
	}

	qp->sq.qend = mlx5_get_send_wqe(qp, qp->sq.wqe_cnt);
	*inlen = sizeof(**in) + sizeof(*(*in)->pas) * qp->buf.npages;
	*in = mlx5_vzalloc(*inlen);
	if (!*in) {
		err = -ENOMEM;
		goto err_buf;
	}
	(*in)->ctx.qp_counter_set_usr_page = cpu_to_be32(uar_index);
	(*in)->ctx.log_pg_sz_remote_qpn =
		cpu_to_be32((qp->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT) << 24);
	/* Set "fast registration enabled" for all kernel QPs */
	(*in)->ctx.params1 |= cpu_to_be32(1 << 11);
	(*in)->ctx.sq_crq_size |= cpu_to_be16(1 << 4);

	if (init_attr->create_flags & mlx5_ib_create_qp_sqpn_qp1()) {
		(*in)->ctx.deth_sqpn = cpu_to_be32(1);
		qp->flags |= MLX5_IB_QP_SQPN_QP1;
	}

	mlx5_fill_page_array(&qp->buf, (*in)->pas);

	err = mlx5_db_alloc(dev->mdev, &qp->db);
	if (err) {
		mlx5_ib_dbg(dev, "err %d\n", err);
		goto err_free;
	}

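	/* Per-WQE bookkeeping arrays used while posting and completing work requests */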
	qp->sq.wrid = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wrid), GFP_KERNEL);
	qp->sq.wr_data = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wr_data), GFP_KERNEL);
	qp->rq.wrid = kmalloc(qp->rq.wqe_cnt * sizeof(*qp->rq.wrid), GFP_KERNEL);
	qp->sq.w_list = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.w_list), GFP_KERNEL);
	qp->sq.wqe_head = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wqe_head), GFP_KERNEL);

	if (!qp->sq.wrid || !qp->sq.wr_data || !qp->rq.wrid ||
	    !qp->sq.w_list || !qp->sq.wqe_head) {
		err = -ENOMEM;
		goto err_wrid;
	}
	qp->create_type = MLX5_QP_KERNEL;

	return 0;

err_wrid:
	mlx5_db_free(dev->mdev, &qp->db);
	kfree(qp->sq.wqe_head);
	kfree(qp->sq.w_list);
	kfree(qp->sq.wrid);
	kfree(qp->sq.wr_data);
	kfree(qp->rq.wrid);

err_free:
	kvfree(*in);

err_buf:
	mlx5_buf_free(dev->mdev, &qp->buf);

err_uuar:
	free_uuar(&dev->mdev->priv.uuari, uuarn);
	return err;
}

static void destroy_qp_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
{
	mlx5_db_free(dev->mdev, &qp->db);
	kfree(qp->sq.wqe_head);
	kfree(qp->sq.w_list);
	kfree(qp->sq.wrid);
	kfree(qp->sq.wr_data);
	kfree(qp->rq.wrid);
	mlx5_buf_free(dev->mdev, &qp->buf);
	free_uuar(&dev->mdev->priv.uuari, qp->bf->uuarn);
}

static __be32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr)
{
	if (attr->srq || (attr->qp_type == IB_QPT_XRC_TGT) ||
	    (attr->qp_type == IB_QPT_XRC_INI))
		return cpu_to_be32(MLX5_SRQ_RQ);
	else if (!qp->has_rq)
		return cpu_to_be32(MLX5_ZERO_LEN_RQ);
	else
		return cpu_to_be32(MLX5_NON_ZERO_RQ);
}

static int is_connected(enum ib_qp_type qp_type)
{
	if (qp_type == IB_QPT_RC || qp_type == IB_QPT_UC)
		return 1;

	return 0;
}

static int create_raw_packet_qp_tis(struct mlx5_ib_dev *dev,
				    struct mlx5_ib_sq *sq, u32 tdn)
{
	u32 in[MLX5_ST_SZ_DW(create_tis_in)];
	void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);

	memset(in, 0, sizeof(in));

	MLX5_SET(tisc, tisc, transport_domain, tdn);

	return mlx5_core_create_tis(dev->mdev, in, sizeof(in), &sq->tisn);
}

static void destroy_raw_packet_qp_tis(struct mlx5_ib_dev *dev,
				      struct mlx5_ib_sq *sq)
{
	mlx5_core_destroy_tis(dev->mdev, sq->tisn);
}

static int create_raw_packet_qp_sq(struct mlx5_ib_dev *dev,
				   struct mlx5_ib_sq *sq, void *qpin,
				   struct ib_pd *pd)
{
	struct mlx5_ib_ubuffer *ubuffer = &sq->ubuffer;
	__be64 *pas;
	void *in;
	void *sqc;
	void *qpc = MLX5_ADDR_OF(create_qp_in, qpin, qpc);
	void *wq;
	int inlen;
	int err;
	int page_shift = 0;
	int npages;
	int ncont = 0;
	u32 offset = 0;

	err = mlx5_ib_umem_get(dev, pd, ubuffer->buf_addr, ubuffer->buf_size,
			       &sq->ubuffer.umem, &npages, &page_shift,
			       &ncont, &offset);
	if (err)
		return err;

	inlen = MLX5_ST_SZ_BYTES(create_sq_in) + sizeof(u64) * ncont;
	in = mlx5_vzalloc(inlen);
	if (!in) {
		err = -ENOMEM;
		goto err_umem;
	}

	sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
	MLX5_SET(sqc, sqc, flush_in_error_en, 1);
	MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
	MLX5_SET(sqc, sqc, user_index, MLX5_GET(qpc, qpc, user_index));
	MLX5_SET(sqc, sqc, cqn, MLX5_GET(qpc, qpc, cqn_snd));
	MLX5_SET(sqc, sqc, tis_lst_sz, 1);
	MLX5_SET(sqc, sqc, tis_num_0, sq->tisn);

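	/* Describe the SQ work queue itself: stride, size and page layout are
	 * taken from the QP context built by the common create path.
	 */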
	wq = MLX5_ADDR_OF(sqc, sqc, wq);
	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
	MLX5_SET(wq, wq, pd, MLX5_GET(qpc, qpc, pd));
	MLX5_SET(wq, wq, uar_page, MLX5_GET(qpc, qpc, uar_page));
	MLX5_SET64(wq, wq, dbr_addr, MLX5_GET64(qpc, qpc, dbr_addr));
	MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
	MLX5_SET(wq, wq, log_wq_sz, MLX5_GET(qpc, qpc, log_sq_size));
	MLX5_SET(wq, wq, log_wq_pg_sz, page_shift - MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET(wq, wq, page_offset, offset);

	pas = (__be64 *)MLX5_ADDR_OF(wq, wq, pas);
	mlx5_ib_populate_pas(dev, sq->ubuffer.umem, page_shift, pas, 0);

	err = mlx5_core_create_sq_tracked(dev->mdev, in, inlen, &sq->base.mqp);

	kvfree(in);

	if (err)
		goto err_umem;

	return 0;

err_umem:
	ib_umem_release(sq->ubuffer.umem);
	sq->ubuffer.umem = NULL;

	return err;
}

static void destroy_raw_packet_qp_sq(struct mlx5_ib_dev *dev,
				     struct mlx5_ib_sq *sq)
{
	mlx5_core_destroy_sq_tracked(dev->mdev, &sq->base.mqp);
	ib_umem_release(sq->ubuffer.umem);
}

static int get_rq_pas_size(void *qpc)
{
	u32 log_page_size = MLX5_GET(qpc, qpc, log_page_size) + 12;
	u32 log_rq_stride = MLX5_GET(qpc, qpc, log_rq_stride);
	u32 log_rq_size   = MLX5_GET(qpc, qpc, log_rq_size);
	u32 page_offset   = MLX5_GET(qpc, qpc, page_offset);
	u32 po_quanta	  = 1 << (log_page_size - 6);
	u32 rq_sz	  = 1 << (log_rq_size + 4 + log_rq_stride);
	u32 page_size	  = 1 << log_page_size;
	u32 rq_sz_po      = rq_sz + (page_offset * po_quanta);
	u32 rq_num_pas	  = (rq_sz_po + page_size - 1) / page_size;

	return rq_num_pas * sizeof(u64);
}

static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
				   struct mlx5_ib_rq *rq, void *qpin)
{
	__be64 *pas;
	__be64 *qp_pas;
	void *in;
	void *rqc;
	void *wq;
	void *qpc = MLX5_ADDR_OF(create_qp_in, qpin, qpc);
	int inlen;
	int err;
	u32 rq_pas_size = get_rq_pas_size(qpc);

	inlen = MLX5_ST_SZ_BYTES(create_rq_in) + rq_pas_size;
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
	MLX5_SET(rqc, rqc, vsd, 1);
	MLX5_SET(rqc, rqc, mem_rq_type, MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE);
	MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
	MLX5_SET(rqc, rqc, flush_in_error_en, 1);
	MLX5_SET(rqc, rqc, user_index, MLX5_GET(qpc, qpc, user_index));
	MLX5_SET(rqc, rqc, cqn, MLX5_GET(qpc, qpc, cqn_rcv));

	wq = MLX5_ADDR_OF(rqc, rqc, wq);
	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
	MLX5_SET(wq, wq, end_padding_mode,
		 MLX5_GET(qpc, qpc, end_padding_mode));
	MLX5_SET(wq, wq, page_offset, MLX5_GET(qpc, qpc, page_offset));
	MLX5_SET(wq, wq, pd, MLX5_GET(qpc, qpc, pd));
	MLX5_SET64(wq, wq, dbr_addr, MLX5_GET64(qpc, qpc, dbr_addr));
	MLX5_SET(wq, wq, log_wq_stride, MLX5_GET(qpc, qpc, log_rq_stride) + 4);
	MLX5_SET(wq, wq, log_wq_pg_sz, MLX5_GET(qpc, qpc, log_page_size));
	MLX5_SET(wq, wq, log_wq_sz, MLX5_GET(qpc, qpc, log_rq_size));

	pas = (__be64 *)MLX5_ADDR_OF(wq, wq, pas);
	qp_pas = (__be64 *)MLX5_ADDR_OF(create_qp_in, qpin, pas);
	memcpy(pas, qp_pas, rq_pas_size);

	err = mlx5_core_create_rq_tracked(dev->mdev, in, inlen, &rq->base.mqp);

	kvfree(in);

	return err;
}

static void destroy_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
				     struct mlx5_ib_rq *rq)
{
	mlx5_core_destroy_rq_tracked(dev->mdev, &rq->base.mqp);
}

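/*
 * A raw packet QP also needs a TIR so received traffic can be dispatched
 * directly to its inline RQ within the user context's transport domain.
 */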
static int create_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
				    struct mlx5_ib_rq *rq, u32 tdn)
{
	u32 *in;
	void *tirc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_tir_in);
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_DIRECT);
	MLX5_SET(tirc, tirc, inline_rqn, rq->base.mqp.qpn);
	MLX5_SET(tirc, tirc, transport_domain, tdn);

	err = mlx5_core_create_tir(dev->mdev, in, inlen, &rq->tirn);

	kvfree(in);

	return err;
}

static void destroy_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
				      struct mlx5_ib_rq *rq)
{
	mlx5_core_destroy_tir(dev->mdev, rq->tirn);
}

static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
				struct mlx5_create_qp_mbox_in *in,
				struct ib_pd *pd)
{
	struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
	struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
	struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
	struct ib_uobject *uobj = pd->uobject;
	struct ib_ucontext *ucontext = uobj->context;
	struct mlx5_ib_ucontext *mucontext = to_mucontext(ucontext);
	int err;
	u32 tdn = mucontext->tdn;

	if (qp->sq.wqe_cnt) {
		err = create_raw_packet_qp_tis(dev, sq, tdn);
		if (err)
			return err;

		err = create_raw_packet_qp_sq(dev, sq, in, pd);
		if (err)
			goto err_destroy_tis;

		sq->base.container_mibqp = qp;
	}

	if (qp->rq.wqe_cnt) {
		err = create_raw_packet_qp_rq(dev, rq, in);
		if (err)
			goto err_destroy_sq;

		rq->base.container_mibqp = qp;

		err = create_raw_packet_qp_tir(dev, rq, tdn);
		if (err)
			goto err_destroy_rq;
	}

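	/* Expose the SQ number as the QP number when a send queue exists,
	 * otherwise fall back to the RQ number.
	 */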
	qp->trans_qp.base.mqp.qpn = qp->sq.wqe_cnt ? sq->base.mqp.qpn :
						     rq->base.mqp.qpn;

	return 0;

err_destroy_rq:
	destroy_raw_packet_qp_rq(dev, rq);
err_destroy_sq:
	if (!qp->sq.wqe_cnt)
		return err;
	destroy_raw_packet_qp_sq(dev, sq);
err_destroy_tis:
	destroy_raw_packet_qp_tis(dev, sq);

	return err;
}

static void destroy_raw_packet_qp(struct mlx5_ib_dev *dev,
				  struct mlx5_ib_qp *qp)
{
	struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
	struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
	struct mlx5_ib_rq *rq = &raw_packet_qp->rq;

	if (qp->rq.wqe_cnt) {
		destroy_raw_packet_qp_tir(dev, rq);
		destroy_raw_packet_qp_rq(dev, rq);
	}

	if (qp->sq.wqe_cnt) {
		destroy_raw_packet_qp_sq(dev, sq);
		destroy_raw_packet_qp_tis(dev, sq);
	}
}

static void raw_packet_qp_copy_info(struct mlx5_ib_qp *qp,
				    struct mlx5_ib_raw_packet_qp *raw_packet_qp)
{
	struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
	struct mlx5_ib_rq *rq = &raw_packet_qp->rq;

	sq->sq = &qp->sq;
	rq->rq = &qp->rq;
	sq->doorbell = &qp->db;
	rq->doorbell = &qp->db;
}

static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
			    struct ib_qp_init_attr *init_attr,
			    struct ib_udata *udata, struct mlx5_ib_qp *qp)
{
	struct mlx5_ib_resources *devr = &dev->devr;
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_ib_qp_base *base;
	struct mlx5_ib_create_qp_resp resp;
	struct mlx5_create_qp_mbox_in *in;
	struct mlx5_ib_create_qp ucmd;
	int inlen = sizeof(*in);
	int err;
	u32 uidx = MLX5_IB_DEFAULT_UIDX;
	void *qpc;

	base = init_attr->qp_type == IB_QPT_RAW_PACKET ?
	       &qp->raw_packet_qp.rq.base :
	       &qp->trans_qp.base;

	if (init_attr->qp_type != IB_QPT_RAW_PACKET)
		mlx5_ib_odp_create_qp(qp);

	mutex_init(&qp->mutex);
	spin_lock_init(&qp->sq.lock);
	spin_lock_init(&qp->rq.lock);

	if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
		if (!MLX5_CAP_GEN(mdev, block_lb_mc)) {
			mlx5_ib_dbg(dev, "block multicast loopback isn't supported\n");
			return -EINVAL;
		} else {
			qp->flags |= MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK;
		}
	}

	if (init_attr->create_flags &
	    (IB_QP_CREATE_CROSS_CHANNEL |
	     IB_QP_CREATE_MANAGED_SEND |
	     IB_QP_CREATE_MANAGED_RECV)) {
		if (!MLX5_CAP_GEN(mdev, cd)) {
			mlx5_ib_dbg(dev, "cross-channel isn't supported\n");
			return -EINVAL;
		}
		if (init_attr->create_flags & IB_QP_CREATE_CROSS_CHANNEL)
			qp->flags |= MLX5_IB_QP_CROSS_CHANNEL;
		if (init_attr->create_flags & IB_QP_CREATE_MANAGED_SEND)
			qp->flags |= MLX5_IB_QP_MANAGED_SEND;
		if (init_attr->create_flags & IB_QP_CREATE_MANAGED_RECV)
			qp->flags |= MLX5_IB_QP_MANAGED_RECV;
	}

	if (init_attr->qp_type == IB_QPT_UD &&
	    (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO))
		if (!MLX5_CAP_GEN(mdev, ipoib_basic_offloads)) {
			mlx5_ib_dbg(dev, "ipoib UD lso qp isn't supported\n");
			return -EOPNOTSUPP;
		}

	if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
		qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE;

	if (pd && pd->uobject) {
		if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
			mlx5_ib_dbg(dev, "copy failed\n");
			return -EFAULT;
		}

		err = get_qp_user_index(to_mucontext(pd->uobject->context),
					&ucmd, udata->inlen, &uidx);
		if (err)
			return err;

		qp->wq_sig = !!(ucmd.flags & MLX5_QP_FLAG_SIGNATURE);
		qp->scat_cqe = !!(ucmd.flags & MLX5_QP_FLAG_SCATTER_CQE);
	} else {
		qp->wq_sig = !!wq_signature;
	}

	qp->has_rq = qp_has_rq(init_attr);
	err = set_rq_size(dev, &init_attr->cap, qp->has_rq,
			  qp, (pd && pd->uobject) ? &ucmd : NULL);
	if (err) {
		mlx5_ib_dbg(dev, "err %d\n", err);
		return err;
	}

	if (pd) {
		if (pd->uobject) {
			__u32 max_wqes =
				1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
			mlx5_ib_dbg(dev, "requested sq_wqe_count (%d)\n", ucmd.sq_wqe_count);
			if (ucmd.rq_wqe_shift != qp->rq.wqe_shift ||
			    ucmd.rq_wqe_count != qp->rq.wqe_cnt) {
				mlx5_ib_dbg(dev, "invalid rq params\n");
				return -EINVAL;
			}
			if (ucmd.sq_wqe_count > max_wqes) {
				mlx5_ib_dbg(dev, "requested sq_wqe_count (%d) > max allowed (%d)\n",
					    ucmd.sq_wqe_count, max_wqes);
				return -EINVAL;
			}
			if (init_attr->create_flags &
			    mlx5_ib_create_qp_sqpn_qp1()) {
				mlx5_ib_dbg(dev, "user-space is not allowed to create UD QPs spoofing as QP1\n");
				return -EINVAL;
			}
			err = create_user_qp(dev, pd, qp, udata, init_attr, &in,
					     &resp, &inlen, base);
			if (err)
				mlx5_ib_dbg(dev, "err %d\n", err);
		} else {
			err = create_kernel_qp(dev, init_attr, qp, &in, &inlen,
					       base);
			if (err)
				mlx5_ib_dbg(dev, "err %d\n", err);
		}

		if (err)
			return err;
	} else {
		in = mlx5_vzalloc(sizeof(*in));
		if (!in)
			return -ENOMEM;

		qp->create_type = MLX5_QP_EMPTY;
	}

	if (is_sqp(init_attr->qp_type))
		qp->port = init_attr->port_num;

	in->ctx.flags = cpu_to_be32(to_mlx5_st(init_attr->qp_type) << 16 |
				    MLX5_QP_PM_MIGRATED << 11);

	if (init_attr->qp_type != MLX5_IB_QPT_REG_UMR)
		in->ctx.flags_pd = cpu_to_be32(to_mpd(pd ? pd : devr->p0)->pdn);
	else
		in->ctx.flags_pd = cpu_to_be32(MLX5_QP_LAT_SENSITIVE);

	if (qp->wq_sig)
		in->ctx.flags_pd |= cpu_to_be32(MLX5_QP_ENABLE_SIG);

	if (qp->flags & MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK)
		in->ctx.flags_pd |= cpu_to_be32(MLX5_QP_BLOCK_MCAST);

	if (qp->flags & MLX5_IB_QP_CROSS_CHANNEL)
		in->ctx.params2 |= cpu_to_be32(MLX5_QP_BIT_CC_MASTER);
	if (qp->flags & MLX5_IB_QP_MANAGED_SEND)
		in->ctx.params2 |= cpu_to_be32(MLX5_QP_BIT_CC_SLAVE_SEND);
	if (qp->flags & MLX5_IB_QP_MANAGED_RECV)
		in->ctx.params2 |= cpu_to_be32(MLX5_QP_BIT_CC_SLAVE_RECV);

	if (qp->scat_cqe && is_connected(init_attr->qp_type)) {
		int rcqe_sz;
		int scqe_sz;

		rcqe_sz = mlx5_ib_get_cqe_size(dev, init_attr->recv_cq);
		scqe_sz = mlx5_ib_get_cqe_size(dev, init_attr->send_cq);

		if (rcqe_sz == 128)
			in->ctx.cs_res = MLX5_RES_SCAT_DATA64_CQE;
		else
			in->ctx.cs_res = MLX5_RES_SCAT_DATA32_CQE;

		if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) {
			if (scqe_sz == 128)
				in->ctx.cs_req = MLX5_REQ_SCAT_DATA64_CQE;
			else
				in->ctx.cs_req = MLX5_REQ_SCAT_DATA32_CQE;
		}
	}

	if (qp->rq.wqe_cnt) {
		in->ctx.rq_size_stride = (qp->rq.wqe_shift - 4);
		in->ctx.rq_size_stride |= ilog2(qp->rq.wqe_cnt) << 3;
	}

	in->ctx.rq_type_srqn = get_rx_type(qp, init_attr);

	if (qp->sq.wqe_cnt)
		in->ctx.sq_crq_size |= cpu_to_be16(ilog2(qp->sq.wqe_cnt) << 11);
	else
		in->ctx.sq_crq_size |= cpu_to_be16(0x8000);

	/* Set default resources */
	switch (init_attr->qp_type) {
	case IB_QPT_XRC_TGT:
		in->ctx.cqn_recv = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn);
		in->ctx.cqn_send = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn);
		in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn);
		in->ctx.xrcd = cpu_to_be32(to_mxrcd(init_attr->xrcd)->xrcdn);
		break;
	case IB_QPT_XRC_INI:
		in->ctx.cqn_recv = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn);
		in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x1)->xrcdn);
		in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn);
		break;
	default:
		if (init_attr->srq) {
			in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x0)->xrcdn);
			in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(init_attr->srq)->msrq.srqn);
		} else {
			in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x1)->xrcdn);
			in->ctx.rq_type_srqn |=
				cpu_to_be32(to_msrq(devr->s1)->msrq.srqn);
		}
	}

	if (init_attr->send_cq)
		in->ctx.cqn_send = cpu_to_be32(to_mcq(init_attr->send_cq)->mcq.cqn);

	if (init_attr->recv_cq)
		in->ctx.cqn_recv = cpu_to_be32(to_mcq(init_attr->recv_cq)->mcq.cqn);

	in->ctx.db_rec_addr = cpu_to_be64(qp->db.dma);

	if (MLX5_CAP_GEN(mdev, cqe_version) == MLX5_CQE_VERSION_V1) {
		qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
		/* 0xffffff means we ask to work with cqe version 0 */
		MLX5_SET(qpc, qpc, user_index, uidx);
	}
	/* we use IB_QP_CREATE_IPOIB_UD_LSO to indicate an ipoib qp */
	if (init_attr->qp_type == IB_QPT_UD &&
	    (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO)) {
		qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
		MLX5_SET(qpc, qpc, ulp_stateless_offload_mode, 1);
		qp->flags |= MLX5_IB_QP_LSO;
	}

	if (init_attr->qp_type == IB_QPT_RAW_PACKET) {
		qp->raw_packet_qp.sq.ubuffer.buf_addr = ucmd.sq_buf_addr;
		raw_packet_qp_copy_info(qp, &qp->raw_packet_qp);
		err = create_raw_packet_qp(dev, qp, in, pd);
	} else {
		err = mlx5_core_create_qp(dev->mdev, &base->mqp, in, inlen);
	}

	if (err) {
		mlx5_ib_dbg(dev, "create qp failed\n");
		goto err_create;
	}

	kvfree(in);

	base->container_mibqp = qp;
	base->mqp.event = mlx5_ib_qp_event;

	return 0;

err_create:
	if (qp->create_type == MLX5_QP_USER)
		destroy_qp_user(pd, qp, base);
	else if (qp->create_type == MLX5_QP_KERNEL)
		destroy_qp_kernel(dev, qp);

	kvfree(in);
	return err;
}

static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv_cq)
	__acquires(&send_cq->lock) __acquires(&recv_cq->lock)
{
	if (send_cq) {
		if (recv_cq) {
			if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
				spin_lock_irq(&send_cq->lock);
				spin_lock_nested(&recv_cq->lock,
						 SINGLE_DEPTH_NESTING);
			} else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) {
				spin_lock_irq(&send_cq->lock);
				__acquire(&recv_cq->lock);
			} else {
				spin_lock_irq(&recv_cq->lock);
				spin_lock_nested(&send_cq->lock,
						 SINGLE_DEPTH_NESTING);
			}
		} else {
			spin_lock_irq(&send_cq->lock);
			__acquire(&recv_cq->lock);
		}
	} else if (recv_cq) {
		spin_lock_irq(&recv_cq->lock);
		__acquire(&send_cq->lock);
	} else {
		__acquire(&send_cq->lock);
		__acquire(&recv_cq->lock);
	}
}

static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv_cq)
	__releases(&send_cq->lock) __releases(&recv_cq->lock)
{
	if (send_cq) {
		if (recv_cq) {
			if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
				spin_unlock(&recv_cq->lock);
				spin_unlock_irq(&send_cq->lock);
			} else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) {
				__release(&recv_cq->lock);
				spin_unlock_irq(&send_cq->lock);
			} else {
				spin_unlock(&send_cq->lock);
				spin_unlock_irq(&recv_cq->lock);
			}
		} else {
			__release(&recv_cq->lock);
			spin_unlock_irq(&send_cq->lock);
		}
	} else if (recv_cq) {
		__release(&send_cq->lock);
		spin_unlock_irq(&recv_cq->lock);
	} else {
		__release(&recv_cq->lock);
		__release(&send_cq->lock);
	}
}

static struct mlx5_ib_pd *get_pd(struct mlx5_ib_qp *qp)
{
	return to_mpd(qp->ibqp.pd);
}

static void get_cqs(struct mlx5_ib_qp *qp,
		    struct mlx5_ib_cq **send_cq, struct mlx5_ib_cq **recv_cq)
{
	switch (qp->ibqp.qp_type) {
	case IB_QPT_XRC_TGT:
		*send_cq = NULL;
		*recv_cq = NULL;
		break;
	case MLX5_IB_QPT_REG_UMR:
	case IB_QPT_XRC_INI:
		*send_cq = to_mcq(qp->ibqp.send_cq);
		*recv_cq = NULL;
		break;

	case IB_QPT_SMI:
	case MLX5_IB_QPT_HW_GSI:
	case IB_QPT_RC:
	case IB_QPT_UC:
	case IB_QPT_UD:
	case IB_QPT_RAW_IPV6:
	case IB_QPT_RAW_ETHERTYPE:
	case IB_QPT_RAW_PACKET:
		*send_cq = to_mcq(qp->ibqp.send_cq);
		*recv_cq = to_mcq(qp->ibqp.recv_cq);
		break;

	case IB_QPT_MAX:
	default:
		*send_cq = NULL;
		*recv_cq = NULL;
		break;
	}
}

static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
				u16 operation);

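/*
 * Tear down a QP created by create_qp_common(): move it to RESET if needed,
 * purge its CQEs from the kernel CQs, destroy the hardware object (or the
 * raw packet sub-objects) and release the user or kernel resources.
 */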
static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
{
	struct mlx5_ib_cq *send_cq, *recv_cq;
	struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
	struct mlx5_modify_qp_mbox_in *in;
	int err;

	base = qp->ibqp.qp_type == IB_QPT_RAW_PACKET ?
	       &qp->raw_packet_qp.rq.base :
	       &qp->trans_qp.base;

	in = kzalloc(sizeof(*in), GFP_KERNEL);
	if (!in)
		return;

	if (qp->state != IB_QPS_RESET) {
		if (qp->ibqp.qp_type != IB_QPT_RAW_PACKET) {
			mlx5_ib_qp_disable_pagefaults(qp);
			err = mlx5_core_qp_modify(dev->mdev,
						  MLX5_CMD_OP_2RST_QP, in, 0,
						  &base->mqp);
		} else {
			err = modify_raw_packet_qp(dev, qp,
						   MLX5_CMD_OP_2RST_QP);
		}
		if (err)
			mlx5_ib_warn(dev, "mlx5_ib: modify QP 0x%06x to RESET failed\n",
				     base->mqp.qpn);
	}

	get_cqs(qp, &send_cq, &recv_cq);

	if (qp->create_type == MLX5_QP_KERNEL) {
		mlx5_ib_lock_cqs(send_cq, recv_cq);
		__mlx5_ib_cq_clean(recv_cq, base->mqp.qpn,
				   qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
		if (send_cq != recv_cq)
			__mlx5_ib_cq_clean(send_cq, base->mqp.qpn,
					   NULL);
		mlx5_ib_unlock_cqs(send_cq, recv_cq);
	}

	if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET) {
		destroy_raw_packet_qp(dev, qp);
	} else {
		err = mlx5_core_destroy_qp(dev->mdev, &base->mqp);
		if (err)
			mlx5_ib_warn(dev, "failed to destroy QP 0x%x\n",
				     base->mqp.qpn);
	}

	kfree(in);

	if (qp->create_type == MLX5_QP_KERNEL)
		destroy_qp_kernel(dev, qp);
	else if (qp->create_type == MLX5_QP_USER)
		destroy_qp_user(&get_pd(qp)->ibpd, qp, base);
}

static const char *ib_qp_type_str(enum ib_qp_type type)
{
	switch (type) {
	case IB_QPT_SMI:
		return "IB_QPT_SMI";
	case IB_QPT_GSI:
		return "IB_QPT_GSI";
	case IB_QPT_RC:
		return "IB_QPT_RC";
	case IB_QPT_UC:
		return "IB_QPT_UC";
	case IB_QPT_UD:
		return "IB_QPT_UD";
	case IB_QPT_RAW_IPV6:
		return "IB_QPT_RAW_IPV6";
	case IB_QPT_RAW_ETHERTYPE:
		return "IB_QPT_RAW_ETHERTYPE";
	case IB_QPT_XRC_INI:
		return "IB_QPT_XRC_INI";
	case IB_QPT_XRC_TGT:
		return "IB_QPT_XRC_TGT";
	case IB_QPT_RAW_PACKET:
		return "IB_QPT_RAW_PACKET";
	case MLX5_IB_QPT_REG_UMR:
		return "MLX5_IB_QPT_REG_UMR";
	case IB_QPT_MAX:
	default:
		return "Invalid QP type";
	}
}

struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
				struct ib_qp_init_attr *init_attr,
				struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev;
	struct mlx5_ib_qp *qp;
	u16 xrcdn = 0;
	int err;

	if (pd) {
		dev = to_mdev(pd->device);

		if (init_attr->qp_type == IB_QPT_RAW_PACKET) {
			if (!pd->uobject) {
				mlx5_ib_dbg(dev, "Raw Packet QP is not supported for kernel consumers\n");
				return ERR_PTR(-EINVAL);
			} else if (!to_mucontext(pd->uobject->context)->cqe_version) {
				mlx5_ib_dbg(dev, "Raw Packet QP is only supported for CQE version > 0\n");
				return ERR_PTR(-EINVAL);
			}
		}
	} else {
		/* being cautious here */
		if (init_attr->qp_type != IB_QPT_XRC_TGT &&
		    init_attr->qp_type != MLX5_IB_QPT_REG_UMR) {
			pr_warn("%s: no PD for transport %s\n", __func__,
				ib_qp_type_str(init_attr->qp_type));
			return ERR_PTR(-EINVAL);
		}
		dev = to_mdev(to_mxrcd(init_attr->xrcd)->ibxrcd.device);
	}

	switch (init_attr->qp_type) {
	case IB_QPT_XRC_TGT:
	case IB_QPT_XRC_INI:
		if (!MLX5_CAP_GEN(dev->mdev, xrc)) {
			mlx5_ib_dbg(dev, "XRC not supported\n");
			return ERR_PTR(-ENOSYS);
		}
		init_attr->recv_cq = NULL;
		if (init_attr->qp_type == IB_QPT_XRC_TGT) {
			xrcdn = to_mxrcd(init_attr->xrcd)->xrcdn;
			init_attr->send_cq = NULL;
		}

		/* fall through */
	case IB_QPT_RAW_PACKET:
	case IB_QPT_RC:
	case IB_QPT_UC:
	case IB_QPT_UD:
	case IB_QPT_SMI:
	case MLX5_IB_QPT_HW_GSI:
	case MLX5_IB_QPT_REG_UMR:
		qp = kzalloc(sizeof(*qp), GFP_KERNEL);
		if (!qp)
			return ERR_PTR(-ENOMEM);

		err = create_qp_common(dev, pd, init_attr, udata, qp);
		if (err) {
			mlx5_ib_dbg(dev, "create_qp_common failed\n");
			kfree(qp);
			return ERR_PTR(err);
		}

		if (is_qp0(init_attr->qp_type))
			qp->ibqp.qp_num = 0;
		else if (is_qp1(init_attr->qp_type))
			qp->ibqp.qp_num = 1;
		else
			qp->ibqp.qp_num = qp->trans_qp.base.mqp.qpn;

		mlx5_ib_dbg(dev, "ib qpnum 0x%x, mlx qpn 0x%x, rcqn 0x%x, scqn 0x%x\n",
			    qp->ibqp.qp_num, qp->trans_qp.base.mqp.qpn,
			    to_mcq(init_attr->recv_cq)->mcq.cqn,
			    to_mcq(init_attr->send_cq)->mcq.cqn);

		qp->trans_qp.xrcdn = xrcdn;

		break;

	case IB_QPT_GSI:
		return mlx5_ib_gsi_create_qp(pd, init_attr);

	case IB_QPT_RAW_IPV6:
	case IB_QPT_RAW_ETHERTYPE:
	case IB_QPT_MAX:
	default:
		mlx5_ib_dbg(dev, "unsupported qp type %d\n",
			    init_attr->qp_type);
		/* Don't support raw QPs */
		return ERR_PTR(-EINVAL);
	}

	return &qp->ibqp;
}

int mlx5_ib_destroy_qp(struct ib_qp *qp)
{
	struct mlx5_ib_dev *dev = to_mdev(qp->device);
	struct mlx5_ib_qp *mqp = to_mqp(qp);

	if (unlikely(qp->qp_type == IB_QPT_GSI))
		return mlx5_ib_gsi_destroy_qp(qp);

	destroy_qp_common(dev, mqp);

	kfree(mqp);

	return 0;
}

static __be32 to_mlx5_access_flags(struct mlx5_ib_qp *qp, const struct ib_qp_attr *attr,
				   int attr_mask)
{
	u32 hw_access_flags = 0;
	u8 dest_rd_atomic;
	u32 access_flags;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		dest_rd_atomic = attr->max_dest_rd_atomic;
	else
		dest_rd_atomic = qp->trans_qp.resp_depth;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		access_flags = attr->qp_access_flags;
	else
		access_flags = qp->trans_qp.atomic_rd_en;

	if (!dest_rd_atomic)
		access_flags &= IB_ACCESS_REMOTE_WRITE;

	if (access_flags & IB_ACCESS_REMOTE_READ)
		hw_access_flags |= MLX5_QP_BIT_RRE;
	if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
		hw_access_flags |= (MLX5_QP_BIT_RAE | MLX5_ATOMIC_MODE_CX);
	if (access_flags & IB_ACCESS_REMOTE_WRITE)
		hw_access_flags |= MLX5_QP_BIT_RWE;

	return cpu_to_be32(hw_access_flags);
}

enum {
	MLX5_PATH_FLAG_FL	= 1 << 0,
	MLX5_PATH_FLAG_FREE_AR	= 1 << 1,
	MLX5_PATH_FLAG_COUNTER	= 1 << 2,
};

static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate)
{
	if (rate == IB_RATE_PORT_CURRENT) {
		return 0;
	} else if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_300_GBPS) {
		return -EINVAL;
	} else {
		while (rate != IB_RATE_2_5_GBPS &&
		       !(1 << (rate + MLX5_STAT_RATE_OFFSET) &
			 MLX5_CAP_GEN(dev->mdev, stat_rate_support)))
			--rate;
	}

	return rate + MLX5_STAT_RATE_OFFSET;
}

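/*
 * For raw packet QPs the service level cannot be set through the QP context,
 * so the SL is programmed as the priority of the SQ's TIS instead.
 */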
static int modify_raw_packet_eth_prio(struct mlx5_core_dev *dev,
				      struct mlx5_ib_sq *sq, u8 sl)
{
	void *in;
	void *tisc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_tis_in);
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_tis_in, in, bitmask.prio, 1);

	tisc = MLX5_ADDR_OF(modify_tis_in, in, ctx);
	MLX5_SET(tisc, tisc, prio, ((sl & 0x7) << 1));

	err = mlx5_core_modify_tis(dev, sq->tisn, in, inlen);

	kvfree(in);

	return err;
}

static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
			 const struct ib_ah_attr *ah,
			 struct mlx5_qp_path *path, u8 port, int attr_mask,
			 u32 path_flags, const struct ib_qp_attr *attr)
{
	enum rdma_link_layer ll = rdma_port_get_link_layer(&dev->ib_dev, port);
	int err;

	if (attr_mask & IB_QP_PKEY_INDEX)
		path->pkey_index = attr->pkey_index;

	if (ah->ah_flags & IB_AH_GRH) {
		if (ah->grh.sgid_index >=
		    dev->mdev->port_caps[port - 1].gid_table_len) {
			pr_err("sgid_index (%u) too large. max is %d\n",
			       ah->grh.sgid_index,
			       dev->mdev->port_caps[port - 1].gid_table_len);
			return -EINVAL;
		}
	}

	if (ll == IB_LINK_LAYER_ETHERNET) {
		if (!(ah->ah_flags & IB_AH_GRH))
			return -EINVAL;
		memcpy(path->rmac, ah->dmac, sizeof(ah->dmac));
		path->udp_sport = mlx5_get_roce_udp_sport(dev, port,
							  ah->grh.sgid_index);
		path->dci_cfi_prio_sl = (ah->sl & 0x7) << 4;
	} else {
		path->fl = (path_flags & MLX5_PATH_FLAG_FL) ? 0x80 : 0;
		path->free_ar = (path_flags & MLX5_PATH_FLAG_FREE_AR) ? 0x80 :
									0;
		path->rlid = cpu_to_be16(ah->dlid);
		path->grh_mlid = ah->src_path_bits & 0x7f;
		if (ah->ah_flags & IB_AH_GRH)
			path->grh_mlid	|= 1 << 7;
		path->dci_cfi_prio_sl = ah->sl & 0xf;
	}

	if (ah->ah_flags & IB_AH_GRH) {
		path->mgid_index = ah->grh.sgid_index;
		path->hop_limit  = ah->grh.hop_limit;
		path->tclass_flowlabel =
			cpu_to_be32((ah->grh.traffic_class << 20) |
				    (ah->grh.flow_label));
		memcpy(path->rgid, ah->grh.dgid.raw, 16);
	}

	err = ib_rate_to_mlx5(dev, ah->static_rate);
	if (err < 0)
		return err;
	path->static_rate = err;
	path->port = port;

	if (attr_mask & IB_QP_TIMEOUT)
		path->ackto_lt = attr->timeout << 3;

	if ((qp->ibqp.qp_type == IB_QPT_RAW_PACKET) && qp->sq.wqe_cnt)
		return modify_raw_packet_eth_prio(dev->mdev,
						  &qp->raw_packet_qp.sq,
						  ah->sl & 0xf);

	return 0;
}

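/*
 * Optional-parameter masks, indexed by [current state][next state][transport
 * service type], describing which QP context fields may legally be modified
 * on each state transition.
 */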
static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_QP_ST_MAX] = {
	[MLX5_QP_STATE_INIT] = {
		[MLX5_QP_STATE_INIT] = {
			[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RRE		|
					  MLX5_QP_OPTPAR_RAE		|
					  MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_PKEY_INDEX	|
					  MLX5_QP_OPTPAR_PRI_PORT,
			[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_PKEY_INDEX	|
					  MLX5_QP_OPTPAR_PRI_PORT,
			[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_PKEY_INDEX	|
					  MLX5_QP_OPTPAR_Q_KEY		|
					  MLX5_QP_OPTPAR_PRI_PORT,
		},
		[MLX5_QP_STATE_RTR] = {
			[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH  |
					  MLX5_QP_OPTPAR_RRE            |
					  MLX5_QP_OPTPAR_RAE            |
					  MLX5_QP_OPTPAR_RWE            |
					  MLX5_QP_OPTPAR_PKEY_INDEX,
			[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH  |
					  MLX5_QP_OPTPAR_RWE            |
					  MLX5_QP_OPTPAR_PKEY_INDEX,
			[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_PKEY_INDEX     |
					  MLX5_QP_OPTPAR_Q_KEY,
			[MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_PKEY_INDEX	|
					   MLX5_QP_OPTPAR_Q_KEY,
			[MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
					   MLX5_QP_OPTPAR_RRE           |
					   MLX5_QP_OPTPAR_RAE           |
					   MLX5_QP_OPTPAR_RWE           |
					   MLX5_QP_OPTPAR_PKEY_INDEX,
		},
	},
	[MLX5_QP_STATE_RTR] = {
		[MLX5_QP_STATE_RTS] = {
			[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH	|
					  MLX5_QP_OPTPAR_RRE		|
					  MLX5_QP_OPTPAR_RAE		|
					  MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_PM_STATE	|
					  MLX5_QP_OPTPAR_RNR_TIMEOUT,
			[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH	|
					  MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_PM_STATE,
			[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY,
		},
	},
	[MLX5_QP_STATE_RTS] = {
		[MLX5_QP_STATE_RTS] = {
			[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RRE		|
					  MLX5_QP_OPTPAR_RAE		|
					  MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_RNR_TIMEOUT	|
					  MLX5_QP_OPTPAR_PM_STATE	|
					  MLX5_QP_OPTPAR_ALT_ADDR_PATH,
			[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_PM_STATE	|
					  MLX5_QP_OPTPAR_ALT_ADDR_PATH,
			[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY		|
					  MLX5_QP_OPTPAR_SRQN		|
					  MLX5_QP_OPTPAR_CQN_RCV,
		},
	},
	[MLX5_QP_STATE_SQER] = {
		[MLX5_QP_STATE_RTS] = {
			[MLX5_QP_ST_UD]	 = MLX5_QP_OPTPAR_Q_KEY,
			[MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_Q_KEY,
			[MLX5_QP_ST_UC]	 = MLX5_QP_OPTPAR_RWE,
			[MLX5_QP_ST_RC]	 = MLX5_QP_OPTPAR_RNR_TIMEOUT	|
					   MLX5_QP_OPTPAR_RWE		|
					   MLX5_QP_OPTPAR_RAE		|
					   MLX5_QP_OPTPAR_RRE,
		},
	},
};

static int ib_nr_to_mlx5_nr(int ib_mask)
{
	switch (ib_mask) {
	case IB_QP_STATE:
		return 0;
	case IB_QP_CUR_STATE:
		return 0;
	case IB_QP_EN_SQD_ASYNC_NOTIFY:
		return 0;
	case IB_QP_ACCESS_FLAGS:
		return MLX5_QP_OPTPAR_RWE | MLX5_QP_OPTPAR_RRE |
			MLX5_QP_OPTPAR_RAE;
	case IB_QP_PKEY_INDEX:
		return MLX5_QP_OPTPAR_PKEY_INDEX;
	case IB_QP_PORT:
		return MLX5_QP_OPTPAR_PRI_PORT;
	case IB_QP_QKEY:
		return MLX5_QP_OPTPAR_Q_KEY;
	case IB_QP_AV:
		return MLX5_QP_OPTPAR_PRIMARY_ADDR_PATH |
			MLX5_QP_OPTPAR_PRI_PORT;
	case IB_QP_PATH_MTU:
		return 0;
	case IB_QP_TIMEOUT:
		return MLX5_QP_OPTPAR_ACK_TIMEOUT;
	case IB_QP_RETRY_CNT:
		return MLX5_QP_OPTPAR_RETRY_COUNT;
	case IB_QP_RNR_RETRY:
		return MLX5_QP_OPTPAR_RNR_RETRY;
	case IB_QP_RQ_PSN:
		return 0;
	case IB_QP_MAX_QP_RD_ATOMIC:
		return MLX5_QP_OPTPAR_SRA_MAX;
	case IB_QP_ALT_PATH:
		return MLX5_QP_OPTPAR_ALT_ADDR_PATH;
	case IB_QP_MIN_RNR_TIMER:
		return MLX5_QP_OPTPAR_RNR_TIMEOUT;
	case IB_QP_SQ_PSN:
		return 0;
	case IB_QP_MAX_DEST_RD_ATOMIC:
		return MLX5_QP_OPTPAR_RRA_MAX | MLX5_QP_OPTPAR_RWE |
			MLX5_QP_OPTPAR_RRE | MLX5_QP_OPTPAR_RAE;
	case IB_QP_PATH_MIG_STATE:
		return MLX5_QP_OPTPAR_PM_STATE;
	case IB_QP_CAP:
		return 0;
	case IB_QP_DEST_QPN:
		return 0;
	}
	return 0;
}

static int ib_mask_to_mlx5_opt(int ib_mask)
{
	int result = 0;
	int i;

	for (i = 0; i < 8 * sizeof(int); i++) {
		if ((1 << i) & ib_mask)
			result |= ib_nr_to_mlx5_nr(1 << i);
	}

	return result;
}

static int modify_raw_packet_qp_rq(struct mlx5_core_dev *dev,
				   struct mlx5_ib_rq *rq, int new_state)
{
	void *in;
	void *rqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_rq_in, in, rq_state, rq->state);

	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
	MLX5_SET(rqc, rqc, state, new_state);

	err = mlx5_core_modify_rq(dev, rq->base.mqp.qpn, in, inlen);
	if (err)
		goto out;

	rq->state = new_state;

out:
	kvfree(in);
	return err;
}

modify_raw_packet_qp_sq(struct mlx5_core_dev *dev, 2071 struct mlx5_ib_sq *sq, int new_state) 2072 { 2073 void *in; 2074 void *sqc; 2075 int inlen; 2076 int err; 2077 2078 inlen = MLX5_ST_SZ_BYTES(modify_sq_in); 2079 in = mlx5_vzalloc(inlen); 2080 if (!in) 2081 return -ENOMEM; 2082 2083 MLX5_SET(modify_sq_in, in, sq_state, sq->state); 2084 2085 sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx); 2086 MLX5_SET(sqc, sqc, state, new_state); 2087 2088 err = mlx5_core_modify_sq(dev, sq->base.mqp.qpn, in, inlen); 2089 if (err) 2090 goto out; 2091 2092 sq->state = new_state; 2093 2094 out: 2095 kvfree(in); 2096 return err; 2097 } 2098 2099 static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, 2100 u16 operation) 2101 { 2102 struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp; 2103 struct mlx5_ib_rq *rq = &raw_packet_qp->rq; 2104 struct mlx5_ib_sq *sq = &raw_packet_qp->sq; 2105 int rq_state; 2106 int sq_state; 2107 int err; 2108 2109 switch (operation) { 2110 case MLX5_CMD_OP_RST2INIT_QP: 2111 rq_state = MLX5_RQC_STATE_RDY; 2112 sq_state = MLX5_SQC_STATE_RDY; 2113 break; 2114 case MLX5_CMD_OP_2ERR_QP: 2115 rq_state = MLX5_RQC_STATE_ERR; 2116 sq_state = MLX5_SQC_STATE_ERR; 2117 break; 2118 case MLX5_CMD_OP_2RST_QP: 2119 rq_state = MLX5_RQC_STATE_RST; 2120 sq_state = MLX5_SQC_STATE_RST; 2121 break; 2122 case MLX5_CMD_OP_INIT2INIT_QP: 2123 case MLX5_CMD_OP_INIT2RTR_QP: 2124 case MLX5_CMD_OP_RTR2RTS_QP: 2125 case MLX5_CMD_OP_RTS2RTS_QP: 2126 /* Nothing to do here... */ 2127 return 0; 2128 default: 2129 WARN_ON(1); 2130 return -EINVAL; 2131 } 2132 2133 if (qp->rq.wqe_cnt) { 2134 err = modify_raw_packet_qp_rq(dev->mdev, rq, rq_state); 2135 if (err) 2136 return err; 2137 } 2138 2139 if (qp->sq.wqe_cnt) 2140 return modify_raw_packet_qp_sq(dev->mdev, sq, sq_state); 2141 2142 return 0; 2143 } 2144 2145 static int __mlx5_ib_modify_qp(struct ib_qp *ibqp, 2146 const struct ib_qp_attr *attr, int attr_mask, 2147 enum ib_qp_state cur_state, enum ib_qp_state new_state) 2148 { 2149 static const u16 optab[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE] = { 2150 [MLX5_QP_STATE_RST] = { 2151 [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP, 2152 [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP, 2153 [MLX5_QP_STATE_INIT] = MLX5_CMD_OP_RST2INIT_QP, 2154 }, 2155 [MLX5_QP_STATE_INIT] = { 2156 [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP, 2157 [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP, 2158 [MLX5_QP_STATE_INIT] = MLX5_CMD_OP_INIT2INIT_QP, 2159 [MLX5_QP_STATE_RTR] = MLX5_CMD_OP_INIT2RTR_QP, 2160 }, 2161 [MLX5_QP_STATE_RTR] = { 2162 [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP, 2163 [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP, 2164 [MLX5_QP_STATE_RTS] = MLX5_CMD_OP_RTR2RTS_QP, 2165 }, 2166 [MLX5_QP_STATE_RTS] = { 2167 [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP, 2168 [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP, 2169 [MLX5_QP_STATE_RTS] = MLX5_CMD_OP_RTS2RTS_QP, 2170 }, 2171 [MLX5_QP_STATE_SQD] = { 2172 [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP, 2173 [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP, 2174 }, 2175 [MLX5_QP_STATE_SQER] = { 2176 [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP, 2177 [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP, 2178 [MLX5_QP_STATE_RTS] = MLX5_CMD_OP_SQERR2RTS_QP, 2179 }, 2180 [MLX5_QP_STATE_ERR] = { 2181 [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP, 2182 [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP, 2183 } 2184 }; 2185 2186 struct mlx5_ib_dev *dev = to_mdev(ibqp->device); 2187 struct mlx5_ib_qp *qp = to_mqp(ibqp); 2188 struct mlx5_ib_qp_base *base = &qp->trans_qp.base; 2189 struct mlx5_ib_cq *send_cq, *recv_cq; 2190 struct 
mlx5_qp_context *context; 2191 struct mlx5_modify_qp_mbox_in *in; 2192 struct mlx5_ib_pd *pd; 2193 enum mlx5_qp_state mlx5_cur, mlx5_new; 2194 enum mlx5_qp_optpar optpar; 2195 int sqd_event; 2196 int mlx5_st; 2197 int err; 2198 u16 op; 2199 2200 in = kzalloc(sizeof(*in), GFP_KERNEL); 2201 if (!in) 2202 return -ENOMEM; 2203 2204 context = &in->ctx; 2205 err = to_mlx5_st(ibqp->qp_type); 2206 if (err < 0) { 2207 mlx5_ib_dbg(dev, "unsupported qp type %d\n", ibqp->qp_type); 2208 goto out; 2209 } 2210 2211 context->flags = cpu_to_be32(err << 16); 2212 2213 if (!(attr_mask & IB_QP_PATH_MIG_STATE)) { 2214 context->flags |= cpu_to_be32(MLX5_QP_PM_MIGRATED << 11); 2215 } else { 2216 switch (attr->path_mig_state) { 2217 case IB_MIG_MIGRATED: 2218 context->flags |= cpu_to_be32(MLX5_QP_PM_MIGRATED << 11); 2219 break; 2220 case IB_MIG_REARM: 2221 context->flags |= cpu_to_be32(MLX5_QP_PM_REARM << 11); 2222 break; 2223 case IB_MIG_ARMED: 2224 context->flags |= cpu_to_be32(MLX5_QP_PM_ARMED << 11); 2225 break; 2226 } 2227 } 2228 2229 if (is_sqp(ibqp->qp_type)) { 2230 context->mtu_msgmax = (IB_MTU_256 << 5) | 8; 2231 } else if (ibqp->qp_type == IB_QPT_UD || 2232 ibqp->qp_type == MLX5_IB_QPT_REG_UMR) { 2233 context->mtu_msgmax = (IB_MTU_4096 << 5) | 12; 2234 } else if (attr_mask & IB_QP_PATH_MTU) { 2235 if (attr->path_mtu < IB_MTU_256 || 2236 attr->path_mtu > IB_MTU_4096) { 2237 mlx5_ib_warn(dev, "invalid mtu %d\n", attr->path_mtu); 2238 err = -EINVAL; 2239 goto out; 2240 } 2241 context->mtu_msgmax = (attr->path_mtu << 5) | 2242 (u8)MLX5_CAP_GEN(dev->mdev, log_max_msg); 2243 } 2244 2245 if (attr_mask & IB_QP_DEST_QPN) 2246 context->log_pg_sz_remote_qpn = cpu_to_be32(attr->dest_qp_num); 2247 2248 if (attr_mask & IB_QP_PKEY_INDEX) 2249 context->pri_path.pkey_index = attr->pkey_index; 2250 2251 /* todo implement counter_index functionality */ 2252 2253 if (is_sqp(ibqp->qp_type)) 2254 context->pri_path.port = qp->port; 2255 2256 if (attr_mask & IB_QP_PORT) 2257 context->pri_path.port = attr->port_num; 2258 2259 if (attr_mask & IB_QP_AV) { 2260 err = mlx5_set_path(dev, qp, &attr->ah_attr, &context->pri_path, 2261 attr_mask & IB_QP_PORT ? attr->port_num : qp->port, 2262 attr_mask, 0, attr); 2263 if (err) 2264 goto out; 2265 } 2266 2267 if (attr_mask & IB_QP_TIMEOUT) 2268 context->pri_path.ackto_lt |= attr->timeout << 3; 2269 2270 if (attr_mask & IB_QP_ALT_PATH) { 2271 err = mlx5_set_path(dev, qp, &attr->alt_ah_attr, 2272 &context->alt_path, 2273 attr->alt_port_num, attr_mask, 0, attr); 2274 if (err) 2275 goto out; 2276 } 2277 2278 pd = get_pd(qp); 2279 get_cqs(qp, &send_cq, &recv_cq); 2280 2281 context->flags_pd = cpu_to_be32(pd ? pd->pdn : to_mpd(dev->devr.p0)->pdn); 2282 context->cqn_send = send_cq ? cpu_to_be32(send_cq->mcq.cqn) : 0; 2283 context->cqn_recv = recv_cq ? 
cpu_to_be32(recv_cq->mcq.cqn) : 0; 2284 context->params1 = cpu_to_be32(MLX5_IB_ACK_REQ_FREQ << 28); 2285 2286 if (attr_mask & IB_QP_RNR_RETRY) 2287 context->params1 |= cpu_to_be32(attr->rnr_retry << 13); 2288 2289 if (attr_mask & IB_QP_RETRY_CNT) 2290 context->params1 |= cpu_to_be32(attr->retry_cnt << 16); 2291 2292 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) { 2293 if (attr->max_rd_atomic) 2294 context->params1 |= 2295 cpu_to_be32(fls(attr->max_rd_atomic - 1) << 21); 2296 } 2297 2298 if (attr_mask & IB_QP_SQ_PSN) 2299 context->next_send_psn = cpu_to_be32(attr->sq_psn); 2300 2301 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) { 2302 if (attr->max_dest_rd_atomic) 2303 context->params2 |= 2304 cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21); 2305 } 2306 2307 if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC)) 2308 context->params2 |= to_mlx5_access_flags(qp, attr, attr_mask); 2309 2310 if (attr_mask & IB_QP_MIN_RNR_TIMER) 2311 context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24); 2312 2313 if (attr_mask & IB_QP_RQ_PSN) 2314 context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn); 2315 2316 if (attr_mask & IB_QP_QKEY) 2317 context->qkey = cpu_to_be32(attr->qkey); 2318 2319 if (qp->rq.wqe_cnt && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) 2320 context->db_rec_addr = cpu_to_be64(qp->db.dma); 2321 2322 if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD && 2323 attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY && attr->en_sqd_async_notify) 2324 sqd_event = 1; 2325 else 2326 sqd_event = 0; 2327 2328 if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) 2329 context->sq_crq_size |= cpu_to_be16(1 << 4); 2330 2331 if (qp->flags & MLX5_IB_QP_SQPN_QP1) 2332 context->deth_sqpn = cpu_to_be32(1); 2333 2334 mlx5_cur = to_mlx5_state(cur_state); 2335 mlx5_new = to_mlx5_state(new_state); 2336 mlx5_st = to_mlx5_st(ibqp->qp_type); 2337 if (mlx5_st < 0) 2338 goto out; 2339 2340 /* If moving to a reset or error state, we must disable page faults on 2341 * this QP and flush all current page faults. Otherwise a stale page 2342 * fault may attempt to work on this QP after it is reset and moved 2343 * again to RTS, and may cause the driver and the device to get out of 2344 * sync. 
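 * Page faults are re-enabled further below, after the firmware command
 * completes, when the QP moves from RESET back to INIT.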
*/ 2345 if (cur_state != IB_QPS_RESET && cur_state != IB_QPS_ERR && 2346 (new_state == IB_QPS_RESET || new_state == IB_QPS_ERR) && 2347 (qp->ibqp.qp_type != IB_QPT_RAW_PACKET)) 2348 mlx5_ib_qp_disable_pagefaults(qp); 2349 2350 if (mlx5_cur >= MLX5_QP_NUM_STATE || mlx5_new >= MLX5_QP_NUM_STATE || 2351 !optab[mlx5_cur][mlx5_new]) 2352 goto out; 2353 2354 op = optab[mlx5_cur][mlx5_new]; 2355 optpar = ib_mask_to_mlx5_opt(attr_mask); 2356 optpar &= opt_mask[mlx5_cur][mlx5_new][mlx5_st]; 2357 in->optparam = cpu_to_be32(optpar); 2358 2359 if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET) 2360 err = modify_raw_packet_qp(dev, qp, op); 2361 else 2362 err = mlx5_core_qp_modify(dev->mdev, op, in, sqd_event, 2363 &base->mqp); 2364 if (err) 2365 goto out; 2366 2367 if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT && 2368 (qp->ibqp.qp_type != IB_QPT_RAW_PACKET)) 2369 mlx5_ib_qp_enable_pagefaults(qp); 2370 2371 qp->state = new_state; 2372 2373 if (attr_mask & IB_QP_ACCESS_FLAGS) 2374 qp->trans_qp.atomic_rd_en = attr->qp_access_flags; 2375 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) 2376 qp->trans_qp.resp_depth = attr->max_dest_rd_atomic; 2377 if (attr_mask & IB_QP_PORT) 2378 qp->port = attr->port_num; 2379 if (attr_mask & IB_QP_ALT_PATH) 2380 qp->trans_qp.alt_port = attr->alt_port_num; 2381 2382 /* 2383 * If we moved a kernel QP to RESET, clean up all old CQ 2384 * entries and reinitialize the QP. 2385 */ 2386 if (new_state == IB_QPS_RESET && !ibqp->uobject) { 2387 mlx5_ib_cq_clean(recv_cq, base->mqp.qpn, 2388 ibqp->srq ? to_msrq(ibqp->srq) : NULL); 2389 if (send_cq != recv_cq) 2390 mlx5_ib_cq_clean(send_cq, base->mqp.qpn, NULL); 2391 2392 qp->rq.head = 0; 2393 qp->rq.tail = 0; 2394 qp->sq.head = 0; 2395 qp->sq.tail = 0; 2396 qp->sq.cur_post = 0; 2397 qp->sq.last_poll = 0; 2398 qp->db.db[MLX5_RCV_DBR] = 0; 2399 qp->db.db[MLX5_SND_DBR] = 0; 2400 } 2401 2402 out: 2403 kfree(in); 2404 return err; 2405 } 2406 2407 int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, 2408 int attr_mask, struct ib_udata *udata) 2409 { 2410 struct mlx5_ib_dev *dev = to_mdev(ibqp->device); 2411 struct mlx5_ib_qp *qp = to_mqp(ibqp); 2412 enum ib_qp_type qp_type; 2413 enum ib_qp_state cur_state, new_state; 2414 int err = -EINVAL; 2415 int port; 2416 enum rdma_link_layer ll = IB_LINK_LAYER_UNSPECIFIED; 2417 2418 if (unlikely(ibqp->qp_type == IB_QPT_GSI)) 2419 return mlx5_ib_gsi_modify_qp(ibqp, attr, attr_mask); 2420 2421 qp_type = (unlikely(ibqp->qp_type == MLX5_IB_QPT_HW_GSI)) ? 2422 IB_QPT_GSI : ibqp->qp_type; 2423 2424 mutex_lock(&qp->mutex); 2425 2426 cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state; 2427 new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state; 2428 2429 if (!(cur_state == new_state && cur_state == IB_QPS_RESET)) { 2430 port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port; 2431 ll = dev->ib_dev.get_link_layer(&dev->ib_dev, port); 2432 } 2433 2434 if (qp_type != MLX5_IB_QPT_REG_UMR && 2435 !ib_modify_qp_is_ok(cur_state, new_state, qp_type, attr_mask, ll)) { 2436 mlx5_ib_dbg(dev, "invalid QP state transition from %d to %d, qp_type %d, attr_mask 0x%x\n", 2437 cur_state, new_state, ibqp->qp_type, attr_mask); 2438 goto out; 2439 } 2440 2441 if ((attr_mask & IB_QP_PORT) && 2442 (attr->port_num == 0 || 2443 attr->port_num > MLX5_CAP_GEN(dev->mdev, num_ports))) { 2444 mlx5_ib_dbg(dev, "invalid port number %d. 
number of ports is %d\n", 2445 attr->port_num, dev->num_ports); 2446 goto out; 2447 } 2448 2449 if (attr_mask & IB_QP_PKEY_INDEX) { 2450 port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port; 2451 if (attr->pkey_index >= 2452 dev->mdev->port_caps[port - 1].pkey_table_len) { 2453 mlx5_ib_dbg(dev, "invalid pkey index %d\n", 2454 attr->pkey_index); 2455 goto out; 2456 } 2457 } 2458 2459 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC && 2460 attr->max_rd_atomic > 2461 (1 << MLX5_CAP_GEN(dev->mdev, log_max_ra_res_qp))) { 2462 mlx5_ib_dbg(dev, "invalid max_rd_atomic value %d\n", 2463 attr->max_rd_atomic); 2464 goto out; 2465 } 2466 2467 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC && 2468 attr->max_dest_rd_atomic > 2469 (1 << MLX5_CAP_GEN(dev->mdev, log_max_ra_req_qp))) { 2470 mlx5_ib_dbg(dev, "invalid max_dest_rd_atomic value %d\n", 2471 attr->max_dest_rd_atomic); 2472 goto out; 2473 } 2474 2475 if (cur_state == new_state && cur_state == IB_QPS_RESET) { 2476 err = 0; 2477 goto out; 2478 } 2479 2480 err = __mlx5_ib_modify_qp(ibqp, attr, attr_mask, cur_state, new_state); 2481 2482 out: 2483 mutex_unlock(&qp->mutex); 2484 return err; 2485 } 2486 2487 static int mlx5_wq_overflow(struct mlx5_ib_wq *wq, int nreq, struct ib_cq *ib_cq) 2488 { 2489 struct mlx5_ib_cq *cq; 2490 unsigned cur; 2491 2492 cur = wq->head - wq->tail; 2493 if (likely(cur + nreq < wq->max_post)) 2494 return 0; 2495 2496 cq = to_mcq(ib_cq); 2497 spin_lock(&cq->lock); 2498 cur = wq->head - wq->tail; 2499 spin_unlock(&cq->lock); 2500 2501 return cur + nreq >= wq->max_post; 2502 } 2503 2504 static __always_inline void set_raddr_seg(struct mlx5_wqe_raddr_seg *rseg, 2505 u64 remote_addr, u32 rkey) 2506 { 2507 rseg->raddr = cpu_to_be64(remote_addr); 2508 rseg->rkey = cpu_to_be32(rkey); 2509 rseg->reserved = 0; 2510 } 2511 2512 static void *set_eth_seg(struct mlx5_wqe_eth_seg *eseg, 2513 struct ib_send_wr *wr, void *qend, 2514 struct mlx5_ib_qp *qp, int *size) 2515 { 2516 void *seg = eseg; 2517 2518 memset(eseg, 0, sizeof(struct mlx5_wqe_eth_seg)); 2519 2520 if (wr->send_flags & IB_SEND_IP_CSUM) 2521 eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM | 2522 MLX5_ETH_WQE_L4_CSUM; 2523 2524 seg += sizeof(struct mlx5_wqe_eth_seg); 2525 *size += sizeof(struct mlx5_wqe_eth_seg) / 16; 2526 2527 if (wr->opcode == IB_WR_LSO) { 2528 struct ib_ud_wr *ud_wr = container_of(wr, struct ib_ud_wr, wr); 2529 int size_of_inl_hdr_start = sizeof(eseg->inline_hdr_start); 2530 u64 left, leftlen, copysz; 2531 void *pdata = ud_wr->header; 2532 2533 left = ud_wr->hlen; 2534 eseg->mss = cpu_to_be16(ud_wr->mss); 2535 eseg->inline_hdr_sz = cpu_to_be16(left); 2536 2537 /* 2538 * check if there is space till the end of queue, if yes, 2539 * copy all in one shot, otherwise copy till the end of queue, 2540 * rollback and than the copy the left 2541 */ 2542 leftlen = qend - (void *)eseg->inline_hdr_start; 2543 copysz = min_t(u64, leftlen, left); 2544 2545 memcpy(seg - size_of_inl_hdr_start, pdata, copysz); 2546 2547 if (likely(copysz > size_of_inl_hdr_start)) { 2548 seg += ALIGN(copysz - size_of_inl_hdr_start, 16); 2549 *size += ALIGN(copysz - size_of_inl_hdr_start, 16) / 16; 2550 } 2551 2552 if (unlikely(copysz < left)) { /* the last wqe in the queue */ 2553 seg = mlx5_get_send_wqe(qp, 0); 2554 left -= copysz; 2555 pdata += copysz; 2556 memcpy(seg, pdata, left); 2557 seg += ALIGN(left, 16); 2558 *size += ALIGN(left, 16) / 16; 2559 } 2560 } 2561 2562 return seg; 2563 } 2564 2565 static void set_datagram_seg(struct mlx5_wqe_datagram_seg *dseg, 2566 struct ib_send_wr *wr) 2567 { 2568 
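	/* Build the UD datagram segment: copy the address vector from the
	 * posted address handle, then fill in the remote QPN and Q_Key
	 * taken from the ud_wr() fields of the work request.
	 */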
memcpy(&dseg->av, &to_mah(ud_wr(wr)->ah)->av, sizeof(struct mlx5_av)); 2569 dseg->av.dqp_dct = cpu_to_be32(ud_wr(wr)->remote_qpn | MLX5_EXTENDED_UD_AV); 2570 dseg->av.key.qkey.qkey = cpu_to_be32(ud_wr(wr)->remote_qkey); 2571 } 2572 2573 static void set_data_ptr_seg(struct mlx5_wqe_data_seg *dseg, struct ib_sge *sg) 2574 { 2575 dseg->byte_count = cpu_to_be32(sg->length); 2576 dseg->lkey = cpu_to_be32(sg->lkey); 2577 dseg->addr = cpu_to_be64(sg->addr); 2578 } 2579 2580 static __be16 get_klm_octo(int npages) 2581 { 2582 return cpu_to_be16(ALIGN(npages, 8) / 2); 2583 } 2584 2585 static __be64 frwr_mkey_mask(void) 2586 { 2587 u64 result; 2588 2589 result = MLX5_MKEY_MASK_LEN | 2590 MLX5_MKEY_MASK_PAGE_SIZE | 2591 MLX5_MKEY_MASK_START_ADDR | 2592 MLX5_MKEY_MASK_EN_RINVAL | 2593 MLX5_MKEY_MASK_KEY | 2594 MLX5_MKEY_MASK_LR | 2595 MLX5_MKEY_MASK_LW | 2596 MLX5_MKEY_MASK_RR | 2597 MLX5_MKEY_MASK_RW | 2598 MLX5_MKEY_MASK_A | 2599 MLX5_MKEY_MASK_SMALL_FENCE | 2600 MLX5_MKEY_MASK_FREE; 2601 2602 return cpu_to_be64(result); 2603 } 2604 2605 static __be64 sig_mkey_mask(void) 2606 { 2607 u64 result; 2608 2609 result = MLX5_MKEY_MASK_LEN | 2610 MLX5_MKEY_MASK_PAGE_SIZE | 2611 MLX5_MKEY_MASK_START_ADDR | 2612 MLX5_MKEY_MASK_EN_SIGERR | 2613 MLX5_MKEY_MASK_EN_RINVAL | 2614 MLX5_MKEY_MASK_KEY | 2615 MLX5_MKEY_MASK_LR | 2616 MLX5_MKEY_MASK_LW | 2617 MLX5_MKEY_MASK_RR | 2618 MLX5_MKEY_MASK_RW | 2619 MLX5_MKEY_MASK_SMALL_FENCE | 2620 MLX5_MKEY_MASK_FREE | 2621 MLX5_MKEY_MASK_BSF_EN; 2622 2623 return cpu_to_be64(result); 2624 } 2625 2626 static void set_reg_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr, 2627 struct mlx5_ib_mr *mr) 2628 { 2629 int ndescs = mr->ndescs; 2630 2631 memset(umr, 0, sizeof(*umr)); 2632 2633 if (mr->access_mode == MLX5_ACCESS_MODE_KLM) 2634 /* KLMs take twice the size of MTTs */ 2635 ndescs *= 2; 2636 2637 umr->flags = MLX5_UMR_CHECK_NOT_FREE; 2638 umr->klm_octowords = get_klm_octo(ndescs); 2639 umr->mkey_mask = frwr_mkey_mask(); 2640 } 2641 2642 static void set_linv_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr) 2643 { 2644 memset(umr, 0, sizeof(*umr)); 2645 umr->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE); 2646 umr->flags = 1 << 7; 2647 } 2648 2649 static __be64 get_umr_reg_mr_mask(void) 2650 { 2651 u64 result; 2652 2653 result = MLX5_MKEY_MASK_LEN | 2654 MLX5_MKEY_MASK_PAGE_SIZE | 2655 MLX5_MKEY_MASK_START_ADDR | 2656 MLX5_MKEY_MASK_PD | 2657 MLX5_MKEY_MASK_LR | 2658 MLX5_MKEY_MASK_LW | 2659 MLX5_MKEY_MASK_KEY | 2660 MLX5_MKEY_MASK_RR | 2661 MLX5_MKEY_MASK_RW | 2662 MLX5_MKEY_MASK_A | 2663 MLX5_MKEY_MASK_FREE; 2664 2665 return cpu_to_be64(result); 2666 } 2667 2668 static __be64 get_umr_unreg_mr_mask(void) 2669 { 2670 u64 result; 2671 2672 result = MLX5_MKEY_MASK_FREE; 2673 2674 return cpu_to_be64(result); 2675 } 2676 2677 static __be64 get_umr_update_mtt_mask(void) 2678 { 2679 u64 result; 2680 2681 result = MLX5_MKEY_MASK_FREE; 2682 2683 return cpu_to_be64(result); 2684 } 2685 2686 static __be64 get_umr_update_translation_mask(void) 2687 { 2688 u64 result; 2689 2690 result = MLX5_MKEY_MASK_LEN | 2691 MLX5_MKEY_MASK_PAGE_SIZE | 2692 MLX5_MKEY_MASK_START_ADDR | 2693 MLX5_MKEY_MASK_KEY | 2694 MLX5_MKEY_MASK_FREE; 2695 2696 return cpu_to_be64(result); 2697 } 2698 2699 static __be64 get_umr_update_access_mask(void) 2700 { 2701 u64 result; 2702 2703 result = MLX5_MKEY_MASK_LW | 2704 MLX5_MKEY_MASK_RR | 2705 MLX5_MKEY_MASK_RW | 2706 MLX5_MKEY_MASK_A | 2707 MLX5_MKEY_MASK_KEY | 2708 MLX5_MKEY_MASK_FREE; 2709 2710 return cpu_to_be64(result); 2711 } 2712 2713 static __be64 get_umr_update_pd_mask(void) 
2714 { 2715 u64 result; 2716 2717 result = MLX5_MKEY_MASK_PD | 2718 MLX5_MKEY_MASK_KEY | 2719 MLX5_MKEY_MASK_FREE; 2720 2721 return cpu_to_be64(result); 2722 } 2723 2724 static void set_reg_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr, 2725 struct ib_send_wr *wr) 2726 { 2727 struct mlx5_umr_wr *umrwr = umr_wr(wr); 2728 2729 memset(umr, 0, sizeof(*umr)); 2730 2731 if (wr->send_flags & MLX5_IB_SEND_UMR_FAIL_IF_FREE) 2732 umr->flags = MLX5_UMR_CHECK_FREE; /* fail if free */ 2733 else 2734 umr->flags = MLX5_UMR_CHECK_NOT_FREE; /* fail if not free */ 2735 2736 if (!(wr->send_flags & MLX5_IB_SEND_UMR_UNREG)) { 2737 umr->klm_octowords = get_klm_octo(umrwr->npages); 2738 if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_MTT) { 2739 umr->mkey_mask = get_umr_update_mtt_mask(); 2740 umr->bsf_octowords = get_klm_octo(umrwr->target.offset); 2741 umr->flags |= MLX5_UMR_TRANSLATION_OFFSET_EN; 2742 } 2743 if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_TRANSLATION) 2744 umr->mkey_mask |= get_umr_update_translation_mask(); 2745 if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_ACCESS) 2746 umr->mkey_mask |= get_umr_update_access_mask(); 2747 if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_PD) 2748 umr->mkey_mask |= get_umr_update_pd_mask(); 2749 if (!umr->mkey_mask) 2750 umr->mkey_mask = get_umr_reg_mr_mask(); 2751 } else { 2752 umr->mkey_mask = get_umr_unreg_mr_mask(); 2753 } 2754 2755 if (!wr->num_sge) 2756 umr->flags |= MLX5_UMR_INLINE; 2757 } 2758 2759 static u8 get_umr_flags(int acc) 2760 { 2761 return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX5_PERM_ATOMIC : 0) | 2762 (acc & IB_ACCESS_REMOTE_WRITE ? MLX5_PERM_REMOTE_WRITE : 0) | 2763 (acc & IB_ACCESS_REMOTE_READ ? MLX5_PERM_REMOTE_READ : 0) | 2764 (acc & IB_ACCESS_LOCAL_WRITE ? MLX5_PERM_LOCAL_WRITE : 0) | 2765 MLX5_PERM_LOCAL_READ | MLX5_PERM_UMR_EN; 2766 } 2767 2768 static void set_reg_mkey_seg(struct mlx5_mkey_seg *seg, 2769 struct mlx5_ib_mr *mr, 2770 u32 key, int access) 2771 { 2772 int ndescs = ALIGN(mr->ndescs, 8) >> 1; 2773 2774 memset(seg, 0, sizeof(*seg)); 2775 2776 if (mr->access_mode == MLX5_ACCESS_MODE_MTT) 2777 seg->log2_page_size = ilog2(mr->ibmr.page_size); 2778 else if (mr->access_mode == MLX5_ACCESS_MODE_KLM) 2779 /* KLMs take twice the size of MTTs */ 2780 ndescs *= 2; 2781 2782 seg->flags = get_umr_flags(access) | mr->access_mode; 2783 seg->qpn_mkey7_0 = cpu_to_be32((key & 0xff) | 0xffffff00); 2784 seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL); 2785 seg->start_addr = cpu_to_be64(mr->ibmr.iova); 2786 seg->len = cpu_to_be64(mr->ibmr.length); 2787 seg->xlt_oct_size = cpu_to_be32(ndescs); 2788 } 2789 2790 static void set_linv_mkey_seg(struct mlx5_mkey_seg *seg) 2791 { 2792 memset(seg, 0, sizeof(*seg)); 2793 seg->status = MLX5_MKEY_STATUS_FREE; 2794 } 2795 2796 static void set_reg_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *wr) 2797 { 2798 struct mlx5_umr_wr *umrwr = umr_wr(wr); 2799 2800 memset(seg, 0, sizeof(*seg)); 2801 if (wr->send_flags & MLX5_IB_SEND_UMR_UNREG) { 2802 seg->status = MLX5_MKEY_STATUS_FREE; 2803 return; 2804 } 2805 2806 seg->flags = convert_access(umrwr->access_flags); 2807 if (!(wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_MTT)) { 2808 if (umrwr->pd) 2809 seg->flags_pd = cpu_to_be32(to_mpd(umrwr->pd)->pdn); 2810 seg->start_addr = cpu_to_be64(umrwr->target.virt_addr); 2811 } 2812 seg->len = cpu_to_be64(umrwr->length); 2813 seg->log2_page_size = umrwr->page_shift; 2814 seg->qpn_mkey7_0 = cpu_to_be32(0xffffff00 | 2815 mlx5_mkey_variant(umrwr->mkey)); 2816 } 2817 2818 static void set_reg_data_seg(struct mlx5_wqe_data_seg 
*dseg, 2819 struct mlx5_ib_mr *mr, 2820 struct mlx5_ib_pd *pd) 2821 { 2822 int bcount = mr->desc_size * mr->ndescs; 2823 2824 dseg->addr = cpu_to_be64(mr->desc_map); 2825 dseg->byte_count = cpu_to_be32(ALIGN(bcount, 64)); 2826 dseg->lkey = cpu_to_be32(pd->ibpd.local_dma_lkey); 2827 } 2828 2829 static __be32 send_ieth(struct ib_send_wr *wr) 2830 { 2831 switch (wr->opcode) { 2832 case IB_WR_SEND_WITH_IMM: 2833 case IB_WR_RDMA_WRITE_WITH_IMM: 2834 return wr->ex.imm_data; 2835 2836 case IB_WR_SEND_WITH_INV: 2837 return cpu_to_be32(wr->ex.invalidate_rkey); 2838 2839 default: 2840 return 0; 2841 } 2842 } 2843 2844 static u8 calc_sig(void *wqe, int size) 2845 { 2846 u8 *p = wqe; 2847 u8 res = 0; 2848 int i; 2849 2850 for (i = 0; i < size; i++) 2851 res ^= p[i]; 2852 2853 return ~res; 2854 } 2855 2856 static u8 wq_sig(void *wqe) 2857 { 2858 return calc_sig(wqe, (*((u8 *)wqe + 8) & 0x3f) << 4); 2859 } 2860 2861 static int set_data_inl_seg(struct mlx5_ib_qp *qp, struct ib_send_wr *wr, 2862 void *wqe, int *sz) 2863 { 2864 struct mlx5_wqe_inline_seg *seg; 2865 void *qend = qp->sq.qend; 2866 void *addr; 2867 int inl = 0; 2868 int copy; 2869 int len; 2870 int i; 2871 2872 seg = wqe; 2873 wqe += sizeof(*seg); 2874 for (i = 0; i < wr->num_sge; i++) { 2875 addr = (void *)(unsigned long)(wr->sg_list[i].addr); 2876 len = wr->sg_list[i].length; 2877 inl += len; 2878 2879 if (unlikely(inl > qp->max_inline_data)) 2880 return -ENOMEM; 2881 2882 if (unlikely(wqe + len > qend)) { 2883 copy = qend - wqe; 2884 memcpy(wqe, addr, copy); 2885 addr += copy; 2886 len -= copy; 2887 wqe = mlx5_get_send_wqe(qp, 0); 2888 } 2889 memcpy(wqe, addr, len); 2890 wqe += len; 2891 } 2892 2893 seg->byte_count = cpu_to_be32(inl | MLX5_INLINE_SEG); 2894 2895 *sz = ALIGN(inl + sizeof(seg->byte_count), 16) / 16; 2896 2897 return 0; 2898 } 2899 2900 static u16 prot_field_size(enum ib_signature_type type) 2901 { 2902 switch (type) { 2903 case IB_SIG_TYPE_T10_DIF: 2904 return MLX5_DIF_SIZE; 2905 default: 2906 return 0; 2907 } 2908 } 2909 2910 static u8 bs_selector(int block_size) 2911 { 2912 switch (block_size) { 2913 case 512: return 0x1; 2914 case 520: return 0x2; 2915 case 4096: return 0x3; 2916 case 4160: return 0x4; 2917 case 1073741824: return 0x5; 2918 default: return 0; 2919 } 2920 } 2921 2922 static void mlx5_fill_inl_bsf(struct ib_sig_domain *domain, 2923 struct mlx5_bsf_inl *inl) 2924 { 2925 /* Valid inline section and allow BSF refresh */ 2926 inl->vld_refresh = cpu_to_be16(MLX5_BSF_INL_VALID | 2927 MLX5_BSF_REFRESH_DIF); 2928 inl->dif_apptag = cpu_to_be16(domain->sig.dif.app_tag); 2929 inl->dif_reftag = cpu_to_be32(domain->sig.dif.ref_tag); 2930 /* repeating block */ 2931 inl->rp_inv_seed = MLX5_BSF_REPEAT_BLOCK; 2932 inl->sig_type = domain->sig.dif.bg_type == IB_T10DIF_CRC ? 
2933 MLX5_DIF_CRC : MLX5_DIF_IPCS; 2934 2935 if (domain->sig.dif.ref_remap) 2936 inl->dif_inc_ref_guard_check |= MLX5_BSF_INC_REFTAG; 2937 2938 if (domain->sig.dif.app_escape) { 2939 if (domain->sig.dif.ref_escape) 2940 inl->dif_inc_ref_guard_check |= MLX5_BSF_APPREF_ESCAPE; 2941 else 2942 inl->dif_inc_ref_guard_check |= MLX5_BSF_APPTAG_ESCAPE; 2943 } 2944 2945 inl->dif_app_bitmask_check = 2946 cpu_to_be16(domain->sig.dif.apptag_check_mask); 2947 } 2948 2949 static int mlx5_set_bsf(struct ib_mr *sig_mr, 2950 struct ib_sig_attrs *sig_attrs, 2951 struct mlx5_bsf *bsf, u32 data_size) 2952 { 2953 struct mlx5_core_sig_ctx *msig = to_mmr(sig_mr)->sig; 2954 struct mlx5_bsf_basic *basic = &bsf->basic; 2955 struct ib_sig_domain *mem = &sig_attrs->mem; 2956 struct ib_sig_domain *wire = &sig_attrs->wire; 2957 2958 memset(bsf, 0, sizeof(*bsf)); 2959 2960 /* Basic + Extended + Inline */ 2961 basic->bsf_size_sbs = 1 << 7; 2962 /* Input domain check byte mask */ 2963 basic->check_byte_mask = sig_attrs->check_mask; 2964 basic->raw_data_size = cpu_to_be32(data_size); 2965 2966 /* Memory domain */ 2967 switch (sig_attrs->mem.sig_type) { 2968 case IB_SIG_TYPE_NONE: 2969 break; 2970 case IB_SIG_TYPE_T10_DIF: 2971 basic->mem.bs_selector = bs_selector(mem->sig.dif.pi_interval); 2972 basic->m_bfs_psv = cpu_to_be32(msig->psv_memory.psv_idx); 2973 mlx5_fill_inl_bsf(mem, &bsf->m_inl); 2974 break; 2975 default: 2976 return -EINVAL; 2977 } 2978 2979 /* Wire domain */ 2980 switch (sig_attrs->wire.sig_type) { 2981 case IB_SIG_TYPE_NONE: 2982 break; 2983 case IB_SIG_TYPE_T10_DIF: 2984 if (mem->sig.dif.pi_interval == wire->sig.dif.pi_interval && 2985 mem->sig_type == wire->sig_type) { 2986 /* Same block structure */ 2987 basic->bsf_size_sbs |= 1 << 4; 2988 if (mem->sig.dif.bg_type == wire->sig.dif.bg_type) 2989 basic->wire.copy_byte_mask |= MLX5_CPY_GRD_MASK; 2990 if (mem->sig.dif.app_tag == wire->sig.dif.app_tag) 2991 basic->wire.copy_byte_mask |= MLX5_CPY_APP_MASK; 2992 if (mem->sig.dif.ref_tag == wire->sig.dif.ref_tag) 2993 basic->wire.copy_byte_mask |= MLX5_CPY_REF_MASK; 2994 } else 2995 basic->wire.bs_selector = bs_selector(wire->sig.dif.pi_interval); 2996 2997 basic->w_bfs_psv = cpu_to_be32(msig->psv_wire.psv_idx); 2998 mlx5_fill_inl_bsf(wire, &bsf->w_inl); 2999 break; 3000 default: 3001 return -EINVAL; 3002 } 3003 3004 return 0; 3005 } 3006 3007 static int set_sig_data_segment(struct ib_sig_handover_wr *wr, 3008 struct mlx5_ib_qp *qp, void **seg, int *size) 3009 { 3010 struct ib_sig_attrs *sig_attrs = wr->sig_attrs; 3011 struct ib_mr *sig_mr = wr->sig_mr; 3012 struct mlx5_bsf *bsf; 3013 u32 data_len = wr->wr.sg_list->length; 3014 u32 data_key = wr->wr.sg_list->lkey; 3015 u64 data_va = wr->wr.sg_list->addr; 3016 int ret; 3017 int wqe_size; 3018 3019 if (!wr->prot || 3020 (data_key == wr->prot->lkey && 3021 data_va == wr->prot->addr && 3022 data_len == wr->prot->length)) { 3023 /** 3024 * Source domain doesn't contain signature information 3025 * or data and protection are interleaved in memory. 
3026 * So need construct: 3027 * ------------------ 3028 * | data_klm | 3029 * ------------------ 3030 * | BSF | 3031 * ------------------ 3032 **/ 3033 struct mlx5_klm *data_klm = *seg; 3034 3035 data_klm->bcount = cpu_to_be32(data_len); 3036 data_klm->key = cpu_to_be32(data_key); 3037 data_klm->va = cpu_to_be64(data_va); 3038 wqe_size = ALIGN(sizeof(*data_klm), 64); 3039 } else { 3040 /** 3041 * Source domain contains signature information 3042 * So need construct a strided block format: 3043 * --------------------------- 3044 * | stride_block_ctrl | 3045 * --------------------------- 3046 * | data_klm | 3047 * --------------------------- 3048 * | prot_klm | 3049 * --------------------------- 3050 * | BSF | 3051 * --------------------------- 3052 **/ 3053 struct mlx5_stride_block_ctrl_seg *sblock_ctrl; 3054 struct mlx5_stride_block_entry *data_sentry; 3055 struct mlx5_stride_block_entry *prot_sentry; 3056 u32 prot_key = wr->prot->lkey; 3057 u64 prot_va = wr->prot->addr; 3058 u16 block_size = sig_attrs->mem.sig.dif.pi_interval; 3059 int prot_size; 3060 3061 sblock_ctrl = *seg; 3062 data_sentry = (void *)sblock_ctrl + sizeof(*sblock_ctrl); 3063 prot_sentry = (void *)data_sentry + sizeof(*data_sentry); 3064 3065 prot_size = prot_field_size(sig_attrs->mem.sig_type); 3066 if (!prot_size) { 3067 pr_err("Bad block size given: %u\n", block_size); 3068 return -EINVAL; 3069 } 3070 sblock_ctrl->bcount_per_cycle = cpu_to_be32(block_size + 3071 prot_size); 3072 sblock_ctrl->op = cpu_to_be32(MLX5_STRIDE_BLOCK_OP); 3073 sblock_ctrl->repeat_count = cpu_to_be32(data_len / block_size); 3074 sblock_ctrl->num_entries = cpu_to_be16(2); 3075 3076 data_sentry->bcount = cpu_to_be16(block_size); 3077 data_sentry->key = cpu_to_be32(data_key); 3078 data_sentry->va = cpu_to_be64(data_va); 3079 data_sentry->stride = cpu_to_be16(block_size); 3080 3081 prot_sentry->bcount = cpu_to_be16(prot_size); 3082 prot_sentry->key = cpu_to_be32(prot_key); 3083 prot_sentry->va = cpu_to_be64(prot_va); 3084 prot_sentry->stride = cpu_to_be16(prot_size); 3085 3086 wqe_size = ALIGN(sizeof(*sblock_ctrl) + sizeof(*data_sentry) + 3087 sizeof(*prot_sentry), 64); 3088 } 3089 3090 *seg += wqe_size; 3091 *size += wqe_size / 16; 3092 if (unlikely((*seg == qp->sq.qend))) 3093 *seg = mlx5_get_send_wqe(qp, 0); 3094 3095 bsf = *seg; 3096 ret = mlx5_set_bsf(sig_mr, sig_attrs, bsf, data_len); 3097 if (ret) 3098 return -EINVAL; 3099 3100 *seg += sizeof(*bsf); 3101 *size += sizeof(*bsf) / 16; 3102 if (unlikely((*seg == qp->sq.qend))) 3103 *seg = mlx5_get_send_wqe(qp, 0); 3104 3105 return 0; 3106 } 3107 3108 static void set_sig_mkey_segment(struct mlx5_mkey_seg *seg, 3109 struct ib_sig_handover_wr *wr, u32 nelements, 3110 u32 length, u32 pdn) 3111 { 3112 struct ib_mr *sig_mr = wr->sig_mr; 3113 u32 sig_key = sig_mr->rkey; 3114 u8 sigerr = to_mmr(sig_mr)->sig->sigerr_count & 1; 3115 3116 memset(seg, 0, sizeof(*seg)); 3117 3118 seg->flags = get_umr_flags(wr->access_flags) | 3119 MLX5_ACCESS_MODE_KLM; 3120 seg->qpn_mkey7_0 = cpu_to_be32((sig_key & 0xff) | 0xffffff00); 3121 seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL | sigerr << 26 | 3122 MLX5_MKEY_BSF_EN | pdn); 3123 seg->len = cpu_to_be64(length); 3124 seg->xlt_oct_size = cpu_to_be32(be16_to_cpu(get_klm_octo(nelements))); 3125 seg->bsfs_octo_size = cpu_to_be32(MLX5_MKEY_BSF_OCTO_SIZE); 3126 } 3127 3128 static void set_sig_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr, 3129 u32 nelements) 3130 { 3131 memset(umr, 0, sizeof(*umr)); 3132 3133 umr->flags = MLX5_FLAGS_INLINE | MLX5_FLAGS_CHECK_FREE; 3134 
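	/* Sizes below are expressed in octowords (16-byte units): the KLM
	 * table size is derived from the number of entries, the BSF size is
	 * the fixed MLX5_MKEY_BSF_OCTO_SIZE, and the mkey mask enables the
	 * signature-related mkey fields.
	 */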
umr->klm_octowords = get_klm_octo(nelements); 3135 umr->bsf_octowords = cpu_to_be16(MLX5_MKEY_BSF_OCTO_SIZE); 3136 umr->mkey_mask = sig_mkey_mask(); 3137 } 3138 3139 3140 static int set_sig_umr_wr(struct ib_send_wr *send_wr, struct mlx5_ib_qp *qp, 3141 void **seg, int *size) 3142 { 3143 struct ib_sig_handover_wr *wr = sig_handover_wr(send_wr); 3144 struct mlx5_ib_mr *sig_mr = to_mmr(wr->sig_mr); 3145 u32 pdn = get_pd(qp)->pdn; 3146 u32 klm_oct_size; 3147 int region_len, ret; 3148 3149 if (unlikely(wr->wr.num_sge != 1) || 3150 unlikely(wr->access_flags & IB_ACCESS_REMOTE_ATOMIC) || 3151 unlikely(!sig_mr->sig) || unlikely(!qp->signature_en) || 3152 unlikely(!sig_mr->sig->sig_status_checked)) 3153 return -EINVAL; 3154 3155 /* length of the protected region, data + protection */ 3156 region_len = wr->wr.sg_list->length; 3157 if (wr->prot && 3158 (wr->prot->lkey != wr->wr.sg_list->lkey || 3159 wr->prot->addr != wr->wr.sg_list->addr || 3160 wr->prot->length != wr->wr.sg_list->length)) 3161 region_len += wr->prot->length; 3162 3163 /** 3164 * KLM octoword size - if protection was provided 3165 * then we use strided block format (3 octowords), 3166 * else we use single KLM (1 octoword) 3167 **/ 3168 klm_oct_size = wr->prot ? 3 : 1; 3169 3170 set_sig_umr_segment(*seg, klm_oct_size); 3171 *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg); 3172 *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16; 3173 if (unlikely((*seg == qp->sq.qend))) 3174 *seg = mlx5_get_send_wqe(qp, 0); 3175 3176 set_sig_mkey_segment(*seg, wr, klm_oct_size, region_len, pdn); 3177 *seg += sizeof(struct mlx5_mkey_seg); 3178 *size += sizeof(struct mlx5_mkey_seg) / 16; 3179 if (unlikely((*seg == qp->sq.qend))) 3180 *seg = mlx5_get_send_wqe(qp, 0); 3181 3182 ret = set_sig_data_segment(wr, qp, seg, size); 3183 if (ret) 3184 return ret; 3185 3186 sig_mr->sig->sig_status_checked = false; 3187 return 0; 3188 } 3189 3190 static int set_psv_wr(struct ib_sig_domain *domain, 3191 u32 psv_idx, void **seg, int *size) 3192 { 3193 struct mlx5_seg_set_psv *psv_seg = *seg; 3194 3195 memset(psv_seg, 0, sizeof(*psv_seg)); 3196 psv_seg->psv_num = cpu_to_be32(psv_idx); 3197 switch (domain->sig_type) { 3198 case IB_SIG_TYPE_NONE: 3199 break; 3200 case IB_SIG_TYPE_T10_DIF: 3201 psv_seg->transient_sig = cpu_to_be32(domain->sig.dif.bg << 16 | 3202 domain->sig.dif.app_tag); 3203 psv_seg->ref_tag = cpu_to_be32(domain->sig.dif.ref_tag); 3204 break; 3205 default: 3206 pr_err("Bad signature type given.\n"); 3207 return 1; 3208 } 3209 3210 *seg += sizeof(*psv_seg); 3211 *size += sizeof(*psv_seg) / 16; 3212 3213 return 0; 3214 } 3215 3216 static int set_reg_wr(struct mlx5_ib_qp *qp, 3217 struct ib_reg_wr *wr, 3218 void **seg, int *size) 3219 { 3220 struct mlx5_ib_mr *mr = to_mmr(wr->mr); 3221 struct mlx5_ib_pd *pd = to_mpd(qp->ibqp.pd); 3222 3223 if (unlikely(wr->wr.send_flags & IB_SEND_INLINE)) { 3224 mlx5_ib_warn(to_mdev(qp->ibqp.device), 3225 "Invalid IB_SEND_INLINE send flag\n"); 3226 return -EINVAL; 3227 } 3228 3229 set_reg_umr_seg(*seg, mr); 3230 *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg); 3231 *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16; 3232 if (unlikely((*seg == qp->sq.qend))) 3233 *seg = mlx5_get_send_wqe(qp, 0); 3234 3235 set_reg_mkey_seg(*seg, mr, wr->key, wr->access); 3236 *seg += sizeof(struct mlx5_mkey_seg); 3237 *size += sizeof(struct mlx5_mkey_seg) / 16; 3238 if (unlikely((*seg == qp->sq.qend))) 3239 *seg = mlx5_get_send_wqe(qp, 0); 3240 3241 set_reg_data_seg(*seg, mr, pd); 3242 *seg += sizeof(struct mlx5_wqe_data_seg); 3243 *size += 
(sizeof(struct mlx5_wqe_data_seg) / 16); 3244 3245 return 0; 3246 } 3247 3248 static void set_linv_wr(struct mlx5_ib_qp *qp, void **seg, int *size) 3249 { 3250 set_linv_umr_seg(*seg); 3251 *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg); 3252 *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16; 3253 if (unlikely((*seg == qp->sq.qend))) 3254 *seg = mlx5_get_send_wqe(qp, 0); 3255 set_linv_mkey_seg(*seg); 3256 *seg += sizeof(struct mlx5_mkey_seg); 3257 *size += sizeof(struct mlx5_mkey_seg) / 16; 3258 if (unlikely((*seg == qp->sq.qend))) 3259 *seg = mlx5_get_send_wqe(qp, 0); 3260 } 3261 3262 static void dump_wqe(struct mlx5_ib_qp *qp, int idx, int size_16) 3263 { 3264 __be32 *p = NULL; 3265 int tidx = idx; 3266 int i, j; 3267 3268 pr_debug("dump wqe at %p\n", mlx5_get_send_wqe(qp, tidx)); 3269 for (i = 0, j = 0; i < size_16 * 4; i += 4, j += 4) { 3270 if ((i & 0xf) == 0) { 3271 void *buf = mlx5_get_send_wqe(qp, tidx); 3272 tidx = (tidx + 1) & (qp->sq.wqe_cnt - 1); 3273 p = buf; 3274 j = 0; 3275 } 3276 pr_debug("%08x %08x %08x %08x\n", be32_to_cpu(p[j]), 3277 be32_to_cpu(p[j + 1]), be32_to_cpu(p[j + 2]), 3278 be32_to_cpu(p[j + 3])); 3279 } 3280 } 3281 3282 static void mlx5_bf_copy(u64 __iomem *dst, u64 *src, 3283 unsigned bytecnt, struct mlx5_ib_qp *qp) 3284 { 3285 while (bytecnt > 0) { 3286 __iowrite64_copy(dst++, src++, 8); 3287 __iowrite64_copy(dst++, src++, 8); 3288 __iowrite64_copy(dst++, src++, 8); 3289 __iowrite64_copy(dst++, src++, 8); 3290 __iowrite64_copy(dst++, src++, 8); 3291 __iowrite64_copy(dst++, src++, 8); 3292 __iowrite64_copy(dst++, src++, 8); 3293 __iowrite64_copy(dst++, src++, 8); 3294 bytecnt -= 64; 3295 if (unlikely(src == qp->sq.qend)) 3296 src = mlx5_get_send_wqe(qp, 0); 3297 } 3298 } 3299 3300 static u8 get_fence(u8 fence, struct ib_send_wr *wr) 3301 { 3302 if (unlikely(wr->opcode == IB_WR_LOCAL_INV && 3303 wr->send_flags & IB_SEND_FENCE)) 3304 return MLX5_FENCE_MODE_STRONG_ORDERING; 3305 3306 if (unlikely(fence)) { 3307 if (wr->send_flags & IB_SEND_FENCE) 3308 return MLX5_FENCE_MODE_SMALL_AND_FENCE; 3309 else 3310 return fence; 3311 3312 } else { 3313 return 0; 3314 } 3315 } 3316 3317 static int begin_wqe(struct mlx5_ib_qp *qp, void **seg, 3318 struct mlx5_wqe_ctrl_seg **ctrl, 3319 struct ib_send_wr *wr, unsigned *idx, 3320 int *size, int nreq) 3321 { 3322 int err = 0; 3323 3324 if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq))) { 3325 err = -ENOMEM; 3326 return err; 3327 } 3328 3329 *idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1); 3330 *seg = mlx5_get_send_wqe(qp, *idx); 3331 *ctrl = *seg; 3332 *(uint32_t *)(*seg + 8) = 0; 3333 (*ctrl)->imm = send_ieth(wr); 3334 (*ctrl)->fm_ce_se = qp->sq_signal_bits | 3335 (wr->send_flags & IB_SEND_SIGNALED ? 3336 MLX5_WQE_CTRL_CQ_UPDATE : 0) | 3337 (wr->send_flags & IB_SEND_SOLICITED ? 
3338 MLX5_WQE_CTRL_SOLICITED : 0); 3339 3340 *seg += sizeof(**ctrl); 3341 *size = sizeof(**ctrl) / 16; 3342 3343 return err; 3344 } 3345 3346 static void finish_wqe(struct mlx5_ib_qp *qp, 3347 struct mlx5_wqe_ctrl_seg *ctrl, 3348 u8 size, unsigned idx, u64 wr_id, 3349 int nreq, u8 fence, u8 next_fence, 3350 u32 mlx5_opcode) 3351 { 3352 u8 opmod = 0; 3353 3354 ctrl->opmod_idx_opcode = cpu_to_be32(((u32)(qp->sq.cur_post) << 8) | 3355 mlx5_opcode | ((u32)opmod << 24)); 3356 ctrl->qpn_ds = cpu_to_be32(size | (qp->trans_qp.base.mqp.qpn << 8)); 3357 ctrl->fm_ce_se |= fence; 3358 qp->fm_cache = next_fence; 3359 if (unlikely(qp->wq_sig)) 3360 ctrl->signature = wq_sig(ctrl); 3361 3362 qp->sq.wrid[idx] = wr_id; 3363 qp->sq.w_list[idx].opcode = mlx5_opcode; 3364 qp->sq.wqe_head[idx] = qp->sq.head + nreq; 3365 qp->sq.cur_post += DIV_ROUND_UP(size * 16, MLX5_SEND_WQE_BB); 3366 qp->sq.w_list[idx].next = qp->sq.cur_post; 3367 } 3368 3369 3370 int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, 3371 struct ib_send_wr **bad_wr) 3372 { 3373 struct mlx5_wqe_ctrl_seg *ctrl = NULL; /* compiler warning */ 3374 struct mlx5_ib_dev *dev = to_mdev(ibqp->device); 3375 struct mlx5_ib_qp *qp; 3376 struct mlx5_ib_mr *mr; 3377 struct mlx5_wqe_data_seg *dpseg; 3378 struct mlx5_wqe_xrc_seg *xrc; 3379 struct mlx5_bf *bf; 3380 int uninitialized_var(size); 3381 void *qend; 3382 unsigned long flags; 3383 unsigned idx; 3384 int err = 0; 3385 int inl = 0; 3386 int num_sge; 3387 void *seg; 3388 int nreq; 3389 int i; 3390 u8 next_fence = 0; 3391 u8 fence; 3392 3393 if (unlikely(ibqp->qp_type == IB_QPT_GSI)) 3394 return mlx5_ib_gsi_post_send(ibqp, wr, bad_wr); 3395 3396 qp = to_mqp(ibqp); 3397 bf = qp->bf; 3398 qend = qp->sq.qend; 3399 3400 spin_lock_irqsave(&qp->sq.lock, flags); 3401 3402 for (nreq = 0; wr; nreq++, wr = wr->next) { 3403 if (unlikely(wr->opcode >= ARRAY_SIZE(mlx5_ib_opcode))) { 3404 mlx5_ib_warn(dev, "\n"); 3405 err = -EINVAL; 3406 *bad_wr = wr; 3407 goto out; 3408 } 3409 3410 fence = qp->fm_cache; 3411 num_sge = wr->num_sge; 3412 if (unlikely(num_sge > qp->sq.max_gs)) { 3413 mlx5_ib_warn(dev, "\n"); 3414 err = -ENOMEM; 3415 *bad_wr = wr; 3416 goto out; 3417 } 3418 3419 err = begin_wqe(qp, &seg, &ctrl, wr, &idx, &size, nreq); 3420 if (err) { 3421 mlx5_ib_warn(dev, "\n"); 3422 err = -ENOMEM; 3423 *bad_wr = wr; 3424 goto out; 3425 } 3426 3427 switch (ibqp->qp_type) { 3428 case IB_QPT_XRC_INI: 3429 xrc = seg; 3430 seg += sizeof(*xrc); 3431 size += sizeof(*xrc) / 16; 3432 /* fall through */ 3433 case IB_QPT_RC: 3434 switch (wr->opcode) { 3435 case IB_WR_RDMA_READ: 3436 case IB_WR_RDMA_WRITE: 3437 case IB_WR_RDMA_WRITE_WITH_IMM: 3438 set_raddr_seg(seg, rdma_wr(wr)->remote_addr, 3439 rdma_wr(wr)->rkey); 3440 seg += sizeof(struct mlx5_wqe_raddr_seg); 3441 size += sizeof(struct mlx5_wqe_raddr_seg) / 16; 3442 break; 3443 3444 case IB_WR_ATOMIC_CMP_AND_SWP: 3445 case IB_WR_ATOMIC_FETCH_AND_ADD: 3446 case IB_WR_MASKED_ATOMIC_CMP_AND_SWP: 3447 mlx5_ib_warn(dev, "Atomic operations are not supported yet\n"); 3448 err = -ENOSYS; 3449 *bad_wr = wr; 3450 goto out; 3451 3452 case IB_WR_LOCAL_INV: 3453 next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL; 3454 qp->sq.wr_data[idx] = IB_WR_LOCAL_INV; 3455 ctrl->imm = cpu_to_be32(wr->ex.invalidate_rkey); 3456 set_linv_wr(qp, &seg, &size); 3457 num_sge = 0; 3458 break; 3459 3460 case IB_WR_REG_MR: 3461 next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL; 3462 qp->sq.wr_data[idx] = IB_WR_REG_MR; 3463 ctrl->imm = cpu_to_be32(reg_wr(wr)->key); 3464 err = set_reg_wr(qp, reg_wr(wr), &seg, 
&size); 3465 if (err) { 3466 *bad_wr = wr; 3467 goto out; 3468 } 3469 num_sge = 0; 3470 break; 3471 3472 case IB_WR_REG_SIG_MR: 3473 qp->sq.wr_data[idx] = IB_WR_REG_SIG_MR; 3474 mr = to_mmr(sig_handover_wr(wr)->sig_mr); 3475 3476 ctrl->imm = cpu_to_be32(mr->ibmr.rkey); 3477 err = set_sig_umr_wr(wr, qp, &seg, &size); 3478 if (err) { 3479 mlx5_ib_warn(dev, "\n"); 3480 *bad_wr = wr; 3481 goto out; 3482 } 3483 3484 finish_wqe(qp, ctrl, size, idx, wr->wr_id, 3485 nreq, get_fence(fence, wr), 3486 next_fence, MLX5_OPCODE_UMR); 3487 /* 3488 * SET_PSV WQEs are not signaled and solicited 3489 * on error 3490 */ 3491 wr->send_flags &= ~IB_SEND_SIGNALED; 3492 wr->send_flags |= IB_SEND_SOLICITED; 3493 err = begin_wqe(qp, &seg, &ctrl, wr, 3494 &idx, &size, nreq); 3495 if (err) { 3496 mlx5_ib_warn(dev, "\n"); 3497 err = -ENOMEM; 3498 *bad_wr = wr; 3499 goto out; 3500 } 3501 3502 err = set_psv_wr(&sig_handover_wr(wr)->sig_attrs->mem, 3503 mr->sig->psv_memory.psv_idx, &seg, 3504 &size); 3505 if (err) { 3506 mlx5_ib_warn(dev, "\n"); 3507 *bad_wr = wr; 3508 goto out; 3509 } 3510 3511 finish_wqe(qp, ctrl, size, idx, wr->wr_id, 3512 nreq, get_fence(fence, wr), 3513 next_fence, MLX5_OPCODE_SET_PSV); 3514 err = begin_wqe(qp, &seg, &ctrl, wr, 3515 &idx, &size, nreq); 3516 if (err) { 3517 mlx5_ib_warn(dev, "\n"); 3518 err = -ENOMEM; 3519 *bad_wr = wr; 3520 goto out; 3521 } 3522 3523 next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL; 3524 err = set_psv_wr(&sig_handover_wr(wr)->sig_attrs->wire, 3525 mr->sig->psv_wire.psv_idx, &seg, 3526 &size); 3527 if (err) { 3528 mlx5_ib_warn(dev, "\n"); 3529 *bad_wr = wr; 3530 goto out; 3531 } 3532 3533 finish_wqe(qp, ctrl, size, idx, wr->wr_id, 3534 nreq, get_fence(fence, wr), 3535 next_fence, MLX5_OPCODE_SET_PSV); 3536 num_sge = 0; 3537 goto skip_psv; 3538 3539 default: 3540 break; 3541 } 3542 break; 3543 3544 case IB_QPT_UC: 3545 switch (wr->opcode) { 3546 case IB_WR_RDMA_WRITE: 3547 case IB_WR_RDMA_WRITE_WITH_IMM: 3548 set_raddr_seg(seg, rdma_wr(wr)->remote_addr, 3549 rdma_wr(wr)->rkey); 3550 seg += sizeof(struct mlx5_wqe_raddr_seg); 3551 size += sizeof(struct mlx5_wqe_raddr_seg) / 16; 3552 break; 3553 3554 default: 3555 break; 3556 } 3557 break; 3558 3559 case IB_QPT_SMI: 3560 case MLX5_IB_QPT_HW_GSI: 3561 set_datagram_seg(seg, wr); 3562 seg += sizeof(struct mlx5_wqe_datagram_seg); 3563 size += sizeof(struct mlx5_wqe_datagram_seg) / 16; 3564 if (unlikely((seg == qend))) 3565 seg = mlx5_get_send_wqe(qp, 0); 3566 break; 3567 case IB_QPT_UD: 3568 set_datagram_seg(seg, wr); 3569 seg += sizeof(struct mlx5_wqe_datagram_seg); 3570 size += sizeof(struct mlx5_wqe_datagram_seg) / 16; 3571 3572 if (unlikely((seg == qend))) 3573 seg = mlx5_get_send_wqe(qp, 0); 3574 3575 /* handle qp that supports ud offload */ 3576 if (qp->flags & IB_QP_CREATE_IPOIB_UD_LSO) { 3577 struct mlx5_wqe_eth_pad *pad; 3578 3579 pad = seg; 3580 memset(pad, 0, sizeof(struct mlx5_wqe_eth_pad)); 3581 seg += sizeof(struct mlx5_wqe_eth_pad); 3582 size += sizeof(struct mlx5_wqe_eth_pad) / 16; 3583 3584 seg = set_eth_seg(seg, wr, qend, qp, &size); 3585 3586 if (unlikely((seg == qend))) 3587 seg = mlx5_get_send_wqe(qp, 0); 3588 } 3589 break; 3590 case MLX5_IB_QPT_REG_UMR: 3591 if (wr->opcode != MLX5_IB_WR_UMR) { 3592 err = -EINVAL; 3593 mlx5_ib_warn(dev, "bad opcode\n"); 3594 goto out; 3595 } 3596 qp->sq.wr_data[idx] = MLX5_IB_WR_UMR; 3597 ctrl->imm = cpu_to_be32(umr_wr(wr)->mkey); 3598 set_reg_umr_segment(seg, wr); 3599 seg += sizeof(struct mlx5_wqe_umr_ctrl_seg); 3600 size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16; 3601 
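			/* The send queue buffer is a ring: if the segment
			 * pointer reached the end of the queue, wrap back to
			 * the first WQE before writing the mkey segment.
			 */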
if (unlikely((seg == qend))) 3602 seg = mlx5_get_send_wqe(qp, 0); 3603 set_reg_mkey_segment(seg, wr); 3604 seg += sizeof(struct mlx5_mkey_seg); 3605 size += sizeof(struct mlx5_mkey_seg) / 16; 3606 if (unlikely((seg == qend))) 3607 seg = mlx5_get_send_wqe(qp, 0); 3608 break; 3609 3610 default: 3611 break; 3612 } 3613 3614 if (wr->send_flags & IB_SEND_INLINE && num_sge) { 3615 int uninitialized_var(sz); 3616 3617 err = set_data_inl_seg(qp, wr, seg, &sz); 3618 if (unlikely(err)) { 3619 mlx5_ib_warn(dev, "\n"); 3620 *bad_wr = wr; 3621 goto out; 3622 } 3623 inl = 1; 3624 size += sz; 3625 } else { 3626 dpseg = seg; 3627 for (i = 0; i < num_sge; i++) { 3628 if (unlikely(dpseg == qend)) { 3629 seg = mlx5_get_send_wqe(qp, 0); 3630 dpseg = seg; 3631 } 3632 if (likely(wr->sg_list[i].length)) { 3633 set_data_ptr_seg(dpseg, wr->sg_list + i); 3634 size += sizeof(struct mlx5_wqe_data_seg) / 16; 3635 dpseg++; 3636 } 3637 } 3638 } 3639 3640 finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq, 3641 get_fence(fence, wr), next_fence, 3642 mlx5_ib_opcode[wr->opcode]); 3643 skip_psv: 3644 if (0) 3645 dump_wqe(qp, idx, size); 3646 } 3647 3648 out: 3649 if (likely(nreq)) { 3650 qp->sq.head += nreq; 3651 3652 /* Make sure that descriptors are written before 3653 * updating doorbell record and ringing the doorbell 3654 */ 3655 wmb(); 3656 3657 qp->db.db[MLX5_SND_DBR] = cpu_to_be32(qp->sq.cur_post); 3658 3659 /* Make sure doorbell record is visible to the HCA before 3660 * we hit doorbell */ 3661 wmb(); 3662 3663 if (bf->need_lock) 3664 spin_lock(&bf->lock); 3665 else 3666 __acquire(&bf->lock); 3667 3668 /* TBD enable WC */ 3669 if (0 && nreq == 1 && bf->uuarn && inl && size > 1 && size <= bf->buf_size / 16) { 3670 mlx5_bf_copy(bf->reg + bf->offset, (u64 *)ctrl, ALIGN(size * 16, 64), qp); 3671 /* wc_wmb(); */ 3672 } else { 3673 mlx5_write64((__be32 *)ctrl, bf->regreg + bf->offset, 3674 MLX5_GET_DOORBELL_LOCK(&bf->lock32)); 3675 /* Make sure doorbells don't leak out of SQ spinlock 3676 * and reach the HCA out of order. 
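			 * mmiowb() below keeps the MMIO doorbell write
			 * ordered with the later spin unlock on
			 * architectures that require it.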
3677 */ 3678 mmiowb(); 3679 } 3680 bf->offset ^= bf->buf_size; 3681 if (bf->need_lock) 3682 spin_unlock(&bf->lock); 3683 else 3684 __release(&bf->lock); 3685 } 3686 3687 spin_unlock_irqrestore(&qp->sq.lock, flags); 3688 3689 return err; 3690 } 3691 3692 static void set_sig_seg(struct mlx5_rwqe_sig *sig, int size) 3693 { 3694 sig->signature = calc_sig(sig, size); 3695 } 3696 3697 int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr, 3698 struct ib_recv_wr **bad_wr) 3699 { 3700 struct mlx5_ib_qp *qp = to_mqp(ibqp); 3701 struct mlx5_wqe_data_seg *scat; 3702 struct mlx5_rwqe_sig *sig; 3703 unsigned long flags; 3704 int err = 0; 3705 int nreq; 3706 int ind; 3707 int i; 3708 3709 if (unlikely(ibqp->qp_type == IB_QPT_GSI)) 3710 return mlx5_ib_gsi_post_recv(ibqp, wr, bad_wr); 3711 3712 spin_lock_irqsave(&qp->rq.lock, flags); 3713 3714 ind = qp->rq.head & (qp->rq.wqe_cnt - 1); 3715 3716 for (nreq = 0; wr; nreq++, wr = wr->next) { 3717 if (mlx5_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) { 3718 err = -ENOMEM; 3719 *bad_wr = wr; 3720 goto out; 3721 } 3722 3723 if (unlikely(wr->num_sge > qp->rq.max_gs)) { 3724 err = -EINVAL; 3725 *bad_wr = wr; 3726 goto out; 3727 } 3728 3729 scat = get_recv_wqe(qp, ind); 3730 if (qp->wq_sig) 3731 scat++; 3732 3733 for (i = 0; i < wr->num_sge; i++) 3734 set_data_ptr_seg(scat + i, wr->sg_list + i); 3735 3736 if (i < qp->rq.max_gs) { 3737 scat[i].byte_count = 0; 3738 scat[i].lkey = cpu_to_be32(MLX5_INVALID_LKEY); 3739 scat[i].addr = 0; 3740 } 3741 3742 if (qp->wq_sig) { 3743 sig = (struct mlx5_rwqe_sig *)scat; 3744 set_sig_seg(sig, (qp->rq.max_gs + 1) << 2); 3745 } 3746 3747 qp->rq.wrid[ind] = wr->wr_id; 3748 3749 ind = (ind + 1) & (qp->rq.wqe_cnt - 1); 3750 } 3751 3752 out: 3753 if (likely(nreq)) { 3754 qp->rq.head += nreq; 3755 3756 /* Make sure that descriptors are written before 3757 * doorbell record. 
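		 * so the HCA never fetches a receive WQE before its
		 * contents are visible in memory.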
3758 */ 3759 wmb(); 3760 3761 *qp->db.db = cpu_to_be32(qp->rq.head & 0xffff); 3762 } 3763 3764 spin_unlock_irqrestore(&qp->rq.lock, flags); 3765 3766 return err; 3767 } 3768 3769 static inline enum ib_qp_state to_ib_qp_state(enum mlx5_qp_state mlx5_state) 3770 { 3771 switch (mlx5_state) { 3772 case MLX5_QP_STATE_RST: return IB_QPS_RESET; 3773 case MLX5_QP_STATE_INIT: return IB_QPS_INIT; 3774 case MLX5_QP_STATE_RTR: return IB_QPS_RTR; 3775 case MLX5_QP_STATE_RTS: return IB_QPS_RTS; 3776 case MLX5_QP_STATE_SQ_DRAINING: 3777 case MLX5_QP_STATE_SQD: return IB_QPS_SQD; 3778 case MLX5_QP_STATE_SQER: return IB_QPS_SQE; 3779 case MLX5_QP_STATE_ERR: return IB_QPS_ERR; 3780 default: return -1; 3781 } 3782 } 3783 3784 static inline enum ib_mig_state to_ib_mig_state(int mlx5_mig_state) 3785 { 3786 switch (mlx5_mig_state) { 3787 case MLX5_QP_PM_ARMED: return IB_MIG_ARMED; 3788 case MLX5_QP_PM_REARM: return IB_MIG_REARM; 3789 case MLX5_QP_PM_MIGRATED: return IB_MIG_MIGRATED; 3790 default: return -1; 3791 } 3792 } 3793 3794 static int to_ib_qp_access_flags(int mlx5_flags) 3795 { 3796 int ib_flags = 0; 3797 3798 if (mlx5_flags & MLX5_QP_BIT_RRE) 3799 ib_flags |= IB_ACCESS_REMOTE_READ; 3800 if (mlx5_flags & MLX5_QP_BIT_RWE) 3801 ib_flags |= IB_ACCESS_REMOTE_WRITE; 3802 if (mlx5_flags & MLX5_QP_BIT_RAE) 3803 ib_flags |= IB_ACCESS_REMOTE_ATOMIC; 3804 3805 return ib_flags; 3806 } 3807 3808 static void to_ib_ah_attr(struct mlx5_ib_dev *ibdev, struct ib_ah_attr *ib_ah_attr, 3809 struct mlx5_qp_path *path) 3810 { 3811 struct mlx5_core_dev *dev = ibdev->mdev; 3812 3813 memset(ib_ah_attr, 0, sizeof(*ib_ah_attr)); 3814 ib_ah_attr->port_num = path->port; 3815 3816 if (ib_ah_attr->port_num == 0 || 3817 ib_ah_attr->port_num > MLX5_CAP_GEN(dev, num_ports)) 3818 return; 3819 3820 ib_ah_attr->sl = path->dci_cfi_prio_sl & 0xf; 3821 3822 ib_ah_attr->dlid = be16_to_cpu(path->rlid); 3823 ib_ah_attr->src_path_bits = path->grh_mlid & 0x7f; 3824 ib_ah_attr->static_rate = path->static_rate ? path->static_rate - 5 : 0; 3825 ib_ah_attr->ah_flags = (path->grh_mlid & (1 << 7)) ? 
IB_AH_GRH : 0; 3826 if (ib_ah_attr->ah_flags) { 3827 ib_ah_attr->grh.sgid_index = path->mgid_index; 3828 ib_ah_attr->grh.hop_limit = path->hop_limit; 3829 ib_ah_attr->grh.traffic_class = 3830 (be32_to_cpu(path->tclass_flowlabel) >> 20) & 0xff; 3831 ib_ah_attr->grh.flow_label = 3832 be32_to_cpu(path->tclass_flowlabel) & 0xfffff; 3833 memcpy(ib_ah_attr->grh.dgid.raw, 3834 path->rgid, sizeof(ib_ah_attr->grh.dgid.raw)); 3835 } 3836 } 3837 3838 static int query_raw_packet_qp_sq_state(struct mlx5_ib_dev *dev, 3839 struct mlx5_ib_sq *sq, 3840 u8 *sq_state) 3841 { 3842 void *out; 3843 void *sqc; 3844 int inlen; 3845 int err; 3846 3847 inlen = MLX5_ST_SZ_BYTES(query_sq_out); 3848 out = mlx5_vzalloc(inlen); 3849 if (!out) 3850 return -ENOMEM; 3851 3852 err = mlx5_core_query_sq(dev->mdev, sq->base.mqp.qpn, out); 3853 if (err) 3854 goto out; 3855 3856 sqc = MLX5_ADDR_OF(query_sq_out, out, sq_context); 3857 *sq_state = MLX5_GET(sqc, sqc, state); 3858 sq->state = *sq_state; 3859 3860 out: 3861 kvfree(out); 3862 return err; 3863 } 3864 3865 static int query_raw_packet_qp_rq_state(struct mlx5_ib_dev *dev, 3866 struct mlx5_ib_rq *rq, 3867 u8 *rq_state) 3868 { 3869 void *out; 3870 void *rqc; 3871 int inlen; 3872 int err; 3873 3874 inlen = MLX5_ST_SZ_BYTES(query_rq_out); 3875 out = mlx5_vzalloc(inlen); 3876 if (!out) 3877 return -ENOMEM; 3878 3879 err = mlx5_core_query_rq(dev->mdev, rq->base.mqp.qpn, out); 3880 if (err) 3881 goto out; 3882 3883 rqc = MLX5_ADDR_OF(query_rq_out, out, rq_context); 3884 *rq_state = MLX5_GET(rqc, rqc, state); 3885 rq->state = *rq_state; 3886 3887 out: 3888 kvfree(out); 3889 return err; 3890 } 3891 3892 static int sqrq_state_to_qp_state(u8 sq_state, u8 rq_state, 3893 struct mlx5_ib_qp *qp, u8 *qp_state) 3894 { 3895 static const u8 sqrq_trans[MLX5_RQ_NUM_STATE][MLX5_SQ_NUM_STATE] = { 3896 [MLX5_RQC_STATE_RST] = { 3897 [MLX5_SQC_STATE_RST] = IB_QPS_RESET, 3898 [MLX5_SQC_STATE_RDY] = MLX5_QP_STATE_BAD, 3899 [MLX5_SQC_STATE_ERR] = MLX5_QP_STATE_BAD, 3900 [MLX5_SQ_STATE_NA] = IB_QPS_RESET, 3901 }, 3902 [MLX5_RQC_STATE_RDY] = { 3903 [MLX5_SQC_STATE_RST] = MLX5_QP_STATE_BAD, 3904 [MLX5_SQC_STATE_RDY] = MLX5_QP_STATE, 3905 [MLX5_SQC_STATE_ERR] = IB_QPS_SQE, 3906 [MLX5_SQ_STATE_NA] = MLX5_QP_STATE, 3907 }, 3908 [MLX5_RQC_STATE_ERR] = { 3909 [MLX5_SQC_STATE_RST] = MLX5_QP_STATE_BAD, 3910 [MLX5_SQC_STATE_RDY] = MLX5_QP_STATE_BAD, 3911 [MLX5_SQC_STATE_ERR] = IB_QPS_ERR, 3912 [MLX5_SQ_STATE_NA] = IB_QPS_ERR, 3913 }, 3914 [MLX5_RQ_STATE_NA] = { 3915 [MLX5_SQC_STATE_RST] = IB_QPS_RESET, 3916 [MLX5_SQC_STATE_RDY] = MLX5_QP_STATE, 3917 [MLX5_SQC_STATE_ERR] = MLX5_QP_STATE, 3918 [MLX5_SQ_STATE_NA] = MLX5_QP_STATE_BAD, 3919 }, 3920 }; 3921 3922 *qp_state = sqrq_trans[rq_state][sq_state]; 3923 3924 if (*qp_state == MLX5_QP_STATE_BAD) { 3925 WARN(1, "Buggy Raw Packet QP state, SQ 0x%x state: 0x%x, RQ 0x%x state: 0x%x", 3926 qp->raw_packet_qp.sq.base.mqp.qpn, sq_state, 3927 qp->raw_packet_qp.rq.base.mqp.qpn, rq_state); 3928 return -EINVAL; 3929 } 3930 3931 if (*qp_state == MLX5_QP_STATE) 3932 *qp_state = qp->state; 3933 3934 return 0; 3935 } 3936 3937 static int query_raw_packet_qp_state(struct mlx5_ib_dev *dev, 3938 struct mlx5_ib_qp *qp, 3939 u8 *raw_packet_qp_state) 3940 { 3941 struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp; 3942 struct mlx5_ib_sq *sq = &raw_packet_qp->sq; 3943 struct mlx5_ib_rq *rq = &raw_packet_qp->rq; 3944 int err; 3945 u8 sq_state = MLX5_SQ_STATE_NA; 3946 u8 rq_state = MLX5_RQ_STATE_NA; 3947 3948 if (qp->sq.wqe_cnt) { 3949 err = 
query_raw_packet_qp_sq_state(dev, sq, &sq_state); 3950 if (err) 3951 return err; 3952 } 3953 3954 if (qp->rq.wqe_cnt) { 3955 err = query_raw_packet_qp_rq_state(dev, rq, &rq_state); 3956 if (err) 3957 return err; 3958 } 3959 3960 return sqrq_state_to_qp_state(sq_state, rq_state, qp, 3961 raw_packet_qp_state); 3962 } 3963 3964 static int query_qp_attr(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, 3965 struct ib_qp_attr *qp_attr) 3966 { 3967 struct mlx5_query_qp_mbox_out *outb; 3968 struct mlx5_qp_context *context; 3969 int mlx5_state; 3970 int err = 0; 3971 3972 outb = kzalloc(sizeof(*outb), GFP_KERNEL); 3973 if (!outb) 3974 return -ENOMEM; 3975 3976 context = &outb->ctx; 3977 err = mlx5_core_qp_query(dev->mdev, &qp->trans_qp.base.mqp, outb, 3978 sizeof(*outb)); 3979 if (err) 3980 goto out; 3981 3982 mlx5_state = be32_to_cpu(context->flags) >> 28; 3983 3984 qp->state = to_ib_qp_state(mlx5_state); 3985 qp_attr->path_mtu = context->mtu_msgmax >> 5; 3986 qp_attr->path_mig_state = 3987 to_ib_mig_state((be32_to_cpu(context->flags) >> 11) & 0x3); 3988 qp_attr->qkey = be32_to_cpu(context->qkey); 3989 qp_attr->rq_psn = be32_to_cpu(context->rnr_nextrecvpsn) & 0xffffff; 3990 qp_attr->sq_psn = be32_to_cpu(context->next_send_psn) & 0xffffff; 3991 qp_attr->dest_qp_num = be32_to_cpu(context->log_pg_sz_remote_qpn) & 0xffffff; 3992 qp_attr->qp_access_flags = 3993 to_ib_qp_access_flags(be32_to_cpu(context->params2)); 3994 3995 if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) { 3996 to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path); 3997 to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path); 3998 qp_attr->alt_pkey_index = context->alt_path.pkey_index & 0x7f; 3999 qp_attr->alt_port_num = qp_attr->alt_ah_attr.port_num; 4000 } 4001 4002 qp_attr->pkey_index = context->pri_path.pkey_index & 0x7f; 4003 qp_attr->port_num = context->pri_path.port; 4004 4005 /* qp_attr->en_sqd_async_notify is only applicable in modify qp */ 4006 qp_attr->sq_draining = mlx5_state == MLX5_QP_STATE_SQ_DRAINING; 4007 4008 qp_attr->max_rd_atomic = 1 << ((be32_to_cpu(context->params1) >> 21) & 0x7); 4009 4010 qp_attr->max_dest_rd_atomic = 4011 1 << ((be32_to_cpu(context->params2) >> 21) & 0x7); 4012 qp_attr->min_rnr_timer = 4013 (be32_to_cpu(context->rnr_nextrecvpsn) >> 24) & 0x1f; 4014 qp_attr->timeout = context->pri_path.ackto_lt >> 3; 4015 qp_attr->retry_cnt = (be32_to_cpu(context->params1) >> 16) & 0x7; 4016 qp_attr->rnr_retry = (be32_to_cpu(context->params1) >> 13) & 0x7; 4017 qp_attr->alt_timeout = context->alt_path.ackto_lt >> 3; 4018 4019 out: 4020 kfree(outb); 4021 return err; 4022 } 4023 4024 int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, 4025 int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr) 4026 { 4027 struct mlx5_ib_dev *dev = to_mdev(ibqp->device); 4028 struct mlx5_ib_qp *qp = to_mqp(ibqp); 4029 int err = 0; 4030 u8 raw_packet_qp_state; 4031 4032 if (unlikely(ibqp->qp_type == IB_QPT_GSI)) 4033 return mlx5_ib_gsi_query_qp(ibqp, qp_attr, qp_attr_mask, 4034 qp_init_attr); 4035 4036 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING 4037 /* 4038 * Wait for any outstanding page faults, in case the user frees memory 4039 * based upon this query's result. 
4040 */ 4041 flush_workqueue(mlx5_ib_page_fault_wq); 4042 #endif 4043 4044 mutex_lock(&qp->mutex); 4045 4046 if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET) { 4047 err = query_raw_packet_qp_state(dev, qp, &raw_packet_qp_state); 4048 if (err) 4049 goto out; 4050 qp->state = raw_packet_qp_state; 4051 qp_attr->port_num = 1; 4052 } else { 4053 err = query_qp_attr(dev, qp, qp_attr); 4054 if (err) 4055 goto out; 4056 } 4057 4058 qp_attr->qp_state = qp->state; 4059 qp_attr->cur_qp_state = qp_attr->qp_state; 4060 qp_attr->cap.max_recv_wr = qp->rq.wqe_cnt; 4061 qp_attr->cap.max_recv_sge = qp->rq.max_gs; 4062 4063 if (!ibqp->uobject) { 4064 qp_attr->cap.max_send_wr = qp->sq.wqe_cnt; 4065 qp_attr->cap.max_send_sge = qp->sq.max_gs; 4066 } else { 4067 qp_attr->cap.max_send_wr = 0; 4068 qp_attr->cap.max_send_sge = 0; 4069 } 4070 4071 /* We don't support inline sends for kernel QPs (yet), and we 4072 * don't know what userspace's value should be. 4073 */ 4074 qp_attr->cap.max_inline_data = 0; 4075 4076 qp_init_attr->cap = qp_attr->cap; 4077 4078 qp_init_attr->create_flags = 0; 4079 if (qp->flags & MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK) 4080 qp_init_attr->create_flags |= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK; 4081 4082 if (qp->flags & MLX5_IB_QP_CROSS_CHANNEL) 4083 qp_init_attr->create_flags |= IB_QP_CREATE_CROSS_CHANNEL; 4084 if (qp->flags & MLX5_IB_QP_MANAGED_SEND) 4085 qp_init_attr->create_flags |= IB_QP_CREATE_MANAGED_SEND; 4086 if (qp->flags & MLX5_IB_QP_MANAGED_RECV) 4087 qp_init_attr->create_flags |= IB_QP_CREATE_MANAGED_RECV; 4088 if (qp->flags & MLX5_IB_QP_SQPN_QP1) 4089 qp_init_attr->create_flags |= mlx5_ib_create_qp_sqpn_qp1(); 4090 4091 qp_init_attr->sq_sig_type = qp->sq_signal_bits & MLX5_WQE_CTRL_CQ_UPDATE ? 4092 IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR; 4093 4094 out: 4095 mutex_unlock(&qp->mutex); 4096 return err; 4097 } 4098 4099 struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev, 4100 struct ib_ucontext *context, 4101 struct ib_udata *udata) 4102 { 4103 struct mlx5_ib_dev *dev = to_mdev(ibdev); 4104 struct mlx5_ib_xrcd *xrcd; 4105 int err; 4106 4107 if (!MLX5_CAP_GEN(dev->mdev, xrc)) 4108 return ERR_PTR(-ENOSYS); 4109 4110 xrcd = kmalloc(sizeof(*xrcd), GFP_KERNEL); 4111 if (!xrcd) 4112 return ERR_PTR(-ENOMEM); 4113 4114 err = mlx5_core_xrcd_alloc(dev->mdev, &xrcd->xrcdn); 4115 if (err) { 4116 kfree(xrcd); 4117 return ERR_PTR(-ENOMEM); 4118 } 4119 4120 return &xrcd->ibxrcd; 4121 } 4122 4123 int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd) 4124 { 4125 struct mlx5_ib_dev *dev = to_mdev(xrcd->device); 4126 u32 xrcdn = to_mxrcd(xrcd)->xrcdn; 4127 int err; 4128 4129 err = mlx5_core_xrcd_dealloc(dev->mdev, xrcdn); 4130 if (err) { 4131 mlx5_ib_warn(dev, "failed to dealloc xrcdn 0x%x\n", xrcdn); 4132 return err; 4133 } 4134 4135 kfree(xrcd); 4136 4137 return 0; 4138 } 4139