1 /* 2 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. 3 * 4 * This software is available to you under a choice of one of two 5 * licenses. You may choose to be licensed under the terms of the GNU 6 * General Public License (GPL) Version 2, available from the file 7 * COPYING in the main directory of this source tree, or the 8 * OpenIB.org BSD license below: 9 * 10 * Redistribution and use in source and binary forms, with or 11 * without modification, are permitted provided that the following 12 * conditions are met: 13 * 14 * - Redistributions of source code must retain the above 15 * copyright notice, this list of conditions and the following 16 * disclaimer. 17 * 18 * - Redistributions in binary form must reproduce the above 19 * copyright notice, this list of conditions and the following 20 * disclaimer in the documentation and/or other materials 21 * provided with the distribution. 22 * 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 * SOFTWARE. 31 */ 32 33 #include <linux/module.h> 34 #include <rdma/ib_umem.h> 35 #include <rdma/ib_cache.h> 36 #include <rdma/ib_user_verbs.h> 37 #include <linux/mlx5/fs.h> 38 #include "mlx5_ib.h" 39 #include "ib_rep.h" 40 41 /* not supported currently */ 42 static int wq_signature; 43 44 enum { 45 MLX5_IB_ACK_REQ_FREQ = 8, 46 }; 47 48 enum { 49 MLX5_IB_DEFAULT_SCHED_QUEUE = 0x83, 50 MLX5_IB_DEFAULT_QP0_SCHED_QUEUE = 0x3f, 51 MLX5_IB_LINK_TYPE_IB = 0, 52 MLX5_IB_LINK_TYPE_ETH = 1 53 }; 54 55 enum { 56 MLX5_IB_SQ_STRIDE = 6, 57 MLX5_IB_SQ_UMR_INLINE_THRESHOLD = 64, 58 }; 59 60 static const u32 mlx5_ib_opcode[] = { 61 [IB_WR_SEND] = MLX5_OPCODE_SEND, 62 [IB_WR_LSO] = MLX5_OPCODE_LSO, 63 [IB_WR_SEND_WITH_IMM] = MLX5_OPCODE_SEND_IMM, 64 [IB_WR_RDMA_WRITE] = MLX5_OPCODE_RDMA_WRITE, 65 [IB_WR_RDMA_WRITE_WITH_IMM] = MLX5_OPCODE_RDMA_WRITE_IMM, 66 [IB_WR_RDMA_READ] = MLX5_OPCODE_RDMA_READ, 67 [IB_WR_ATOMIC_CMP_AND_SWP] = MLX5_OPCODE_ATOMIC_CS, 68 [IB_WR_ATOMIC_FETCH_AND_ADD] = MLX5_OPCODE_ATOMIC_FA, 69 [IB_WR_SEND_WITH_INV] = MLX5_OPCODE_SEND_INVAL, 70 [IB_WR_LOCAL_INV] = MLX5_OPCODE_UMR, 71 [IB_WR_REG_MR] = MLX5_OPCODE_UMR, 72 [IB_WR_MASKED_ATOMIC_CMP_AND_SWP] = MLX5_OPCODE_ATOMIC_MASKED_CS, 73 [IB_WR_MASKED_ATOMIC_FETCH_AND_ADD] = MLX5_OPCODE_ATOMIC_MASKED_FA, 74 [MLX5_IB_WR_UMR] = MLX5_OPCODE_UMR, 75 }; 76 77 struct mlx5_wqe_eth_pad { 78 u8 rsvd0[16]; 79 }; 80 81 enum raw_qp_set_mask_map { 82 MLX5_RAW_QP_MOD_SET_RQ_Q_CTR_ID = 1UL << 0, 83 MLX5_RAW_QP_RATE_LIMIT = 1UL << 1, 84 }; 85 86 struct mlx5_modify_raw_qp_param { 87 u16 operation; 88 89 u32 set_mask; /* raw_qp_set_mask_map */ 90 91 struct mlx5_rate_limit rl; 92 93 u8 rq_q_ctr_id; 94 }; 95 96 static void get_cqs(enum ib_qp_type qp_type, 97 struct ib_cq *ib_send_cq, struct ib_cq *ib_recv_cq, 98 struct mlx5_ib_cq **send_cq, struct mlx5_ib_cq **recv_cq); 99 100 static int is_qp0(enum ib_qp_type qp_type) 101 { 102 return qp_type == IB_QPT_SMI; 103 } 104 105 static int is_sqp(enum ib_qp_type qp_type) 106 { 107 return is_qp0(qp_type) || is_qp1(qp_type); 108 } 109 110 static void *get_wqe(struct mlx5_ib_qp *qp, int offset) 111 { 112 return mlx5_buf_offset(&qp->buf, offset); 113 } 114 115 static void *get_recv_wqe(struct mlx5_ib_qp *qp, int n) 116 { 117 return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift)); 118 } 119 120 void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n) 121 { 122 return get_wqe(qp, qp->sq.offset + (n << MLX5_IB_SQ_STRIDE)); 123 } 124 125 /** 126 * mlx5_ib_read_user_wqe() - Copy a user-space WQE to kernel space. 127 * 128 * @qp: QP to copy from. 129 * @send: copy from the send queue when non-zero, use the receive queue 130 * otherwise. 131 * @wqe_index: index to start copying from. For send work queues, the 132 * wqe_index is in units of MLX5_SEND_WQE_BB. 133 * For receive work queue, it is the number of work queue 134 * element in the queue. 135 * @buffer: destination buffer. 136 * @length: maximum number of bytes to copy. 137 * 138 * Copies at least a single WQE, but may copy more data. 139 * 140 * Return: the number of bytes copied, or an error code. 141 */ 142 int mlx5_ib_read_user_wqe(struct mlx5_ib_qp *qp, int send, int wqe_index, 143 void *buffer, u32 length, 144 struct mlx5_ib_qp_base *base) 145 { 146 struct ib_device *ibdev = qp->ibqp.device; 147 struct mlx5_ib_dev *dev = to_mdev(ibdev); 148 struct mlx5_ib_wq *wq = send ? &qp->sq : &qp->rq; 149 size_t offset; 150 size_t wq_end; 151 struct ib_umem *umem = base->ubuffer.umem; 152 u32 first_copy_length; 153 int wqe_length; 154 int ret; 155 156 if (wq->wqe_cnt == 0) { 157 mlx5_ib_dbg(dev, "mlx5_ib_read_user_wqe for a QP with wqe_cnt == 0. qp_type: 0x%x\n", 158 qp->ibqp.qp_type); 159 return -EINVAL; 160 } 161 162 offset = wq->offset + ((wqe_index % wq->wqe_cnt) << wq->wqe_shift); 163 wq_end = wq->offset + (wq->wqe_cnt << wq->wqe_shift); 164 165 if (send && length < sizeof(struct mlx5_wqe_ctrl_seg)) 166 return -EINVAL; 167 168 if (offset > umem->length || 169 (send && offset + sizeof(struct mlx5_wqe_ctrl_seg) > umem->length)) 170 return -EINVAL; 171 172 first_copy_length = min_t(u32, offset + length, wq_end) - offset; 173 ret = ib_umem_copy_from(buffer, umem, offset, first_copy_length); 174 if (ret) 175 return ret; 176 177 if (send) { 178 struct mlx5_wqe_ctrl_seg *ctrl = buffer; 179 int ds = be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_DS_MASK; 180 181 wqe_length = ds * MLX5_WQE_DS_UNITS; 182 } else { 183 wqe_length = 1 << wq->wqe_shift; 184 } 185 186 if (wqe_length <= first_copy_length) 187 return first_copy_length; 188 189 ret = ib_umem_copy_from(buffer + first_copy_length, umem, wq->offset, 190 wqe_length - first_copy_length); 191 if (ret) 192 return ret; 193 194 return wqe_length; 195 } 196 197 static void mlx5_ib_qp_event(struct mlx5_core_qp *qp, int type) 198 { 199 struct ib_qp *ibqp = &to_mibqp(qp)->ibqp; 200 struct ib_event event; 201 202 if (type == MLX5_EVENT_TYPE_PATH_MIG) { 203 /* This event is only valid for trans_qps */ 204 to_mibqp(qp)->port = to_mibqp(qp)->trans_qp.alt_port; 205 } 206 207 if (ibqp->event_handler) { 208 event.device = ibqp->device; 209 event.element.qp = ibqp; 210 switch (type) { 211 case MLX5_EVENT_TYPE_PATH_MIG: 212 event.event = IB_EVENT_PATH_MIG; 213 break; 214 case MLX5_EVENT_TYPE_COMM_EST: 215 event.event = IB_EVENT_COMM_EST; 216 break; 217 case MLX5_EVENT_TYPE_SQ_DRAINED: 218 event.event = IB_EVENT_SQ_DRAINED; 219 break; 220 case MLX5_EVENT_TYPE_SRQ_LAST_WQE: 221 event.event = IB_EVENT_QP_LAST_WQE_REACHED; 222 break; 223 case MLX5_EVENT_TYPE_WQ_CATAS_ERROR: 224 event.event = IB_EVENT_QP_FATAL; 225 break; 226 case MLX5_EVENT_TYPE_PATH_MIG_FAILED: 227 event.event = IB_EVENT_PATH_MIG_ERR; 228 break; 229 case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR: 230 event.event = IB_EVENT_QP_REQ_ERR; 231 break; 232 case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR: 233 event.event = IB_EVENT_QP_ACCESS_ERR; 234 break; 235 default: 236 pr_warn("mlx5_ib: Unexpected event type %d on QP %06x\n", type, qp->qpn); 237 return; 238 } 239 240 ibqp->event_handler(&event, ibqp->qp_context); 241 } 242 } 243 244 static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap, 245 int has_rq, struct mlx5_ib_qp *qp, struct mlx5_ib_create_qp *ucmd) 246 { 247 int wqe_size; 248 int wq_size; 249 250 /* Sanity check RQ size before proceeding */ 251 if (cap->max_recv_wr > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) 252 return -EINVAL; 253 254 if (!has_rq) { 255 qp->rq.max_gs = 0; 256 qp->rq.wqe_cnt = 0; 257 qp->rq.wqe_shift = 0; 258 cap->max_recv_wr = 0; 259 cap->max_recv_sge = 0; 260 } else { 261 if (ucmd) { 262 qp->rq.wqe_cnt = ucmd->rq_wqe_count; 263 if (ucmd->rq_wqe_shift > BITS_PER_BYTE * sizeof(ucmd->rq_wqe_shift)) 264 return -EINVAL; 265 qp->rq.wqe_shift = ucmd->rq_wqe_shift; 266 if ((1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) < qp->wq_sig) 267 return -EINVAL; 268 qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig; 269 qp->rq.max_post = qp->rq.wqe_cnt; 270 } else { 271 wqe_size = qp->wq_sig ? sizeof(struct mlx5_wqe_signature_seg) : 0; 272 wqe_size += cap->max_recv_sge * sizeof(struct mlx5_wqe_data_seg); 273 wqe_size = roundup_pow_of_two(wqe_size); 274 wq_size = roundup_pow_of_two(cap->max_recv_wr) * wqe_size; 275 wq_size = max_t(int, wq_size, MLX5_SEND_WQE_BB); 276 qp->rq.wqe_cnt = wq_size / wqe_size; 277 if (wqe_size > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq)) { 278 mlx5_ib_dbg(dev, "wqe_size %d, max %d\n", 279 wqe_size, 280 MLX5_CAP_GEN(dev->mdev, 281 max_wqe_sz_rq)); 282 return -EINVAL; 283 } 284 qp->rq.wqe_shift = ilog2(wqe_size); 285 qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig; 286 qp->rq.max_post = qp->rq.wqe_cnt; 287 } 288 } 289 290 return 0; 291 } 292 293 static int sq_overhead(struct ib_qp_init_attr *attr) 294 { 295 int size = 0; 296 297 switch (attr->qp_type) { 298 case IB_QPT_XRC_INI: 299 size += sizeof(struct mlx5_wqe_xrc_seg); 300 /* fall through */ 301 case IB_QPT_RC: 302 size += sizeof(struct mlx5_wqe_ctrl_seg) + 303 max(sizeof(struct mlx5_wqe_atomic_seg) + 304 sizeof(struct mlx5_wqe_raddr_seg), 305 sizeof(struct mlx5_wqe_umr_ctrl_seg) + 306 sizeof(struct mlx5_mkey_seg) + 307 MLX5_IB_SQ_UMR_INLINE_THRESHOLD / 308 MLX5_IB_UMR_OCTOWORD); 309 break; 310 311 case IB_QPT_XRC_TGT: 312 return 0; 313 314 case IB_QPT_UC: 315 size += sizeof(struct mlx5_wqe_ctrl_seg) + 316 max(sizeof(struct mlx5_wqe_raddr_seg), 317 sizeof(struct mlx5_wqe_umr_ctrl_seg) + 318 sizeof(struct mlx5_mkey_seg)); 319 break; 320 321 case IB_QPT_UD: 322 if (attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO) 323 size += sizeof(struct mlx5_wqe_eth_pad) + 324 sizeof(struct mlx5_wqe_eth_seg); 325 /* fall through */ 326 case IB_QPT_SMI: 327 case MLX5_IB_QPT_HW_GSI: 328 size += sizeof(struct mlx5_wqe_ctrl_seg) + 329 sizeof(struct mlx5_wqe_datagram_seg); 330 break; 331 332 case MLX5_IB_QPT_REG_UMR: 333 size += sizeof(struct mlx5_wqe_ctrl_seg) + 334 sizeof(struct mlx5_wqe_umr_ctrl_seg) + 335 sizeof(struct mlx5_mkey_seg); 336 break; 337 338 default: 339 return -EINVAL; 340 } 341 342 return size; 343 } 344 345 static int calc_send_wqe(struct ib_qp_init_attr *attr) 346 { 347 int inl_size = 0; 348 int size; 349 350 size = sq_overhead(attr); 351 if (size < 0) 352 return size; 353 354 if (attr->cap.max_inline_data) { 355 inl_size = size + sizeof(struct mlx5_wqe_inline_seg) + 356 attr->cap.max_inline_data; 357 } 358 359 size += attr->cap.max_send_sge * sizeof(struct mlx5_wqe_data_seg); 360 if (attr->create_flags & IB_QP_CREATE_SIGNATURE_EN && 361 ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB) < MLX5_SIG_WQE_SIZE) 362 return MLX5_SIG_WQE_SIZE; 363 else 364 return ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB); 365 } 366 367 static int get_send_sge(struct ib_qp_init_attr *attr, int wqe_size) 368 { 369 int max_sge; 370 371 if (attr->qp_type == IB_QPT_RC) 372 max_sge = (min_t(int, wqe_size, 512) - 373 sizeof(struct mlx5_wqe_ctrl_seg) - 374 sizeof(struct mlx5_wqe_raddr_seg)) / 375 sizeof(struct mlx5_wqe_data_seg); 376 else if (attr->qp_type == IB_QPT_XRC_INI) 377 max_sge = (min_t(int, wqe_size, 512) - 378 sizeof(struct mlx5_wqe_ctrl_seg) - 379 sizeof(struct mlx5_wqe_xrc_seg) - 380 sizeof(struct mlx5_wqe_raddr_seg)) / 381 sizeof(struct mlx5_wqe_data_seg); 382 else 383 max_sge = (wqe_size - sq_overhead(attr)) / 384 sizeof(struct mlx5_wqe_data_seg); 385 386 return min_t(int, max_sge, wqe_size - sq_overhead(attr) / 387 sizeof(struct mlx5_wqe_data_seg)); 388 } 389 390 static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr, 391 struct mlx5_ib_qp *qp) 392 { 393 int wqe_size; 394 int wq_size; 395 396 if (!attr->cap.max_send_wr) 397 return 0; 398 399 wqe_size = calc_send_wqe(attr); 400 mlx5_ib_dbg(dev, "wqe_size %d\n", wqe_size); 401 if (wqe_size < 0) 402 return wqe_size; 403 404 if (wqe_size > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)) { 405 mlx5_ib_dbg(dev, "wqe_size(%d) > max_sq_desc_sz(%d)\n", 406 wqe_size, MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)); 407 return -EINVAL; 408 } 409 410 qp->max_inline_data = wqe_size - sq_overhead(attr) - 411 sizeof(struct mlx5_wqe_inline_seg); 412 attr->cap.max_inline_data = qp->max_inline_data; 413 414 if (attr->create_flags & IB_QP_CREATE_SIGNATURE_EN) 415 qp->signature_en = true; 416 417 wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size); 418 qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB; 419 if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) { 420 mlx5_ib_dbg(dev, "send queue size (%d * %d / %d -> %d) exceeds limits(%d)\n", 421 attr->cap.max_send_wr, wqe_size, MLX5_SEND_WQE_BB, 422 qp->sq.wqe_cnt, 423 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz)); 424 return -ENOMEM; 425 } 426 qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB); 427 qp->sq.max_gs = get_send_sge(attr, wqe_size); 428 if (qp->sq.max_gs < attr->cap.max_send_sge) 429 return -ENOMEM; 430 431 attr->cap.max_send_sge = qp->sq.max_gs; 432 qp->sq.max_post = wq_size / wqe_size; 433 attr->cap.max_send_wr = qp->sq.max_post; 434 435 return wq_size; 436 } 437 438 static int set_user_buf_size(struct mlx5_ib_dev *dev, 439 struct mlx5_ib_qp *qp, 440 struct mlx5_ib_create_qp *ucmd, 441 struct mlx5_ib_qp_base *base, 442 struct ib_qp_init_attr *attr) 443 { 444 int desc_sz = 1 << qp->sq.wqe_shift; 445 446 if (desc_sz > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)) { 447 mlx5_ib_warn(dev, "desc_sz %d, max_sq_desc_sz %d\n", 448 desc_sz, MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)); 449 return -EINVAL; 450 } 451 452 if (ucmd->sq_wqe_count && ((1 << ilog2(ucmd->sq_wqe_count)) != ucmd->sq_wqe_count)) { 453 mlx5_ib_warn(dev, "sq_wqe_count %d, sq_wqe_count %d\n", 454 ucmd->sq_wqe_count, ucmd->sq_wqe_count); 455 return -EINVAL; 456 } 457 458 qp->sq.wqe_cnt = ucmd->sq_wqe_count; 459 460 if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) { 461 mlx5_ib_warn(dev, "wqe_cnt %d, max_wqes %d\n", 462 qp->sq.wqe_cnt, 463 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz)); 464 return -EINVAL; 465 } 466 467 if (attr->qp_type == IB_QPT_RAW_PACKET || 468 qp->flags & MLX5_IB_QP_UNDERLAY) { 469 base->ubuffer.buf_size = qp->rq.wqe_cnt << qp->rq.wqe_shift; 470 qp->raw_packet_qp.sq.ubuffer.buf_size = qp->sq.wqe_cnt << 6; 471 } else { 472 base->ubuffer.buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) + 473 (qp->sq.wqe_cnt << 6); 474 } 475 476 return 0; 477 } 478 479 static int qp_has_rq(struct ib_qp_init_attr *attr) 480 { 481 if (attr->qp_type == IB_QPT_XRC_INI || 482 attr->qp_type == IB_QPT_XRC_TGT || attr->srq || 483 attr->qp_type == MLX5_IB_QPT_REG_UMR || 484 !attr->cap.max_recv_wr) 485 return 0; 486 487 return 1; 488 } 489 490 enum { 491 /* this is the first blue flame register in the array of bfregs assigned 492 * to a processes. Since we do not use it for blue flame but rather 493 * regular 64 bit doorbells, we do not need a lock for maintaiing 494 * "odd/even" order 495 */ 496 NUM_NON_BLUE_FLAME_BFREGS = 1, 497 }; 498 499 static int max_bfregs(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi) 500 { 501 return get_num_static_uars(dev, bfregi) * MLX5_NON_FP_BFREGS_PER_UAR; 502 } 503 504 static int num_med_bfreg(struct mlx5_ib_dev *dev, 505 struct mlx5_bfreg_info *bfregi) 506 { 507 int n; 508 509 n = max_bfregs(dev, bfregi) - bfregi->num_low_latency_bfregs - 510 NUM_NON_BLUE_FLAME_BFREGS; 511 512 return n >= 0 ? n : 0; 513 } 514 515 static int first_med_bfreg(struct mlx5_ib_dev *dev, 516 struct mlx5_bfreg_info *bfregi) 517 { 518 return num_med_bfreg(dev, bfregi) ? 1 : -ENOMEM; 519 } 520 521 static int first_hi_bfreg(struct mlx5_ib_dev *dev, 522 struct mlx5_bfreg_info *bfregi) 523 { 524 int med; 525 526 med = num_med_bfreg(dev, bfregi); 527 return ++med; 528 } 529 530 static int alloc_high_class_bfreg(struct mlx5_ib_dev *dev, 531 struct mlx5_bfreg_info *bfregi) 532 { 533 int i; 534 535 for (i = first_hi_bfreg(dev, bfregi); i < max_bfregs(dev, bfregi); i++) { 536 if (!bfregi->count[i]) { 537 bfregi->count[i]++; 538 return i; 539 } 540 } 541 542 return -ENOMEM; 543 } 544 545 static int alloc_med_class_bfreg(struct mlx5_ib_dev *dev, 546 struct mlx5_bfreg_info *bfregi) 547 { 548 int minidx = first_med_bfreg(dev, bfregi); 549 int i; 550 551 if (minidx < 0) 552 return minidx; 553 554 for (i = minidx; i < first_hi_bfreg(dev, bfregi); i++) { 555 if (bfregi->count[i] < bfregi->count[minidx]) 556 minidx = i; 557 if (!bfregi->count[minidx]) 558 break; 559 } 560 561 bfregi->count[minidx]++; 562 return minidx; 563 } 564 565 static int alloc_bfreg(struct mlx5_ib_dev *dev, 566 struct mlx5_bfreg_info *bfregi) 567 { 568 int bfregn = -ENOMEM; 569 570 mutex_lock(&bfregi->lock); 571 if (bfregi->ver >= 2) { 572 bfregn = alloc_high_class_bfreg(dev, bfregi); 573 if (bfregn < 0) 574 bfregn = alloc_med_class_bfreg(dev, bfregi); 575 } 576 577 if (bfregn < 0) { 578 BUILD_BUG_ON(NUM_NON_BLUE_FLAME_BFREGS != 1); 579 bfregn = 0; 580 bfregi->count[bfregn]++; 581 } 582 mutex_unlock(&bfregi->lock); 583 584 return bfregn; 585 } 586 587 void mlx5_ib_free_bfreg(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi, int bfregn) 588 { 589 mutex_lock(&bfregi->lock); 590 bfregi->count[bfregn]--; 591 mutex_unlock(&bfregi->lock); 592 } 593 594 static enum mlx5_qp_state to_mlx5_state(enum ib_qp_state state) 595 { 596 switch (state) { 597 case IB_QPS_RESET: return MLX5_QP_STATE_RST; 598 case IB_QPS_INIT: return MLX5_QP_STATE_INIT; 599 case IB_QPS_RTR: return MLX5_QP_STATE_RTR; 600 case IB_QPS_RTS: return MLX5_QP_STATE_RTS; 601 case IB_QPS_SQD: return MLX5_QP_STATE_SQD; 602 case IB_QPS_SQE: return MLX5_QP_STATE_SQER; 603 case IB_QPS_ERR: return MLX5_QP_STATE_ERR; 604 default: return -1; 605 } 606 } 607 608 static int to_mlx5_st(enum ib_qp_type type) 609 { 610 switch (type) { 611 case IB_QPT_RC: return MLX5_QP_ST_RC; 612 case IB_QPT_UC: return MLX5_QP_ST_UC; 613 case IB_QPT_UD: return MLX5_QP_ST_UD; 614 case MLX5_IB_QPT_REG_UMR: return MLX5_QP_ST_REG_UMR; 615 case IB_QPT_XRC_INI: 616 case IB_QPT_XRC_TGT: return MLX5_QP_ST_XRC; 617 case IB_QPT_SMI: return MLX5_QP_ST_QP0; 618 case MLX5_IB_QPT_HW_GSI: return MLX5_QP_ST_QP1; 619 case MLX5_IB_QPT_DCI: return MLX5_QP_ST_DCI; 620 case IB_QPT_RAW_IPV6: return MLX5_QP_ST_RAW_IPV6; 621 case IB_QPT_RAW_PACKET: 622 case IB_QPT_RAW_ETHERTYPE: return MLX5_QP_ST_RAW_ETHERTYPE; 623 case IB_QPT_MAX: 624 default: return -EINVAL; 625 } 626 } 627 628 static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq, 629 struct mlx5_ib_cq *recv_cq); 630 static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq, 631 struct mlx5_ib_cq *recv_cq); 632 633 int bfregn_to_uar_index(struct mlx5_ib_dev *dev, 634 struct mlx5_bfreg_info *bfregi, u32 bfregn, 635 bool dyn_bfreg) 636 { 637 unsigned int bfregs_per_sys_page; 638 u32 index_of_sys_page; 639 u32 offset; 640 641 bfregs_per_sys_page = get_uars_per_sys_page(dev, bfregi->lib_uar_4k) * 642 MLX5_NON_FP_BFREGS_PER_UAR; 643 index_of_sys_page = bfregn / bfregs_per_sys_page; 644 645 if (dyn_bfreg) { 646 index_of_sys_page += bfregi->num_static_sys_pages; 647 648 if (index_of_sys_page >= bfregi->num_sys_pages) 649 return -EINVAL; 650 651 if (bfregn > bfregi->num_dyn_bfregs || 652 bfregi->sys_pages[index_of_sys_page] == MLX5_IB_INVALID_UAR_INDEX) { 653 mlx5_ib_dbg(dev, "Invalid dynamic uar index\n"); 654 return -EINVAL; 655 } 656 } 657 658 offset = bfregn % bfregs_per_sys_page / MLX5_NON_FP_BFREGS_PER_UAR; 659 return bfregi->sys_pages[index_of_sys_page] + offset; 660 } 661 662 static int mlx5_ib_umem_get(struct mlx5_ib_dev *dev, 663 struct ib_pd *pd, 664 unsigned long addr, size_t size, 665 struct ib_umem **umem, 666 int *npages, int *page_shift, int *ncont, 667 u32 *offset) 668 { 669 int err; 670 671 *umem = ib_umem_get(pd->uobject->context, addr, size, 0, 0); 672 if (IS_ERR(*umem)) { 673 mlx5_ib_dbg(dev, "umem_get failed\n"); 674 return PTR_ERR(*umem); 675 } 676 677 mlx5_ib_cont_pages(*umem, addr, 0, npages, page_shift, ncont, NULL); 678 679 err = mlx5_ib_get_buf_offset(addr, *page_shift, offset); 680 if (err) { 681 mlx5_ib_warn(dev, "bad offset\n"); 682 goto err_umem; 683 } 684 685 mlx5_ib_dbg(dev, "addr 0x%lx, size %zu, npages %d, page_shift %d, ncont %d, offset %d\n", 686 addr, size, *npages, *page_shift, *ncont, *offset); 687 688 return 0; 689 690 err_umem: 691 ib_umem_release(*umem); 692 *umem = NULL; 693 694 return err; 695 } 696 697 static void destroy_user_rq(struct mlx5_ib_dev *dev, struct ib_pd *pd, 698 struct mlx5_ib_rwq *rwq) 699 { 700 struct mlx5_ib_ucontext *context; 701 702 if (rwq->create_flags & MLX5_IB_WQ_FLAGS_DELAY_DROP) 703 atomic_dec(&dev->delay_drop.rqs_cnt); 704 705 context = to_mucontext(pd->uobject->context); 706 mlx5_ib_db_unmap_user(context, &rwq->db); 707 if (rwq->umem) 708 ib_umem_release(rwq->umem); 709 } 710 711 static int create_user_rq(struct mlx5_ib_dev *dev, struct ib_pd *pd, 712 struct mlx5_ib_rwq *rwq, 713 struct mlx5_ib_create_wq *ucmd) 714 { 715 struct mlx5_ib_ucontext *context; 716 int page_shift = 0; 717 int npages; 718 u32 offset = 0; 719 int ncont = 0; 720 int err; 721 722 if (!ucmd->buf_addr) 723 return -EINVAL; 724 725 context = to_mucontext(pd->uobject->context); 726 rwq->umem = ib_umem_get(pd->uobject->context, ucmd->buf_addr, 727 rwq->buf_size, 0, 0); 728 if (IS_ERR(rwq->umem)) { 729 mlx5_ib_dbg(dev, "umem_get failed\n"); 730 err = PTR_ERR(rwq->umem); 731 return err; 732 } 733 734 mlx5_ib_cont_pages(rwq->umem, ucmd->buf_addr, 0, &npages, &page_shift, 735 &ncont, NULL); 736 err = mlx5_ib_get_buf_offset(ucmd->buf_addr, page_shift, 737 &rwq->rq_page_offset); 738 if (err) { 739 mlx5_ib_warn(dev, "bad offset\n"); 740 goto err_umem; 741 } 742 743 rwq->rq_num_pas = ncont; 744 rwq->page_shift = page_shift; 745 rwq->log_page_size = page_shift - MLX5_ADAPTER_PAGE_SHIFT; 746 rwq->wq_sig = !!(ucmd->flags & MLX5_WQ_FLAG_SIGNATURE); 747 748 mlx5_ib_dbg(dev, "addr 0x%llx, size %zd, npages %d, page_shift %d, ncont %d, offset %d\n", 749 (unsigned long long)ucmd->buf_addr, rwq->buf_size, 750 npages, page_shift, ncont, offset); 751 752 err = mlx5_ib_db_map_user(context, ucmd->db_addr, &rwq->db); 753 if (err) { 754 mlx5_ib_dbg(dev, "map failed\n"); 755 goto err_umem; 756 } 757 758 rwq->create_type = MLX5_WQ_USER; 759 return 0; 760 761 err_umem: 762 ib_umem_release(rwq->umem); 763 return err; 764 } 765 766 static int adjust_bfregn(struct mlx5_ib_dev *dev, 767 struct mlx5_bfreg_info *bfregi, int bfregn) 768 { 769 return bfregn / MLX5_NON_FP_BFREGS_PER_UAR * MLX5_BFREGS_PER_UAR + 770 bfregn % MLX5_NON_FP_BFREGS_PER_UAR; 771 } 772 773 static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd, 774 struct mlx5_ib_qp *qp, struct ib_udata *udata, 775 struct ib_qp_init_attr *attr, 776 u32 **in, 777 struct mlx5_ib_create_qp_resp *resp, int *inlen, 778 struct mlx5_ib_qp_base *base) 779 { 780 struct mlx5_ib_ucontext *context; 781 struct mlx5_ib_create_qp ucmd; 782 struct mlx5_ib_ubuffer *ubuffer = &base->ubuffer; 783 int page_shift = 0; 784 int uar_index = 0; 785 int npages; 786 u32 offset = 0; 787 int bfregn; 788 int ncont = 0; 789 __be64 *pas; 790 void *qpc; 791 int err; 792 793 err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)); 794 if (err) { 795 mlx5_ib_dbg(dev, "copy failed\n"); 796 return err; 797 } 798 799 context = to_mucontext(pd->uobject->context); 800 if (ucmd.flags & MLX5_QP_FLAG_BFREG_INDEX) { 801 uar_index = bfregn_to_uar_index(dev, &context->bfregi, 802 ucmd.bfreg_index, true); 803 if (uar_index < 0) 804 return uar_index; 805 806 bfregn = MLX5_IB_INVALID_BFREG; 807 } else if (qp->flags & MLX5_IB_QP_CROSS_CHANNEL) { 808 /* 809 * TBD: should come from the verbs when we have the API 810 */ 811 /* In CROSS_CHANNEL CQ and QP must use the same UAR */ 812 bfregn = MLX5_CROSS_CHANNEL_BFREG; 813 } 814 else { 815 bfregn = alloc_bfreg(dev, &context->bfregi); 816 if (bfregn < 0) 817 return bfregn; 818 } 819 820 mlx5_ib_dbg(dev, "bfregn 0x%x, uar_index 0x%x\n", bfregn, uar_index); 821 if (bfregn != MLX5_IB_INVALID_BFREG) 822 uar_index = bfregn_to_uar_index(dev, &context->bfregi, bfregn, 823 false); 824 825 qp->rq.offset = 0; 826 qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB); 827 qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift; 828 829 err = set_user_buf_size(dev, qp, &ucmd, base, attr); 830 if (err) 831 goto err_bfreg; 832 833 if (ucmd.buf_addr && ubuffer->buf_size) { 834 ubuffer->buf_addr = ucmd.buf_addr; 835 err = mlx5_ib_umem_get(dev, pd, ubuffer->buf_addr, 836 ubuffer->buf_size, 837 &ubuffer->umem, &npages, &page_shift, 838 &ncont, &offset); 839 if (err) 840 goto err_bfreg; 841 } else { 842 ubuffer->umem = NULL; 843 } 844 845 *inlen = MLX5_ST_SZ_BYTES(create_qp_in) + 846 MLX5_FLD_SZ_BYTES(create_qp_in, pas[0]) * ncont; 847 *in = kvzalloc(*inlen, GFP_KERNEL); 848 if (!*in) { 849 err = -ENOMEM; 850 goto err_umem; 851 } 852 853 pas = (__be64 *)MLX5_ADDR_OF(create_qp_in, *in, pas); 854 if (ubuffer->umem) 855 mlx5_ib_populate_pas(dev, ubuffer->umem, page_shift, pas, 0); 856 857 qpc = MLX5_ADDR_OF(create_qp_in, *in, qpc); 858 859 MLX5_SET(qpc, qpc, log_page_size, page_shift - MLX5_ADAPTER_PAGE_SHIFT); 860 MLX5_SET(qpc, qpc, page_offset, offset); 861 862 MLX5_SET(qpc, qpc, uar_page, uar_index); 863 if (bfregn != MLX5_IB_INVALID_BFREG) 864 resp->bfreg_index = adjust_bfregn(dev, &context->bfregi, bfregn); 865 else 866 resp->bfreg_index = MLX5_IB_INVALID_BFREG; 867 qp->bfregn = bfregn; 868 869 err = mlx5_ib_db_map_user(context, ucmd.db_addr, &qp->db); 870 if (err) { 871 mlx5_ib_dbg(dev, "map failed\n"); 872 goto err_free; 873 } 874 875 err = ib_copy_to_udata(udata, resp, min(udata->outlen, sizeof(*resp))); 876 if (err) { 877 mlx5_ib_dbg(dev, "copy failed\n"); 878 goto err_unmap; 879 } 880 qp->create_type = MLX5_QP_USER; 881 882 return 0; 883 884 err_unmap: 885 mlx5_ib_db_unmap_user(context, &qp->db); 886 887 err_free: 888 kvfree(*in); 889 890 err_umem: 891 if (ubuffer->umem) 892 ib_umem_release(ubuffer->umem); 893 894 err_bfreg: 895 if (bfregn != MLX5_IB_INVALID_BFREG) 896 mlx5_ib_free_bfreg(dev, &context->bfregi, bfregn); 897 return err; 898 } 899 900 static void destroy_qp_user(struct mlx5_ib_dev *dev, struct ib_pd *pd, 901 struct mlx5_ib_qp *qp, struct mlx5_ib_qp_base *base) 902 { 903 struct mlx5_ib_ucontext *context; 904 905 context = to_mucontext(pd->uobject->context); 906 mlx5_ib_db_unmap_user(context, &qp->db); 907 if (base->ubuffer.umem) 908 ib_umem_release(base->ubuffer.umem); 909 910 /* 911 * Free only the BFREGs which are handled by the kernel. 912 * BFREGs of UARs allocated dynamically are handled by user. 913 */ 914 if (qp->bfregn != MLX5_IB_INVALID_BFREG) 915 mlx5_ib_free_bfreg(dev, &context->bfregi, qp->bfregn); 916 } 917 918 static int create_kernel_qp(struct mlx5_ib_dev *dev, 919 struct ib_qp_init_attr *init_attr, 920 struct mlx5_ib_qp *qp, 921 u32 **in, int *inlen, 922 struct mlx5_ib_qp_base *base) 923 { 924 int uar_index; 925 void *qpc; 926 int err; 927 928 if (init_attr->create_flags & ~(IB_QP_CREATE_SIGNATURE_EN | 929 IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK | 930 IB_QP_CREATE_IPOIB_UD_LSO | 931 IB_QP_CREATE_NETIF_QP | 932 mlx5_ib_create_qp_sqpn_qp1())) 933 return -EINVAL; 934 935 if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR) 936 qp->bf.bfreg = &dev->fp_bfreg; 937 else 938 qp->bf.bfreg = &dev->bfreg; 939 940 /* We need to divide by two since each register is comprised of 941 * two buffers of identical size, namely odd and even 942 */ 943 qp->bf.buf_size = (1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size)) / 2; 944 uar_index = qp->bf.bfreg->index; 945 946 err = calc_sq_size(dev, init_attr, qp); 947 if (err < 0) { 948 mlx5_ib_dbg(dev, "err %d\n", err); 949 return err; 950 } 951 952 qp->rq.offset = 0; 953 qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift; 954 base->ubuffer.buf_size = err + (qp->rq.wqe_cnt << qp->rq.wqe_shift); 955 956 err = mlx5_buf_alloc(dev->mdev, base->ubuffer.buf_size, &qp->buf); 957 if (err) { 958 mlx5_ib_dbg(dev, "err %d\n", err); 959 return err; 960 } 961 962 qp->sq.qend = mlx5_get_send_wqe(qp, qp->sq.wqe_cnt); 963 *inlen = MLX5_ST_SZ_BYTES(create_qp_in) + 964 MLX5_FLD_SZ_BYTES(create_qp_in, pas[0]) * qp->buf.npages; 965 *in = kvzalloc(*inlen, GFP_KERNEL); 966 if (!*in) { 967 err = -ENOMEM; 968 goto err_buf; 969 } 970 971 qpc = MLX5_ADDR_OF(create_qp_in, *in, qpc); 972 MLX5_SET(qpc, qpc, uar_page, uar_index); 973 MLX5_SET(qpc, qpc, log_page_size, qp->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT); 974 975 /* Set "fast registration enabled" for all kernel QPs */ 976 MLX5_SET(qpc, qpc, fre, 1); 977 MLX5_SET(qpc, qpc, rlky, 1); 978 979 if (init_attr->create_flags & mlx5_ib_create_qp_sqpn_qp1()) { 980 MLX5_SET(qpc, qpc, deth_sqpn, 1); 981 qp->flags |= MLX5_IB_QP_SQPN_QP1; 982 } 983 984 mlx5_fill_page_array(&qp->buf, 985 (__be64 *)MLX5_ADDR_OF(create_qp_in, *in, pas)); 986 987 err = mlx5_db_alloc(dev->mdev, &qp->db); 988 if (err) { 989 mlx5_ib_dbg(dev, "err %d\n", err); 990 goto err_free; 991 } 992 993 qp->sq.wrid = kvmalloc_array(qp->sq.wqe_cnt, 994 sizeof(*qp->sq.wrid), GFP_KERNEL); 995 qp->sq.wr_data = kvmalloc_array(qp->sq.wqe_cnt, 996 sizeof(*qp->sq.wr_data), GFP_KERNEL); 997 qp->rq.wrid = kvmalloc_array(qp->rq.wqe_cnt, 998 sizeof(*qp->rq.wrid), GFP_KERNEL); 999 qp->sq.w_list = kvmalloc_array(qp->sq.wqe_cnt, 1000 sizeof(*qp->sq.w_list), GFP_KERNEL); 1001 qp->sq.wqe_head = kvmalloc_array(qp->sq.wqe_cnt, 1002 sizeof(*qp->sq.wqe_head), GFP_KERNEL); 1003 1004 if (!qp->sq.wrid || !qp->sq.wr_data || !qp->rq.wrid || 1005 !qp->sq.w_list || !qp->sq.wqe_head) { 1006 err = -ENOMEM; 1007 goto err_wrid; 1008 } 1009 qp->create_type = MLX5_QP_KERNEL; 1010 1011 return 0; 1012 1013 err_wrid: 1014 kvfree(qp->sq.wqe_head); 1015 kvfree(qp->sq.w_list); 1016 kvfree(qp->sq.wrid); 1017 kvfree(qp->sq.wr_data); 1018 kvfree(qp->rq.wrid); 1019 mlx5_db_free(dev->mdev, &qp->db); 1020 1021 err_free: 1022 kvfree(*in); 1023 1024 err_buf: 1025 mlx5_buf_free(dev->mdev, &qp->buf); 1026 return err; 1027 } 1028 1029 static void destroy_qp_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp) 1030 { 1031 kvfree(qp->sq.wqe_head); 1032 kvfree(qp->sq.w_list); 1033 kvfree(qp->sq.wrid); 1034 kvfree(qp->sq.wr_data); 1035 kvfree(qp->rq.wrid); 1036 mlx5_db_free(dev->mdev, &qp->db); 1037 mlx5_buf_free(dev->mdev, &qp->buf); 1038 } 1039 1040 static u32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr) 1041 { 1042 if (attr->srq || (attr->qp_type == IB_QPT_XRC_TGT) || 1043 (attr->qp_type == MLX5_IB_QPT_DCI) || 1044 (attr->qp_type == IB_QPT_XRC_INI)) 1045 return MLX5_SRQ_RQ; 1046 else if (!qp->has_rq) 1047 return MLX5_ZERO_LEN_RQ; 1048 else 1049 return MLX5_NON_ZERO_RQ; 1050 } 1051 1052 static int is_connected(enum ib_qp_type qp_type) 1053 { 1054 if (qp_type == IB_QPT_RC || qp_type == IB_QPT_UC) 1055 return 1; 1056 1057 return 0; 1058 } 1059 1060 static int create_raw_packet_qp_tis(struct mlx5_ib_dev *dev, 1061 struct mlx5_ib_qp *qp, 1062 struct mlx5_ib_sq *sq, u32 tdn) 1063 { 1064 u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {0}; 1065 void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx); 1066 1067 MLX5_SET(tisc, tisc, transport_domain, tdn); 1068 if (qp->flags & MLX5_IB_QP_UNDERLAY) 1069 MLX5_SET(tisc, tisc, underlay_qpn, qp->underlay_qpn); 1070 1071 return mlx5_core_create_tis(dev->mdev, in, sizeof(in), &sq->tisn); 1072 } 1073 1074 static void destroy_raw_packet_qp_tis(struct mlx5_ib_dev *dev, 1075 struct mlx5_ib_sq *sq) 1076 { 1077 mlx5_core_destroy_tis(dev->mdev, sq->tisn); 1078 } 1079 1080 static void destroy_flow_rule_vport_sq(struct mlx5_ib_dev *dev, 1081 struct mlx5_ib_sq *sq) 1082 { 1083 if (sq->flow_rule) 1084 mlx5_del_flow_rules(sq->flow_rule); 1085 } 1086 1087 static int create_raw_packet_qp_sq(struct mlx5_ib_dev *dev, 1088 struct mlx5_ib_sq *sq, void *qpin, 1089 struct ib_pd *pd) 1090 { 1091 struct mlx5_ib_ubuffer *ubuffer = &sq->ubuffer; 1092 __be64 *pas; 1093 void *in; 1094 void *sqc; 1095 void *qpc = MLX5_ADDR_OF(create_qp_in, qpin, qpc); 1096 void *wq; 1097 int inlen; 1098 int err; 1099 int page_shift = 0; 1100 int npages; 1101 int ncont = 0; 1102 u32 offset = 0; 1103 1104 err = mlx5_ib_umem_get(dev, pd, ubuffer->buf_addr, ubuffer->buf_size, 1105 &sq->ubuffer.umem, &npages, &page_shift, 1106 &ncont, &offset); 1107 if (err) 1108 return err; 1109 1110 inlen = MLX5_ST_SZ_BYTES(create_sq_in) + sizeof(u64) * ncont; 1111 in = kvzalloc(inlen, GFP_KERNEL); 1112 if (!in) { 1113 err = -ENOMEM; 1114 goto err_umem; 1115 } 1116 1117 sqc = MLX5_ADDR_OF(create_sq_in, in, ctx); 1118 MLX5_SET(sqc, sqc, flush_in_error_en, 1); 1119 if (MLX5_CAP_ETH(dev->mdev, multi_pkt_send_wqe)) 1120 MLX5_SET(sqc, sqc, allow_multi_pkt_send_wqe, 1); 1121 MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST); 1122 MLX5_SET(sqc, sqc, user_index, MLX5_GET(qpc, qpc, user_index)); 1123 MLX5_SET(sqc, sqc, cqn, MLX5_GET(qpc, qpc, cqn_snd)); 1124 MLX5_SET(sqc, sqc, tis_lst_sz, 1); 1125 MLX5_SET(sqc, sqc, tis_num_0, sq->tisn); 1126 if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) && 1127 MLX5_CAP_ETH(dev->mdev, swp)) 1128 MLX5_SET(sqc, sqc, allow_swp, 1); 1129 1130 wq = MLX5_ADDR_OF(sqc, sqc, wq); 1131 MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC); 1132 MLX5_SET(wq, wq, pd, MLX5_GET(qpc, qpc, pd)); 1133 MLX5_SET(wq, wq, uar_page, MLX5_GET(qpc, qpc, uar_page)); 1134 MLX5_SET64(wq, wq, dbr_addr, MLX5_GET64(qpc, qpc, dbr_addr)); 1135 MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB)); 1136 MLX5_SET(wq, wq, log_wq_sz, MLX5_GET(qpc, qpc, log_sq_size)); 1137 MLX5_SET(wq, wq, log_wq_pg_sz, page_shift - MLX5_ADAPTER_PAGE_SHIFT); 1138 MLX5_SET(wq, wq, page_offset, offset); 1139 1140 pas = (__be64 *)MLX5_ADDR_OF(wq, wq, pas); 1141 mlx5_ib_populate_pas(dev, sq->ubuffer.umem, page_shift, pas, 0); 1142 1143 err = mlx5_core_create_sq_tracked(dev->mdev, in, inlen, &sq->base.mqp); 1144 1145 kvfree(in); 1146 1147 if (err) 1148 goto err_umem; 1149 1150 err = create_flow_rule_vport_sq(dev, sq); 1151 if (err) 1152 goto err_flow; 1153 1154 return 0; 1155 1156 err_flow: 1157 mlx5_core_destroy_sq_tracked(dev->mdev, &sq->base.mqp); 1158 1159 err_umem: 1160 ib_umem_release(sq->ubuffer.umem); 1161 sq->ubuffer.umem = NULL; 1162 1163 return err; 1164 } 1165 1166 static void destroy_raw_packet_qp_sq(struct mlx5_ib_dev *dev, 1167 struct mlx5_ib_sq *sq) 1168 { 1169 destroy_flow_rule_vport_sq(dev, sq); 1170 mlx5_core_destroy_sq_tracked(dev->mdev, &sq->base.mqp); 1171 ib_umem_release(sq->ubuffer.umem); 1172 } 1173 1174 static size_t get_rq_pas_size(void *qpc) 1175 { 1176 u32 log_page_size = MLX5_GET(qpc, qpc, log_page_size) + 12; 1177 u32 log_rq_stride = MLX5_GET(qpc, qpc, log_rq_stride); 1178 u32 log_rq_size = MLX5_GET(qpc, qpc, log_rq_size); 1179 u32 page_offset = MLX5_GET(qpc, qpc, page_offset); 1180 u32 po_quanta = 1 << (log_page_size - 6); 1181 u32 rq_sz = 1 << (log_rq_size + 4 + log_rq_stride); 1182 u32 page_size = 1 << log_page_size; 1183 u32 rq_sz_po = rq_sz + (page_offset * po_quanta); 1184 u32 rq_num_pas = (rq_sz_po + page_size - 1) / page_size; 1185 1186 return rq_num_pas * sizeof(u64); 1187 } 1188 1189 static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev, 1190 struct mlx5_ib_rq *rq, void *qpin, 1191 size_t qpinlen) 1192 { 1193 struct mlx5_ib_qp *mqp = rq->base.container_mibqp; 1194 __be64 *pas; 1195 __be64 *qp_pas; 1196 void *in; 1197 void *rqc; 1198 void *wq; 1199 void *qpc = MLX5_ADDR_OF(create_qp_in, qpin, qpc); 1200 size_t rq_pas_size = get_rq_pas_size(qpc); 1201 size_t inlen; 1202 int err; 1203 1204 if (qpinlen < rq_pas_size + MLX5_BYTE_OFF(create_qp_in, pas)) 1205 return -EINVAL; 1206 1207 inlen = MLX5_ST_SZ_BYTES(create_rq_in) + rq_pas_size; 1208 in = kvzalloc(inlen, GFP_KERNEL); 1209 if (!in) 1210 return -ENOMEM; 1211 1212 rqc = MLX5_ADDR_OF(create_rq_in, in, ctx); 1213 if (!(rq->flags & MLX5_IB_RQ_CVLAN_STRIPPING)) 1214 MLX5_SET(rqc, rqc, vsd, 1); 1215 MLX5_SET(rqc, rqc, mem_rq_type, MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE); 1216 MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST); 1217 MLX5_SET(rqc, rqc, flush_in_error_en, 1); 1218 MLX5_SET(rqc, rqc, user_index, MLX5_GET(qpc, qpc, user_index)); 1219 MLX5_SET(rqc, rqc, cqn, MLX5_GET(qpc, qpc, cqn_rcv)); 1220 1221 if (mqp->flags & MLX5_IB_QP_CAP_SCATTER_FCS) 1222 MLX5_SET(rqc, rqc, scatter_fcs, 1); 1223 1224 wq = MLX5_ADDR_OF(rqc, rqc, wq); 1225 MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC); 1226 if (rq->flags & MLX5_IB_RQ_PCI_WRITE_END_PADDING) 1227 MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN); 1228 MLX5_SET(wq, wq, page_offset, MLX5_GET(qpc, qpc, page_offset)); 1229 MLX5_SET(wq, wq, pd, MLX5_GET(qpc, qpc, pd)); 1230 MLX5_SET64(wq, wq, dbr_addr, MLX5_GET64(qpc, qpc, dbr_addr)); 1231 MLX5_SET(wq, wq, log_wq_stride, MLX5_GET(qpc, qpc, log_rq_stride) + 4); 1232 MLX5_SET(wq, wq, log_wq_pg_sz, MLX5_GET(qpc, qpc, log_page_size)); 1233 MLX5_SET(wq, wq, log_wq_sz, MLX5_GET(qpc, qpc, log_rq_size)); 1234 1235 pas = (__be64 *)MLX5_ADDR_OF(wq, wq, pas); 1236 qp_pas = (__be64 *)MLX5_ADDR_OF(create_qp_in, qpin, pas); 1237 memcpy(pas, qp_pas, rq_pas_size); 1238 1239 err = mlx5_core_create_rq_tracked(dev->mdev, in, inlen, &rq->base.mqp); 1240 1241 kvfree(in); 1242 1243 return err; 1244 } 1245 1246 static void destroy_raw_packet_qp_rq(struct mlx5_ib_dev *dev, 1247 struct mlx5_ib_rq *rq) 1248 { 1249 mlx5_core_destroy_rq_tracked(dev->mdev, &rq->base.mqp); 1250 } 1251 1252 static bool tunnel_offload_supported(struct mlx5_core_dev *dev) 1253 { 1254 return (MLX5_CAP_ETH(dev, tunnel_stateless_vxlan) || 1255 MLX5_CAP_ETH(dev, tunnel_stateless_gre) || 1256 MLX5_CAP_ETH(dev, tunnel_stateless_geneve_rx)); 1257 } 1258 1259 static int create_raw_packet_qp_tir(struct mlx5_ib_dev *dev, 1260 struct mlx5_ib_rq *rq, u32 tdn, 1261 bool tunnel_offload_en) 1262 { 1263 u32 *in; 1264 void *tirc; 1265 int inlen; 1266 int err; 1267 1268 inlen = MLX5_ST_SZ_BYTES(create_tir_in); 1269 in = kvzalloc(inlen, GFP_KERNEL); 1270 if (!in) 1271 return -ENOMEM; 1272 1273 tirc = MLX5_ADDR_OF(create_tir_in, in, ctx); 1274 MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_DIRECT); 1275 MLX5_SET(tirc, tirc, inline_rqn, rq->base.mqp.qpn); 1276 MLX5_SET(tirc, tirc, transport_domain, tdn); 1277 if (tunnel_offload_en) 1278 MLX5_SET(tirc, tirc, tunneled_offload_en, 1); 1279 1280 if (dev->rep) 1281 MLX5_SET(tirc, tirc, self_lb_block, 1282 MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST_); 1283 1284 err = mlx5_core_create_tir(dev->mdev, in, inlen, &rq->tirn); 1285 1286 kvfree(in); 1287 1288 return err; 1289 } 1290 1291 static void destroy_raw_packet_qp_tir(struct mlx5_ib_dev *dev, 1292 struct mlx5_ib_rq *rq) 1293 { 1294 mlx5_core_destroy_tir(dev->mdev, rq->tirn); 1295 } 1296 1297 static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, 1298 u32 *in, size_t inlen, 1299 struct ib_pd *pd) 1300 { 1301 struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp; 1302 struct mlx5_ib_sq *sq = &raw_packet_qp->sq; 1303 struct mlx5_ib_rq *rq = &raw_packet_qp->rq; 1304 struct ib_uobject *uobj = pd->uobject; 1305 struct ib_ucontext *ucontext = uobj->context; 1306 struct mlx5_ib_ucontext *mucontext = to_mucontext(ucontext); 1307 int err; 1308 u32 tdn = mucontext->tdn; 1309 1310 if (qp->sq.wqe_cnt) { 1311 err = create_raw_packet_qp_tis(dev, qp, sq, tdn); 1312 if (err) 1313 return err; 1314 1315 err = create_raw_packet_qp_sq(dev, sq, in, pd); 1316 if (err) 1317 goto err_destroy_tis; 1318 1319 sq->base.container_mibqp = qp; 1320 sq->base.mqp.event = mlx5_ib_qp_event; 1321 } 1322 1323 if (qp->rq.wqe_cnt) { 1324 rq->base.container_mibqp = qp; 1325 1326 if (qp->flags & MLX5_IB_QP_CVLAN_STRIPPING) 1327 rq->flags |= MLX5_IB_RQ_CVLAN_STRIPPING; 1328 if (qp->flags & MLX5_IB_QP_PCI_WRITE_END_PADDING) 1329 rq->flags |= MLX5_IB_RQ_PCI_WRITE_END_PADDING; 1330 err = create_raw_packet_qp_rq(dev, rq, in, inlen); 1331 if (err) 1332 goto err_destroy_sq; 1333 1334 1335 err = create_raw_packet_qp_tir(dev, rq, tdn, 1336 qp->tunnel_offload_en); 1337 if (err) 1338 goto err_destroy_rq; 1339 } 1340 1341 qp->trans_qp.base.mqp.qpn = qp->sq.wqe_cnt ? sq->base.mqp.qpn : 1342 rq->base.mqp.qpn; 1343 1344 return 0; 1345 1346 err_destroy_rq: 1347 destroy_raw_packet_qp_rq(dev, rq); 1348 err_destroy_sq: 1349 if (!qp->sq.wqe_cnt) 1350 return err; 1351 destroy_raw_packet_qp_sq(dev, sq); 1352 err_destroy_tis: 1353 destroy_raw_packet_qp_tis(dev, sq); 1354 1355 return err; 1356 } 1357 1358 static void destroy_raw_packet_qp(struct mlx5_ib_dev *dev, 1359 struct mlx5_ib_qp *qp) 1360 { 1361 struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp; 1362 struct mlx5_ib_sq *sq = &raw_packet_qp->sq; 1363 struct mlx5_ib_rq *rq = &raw_packet_qp->rq; 1364 1365 if (qp->rq.wqe_cnt) { 1366 destroy_raw_packet_qp_tir(dev, rq); 1367 destroy_raw_packet_qp_rq(dev, rq); 1368 } 1369 1370 if (qp->sq.wqe_cnt) { 1371 destroy_raw_packet_qp_sq(dev, sq); 1372 destroy_raw_packet_qp_tis(dev, sq); 1373 } 1374 } 1375 1376 static void raw_packet_qp_copy_info(struct mlx5_ib_qp *qp, 1377 struct mlx5_ib_raw_packet_qp *raw_packet_qp) 1378 { 1379 struct mlx5_ib_sq *sq = &raw_packet_qp->sq; 1380 struct mlx5_ib_rq *rq = &raw_packet_qp->rq; 1381 1382 sq->sq = &qp->sq; 1383 rq->rq = &qp->rq; 1384 sq->doorbell = &qp->db; 1385 rq->doorbell = &qp->db; 1386 } 1387 1388 static void destroy_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp) 1389 { 1390 mlx5_core_destroy_tir(dev->mdev, qp->rss_qp.tirn); 1391 } 1392 1393 static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, 1394 struct ib_pd *pd, 1395 struct ib_qp_init_attr *init_attr, 1396 struct ib_udata *udata) 1397 { 1398 struct ib_uobject *uobj = pd->uobject; 1399 struct ib_ucontext *ucontext = uobj->context; 1400 struct mlx5_ib_ucontext *mucontext = to_mucontext(ucontext); 1401 struct mlx5_ib_create_qp_resp resp = {}; 1402 int inlen; 1403 int err; 1404 u32 *in; 1405 void *tirc; 1406 void *hfso; 1407 u32 selected_fields = 0; 1408 u32 outer_l4; 1409 size_t min_resp_len; 1410 u32 tdn = mucontext->tdn; 1411 struct mlx5_ib_create_qp_rss ucmd = {}; 1412 size_t required_cmd_sz; 1413 1414 if (init_attr->qp_type != IB_QPT_RAW_PACKET) 1415 return -EOPNOTSUPP; 1416 1417 if (init_attr->create_flags || init_attr->send_cq) 1418 return -EINVAL; 1419 1420 min_resp_len = offsetof(typeof(resp), bfreg_index) + sizeof(resp.bfreg_index); 1421 if (udata->outlen < min_resp_len) 1422 return -EINVAL; 1423 1424 required_cmd_sz = offsetof(typeof(ucmd), flags) + sizeof(ucmd.flags); 1425 if (udata->inlen < required_cmd_sz) { 1426 mlx5_ib_dbg(dev, "invalid inlen\n"); 1427 return -EINVAL; 1428 } 1429 1430 if (udata->inlen > sizeof(ucmd) && 1431 !ib_is_udata_cleared(udata, sizeof(ucmd), 1432 udata->inlen - sizeof(ucmd))) { 1433 mlx5_ib_dbg(dev, "inlen is not supported\n"); 1434 return -EOPNOTSUPP; 1435 } 1436 1437 if (ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen))) { 1438 mlx5_ib_dbg(dev, "copy failed\n"); 1439 return -EFAULT; 1440 } 1441 1442 if (ucmd.comp_mask) { 1443 mlx5_ib_dbg(dev, "invalid comp mask\n"); 1444 return -EOPNOTSUPP; 1445 } 1446 1447 if (ucmd.flags & ~MLX5_QP_FLAG_TUNNEL_OFFLOADS) { 1448 mlx5_ib_dbg(dev, "invalid flags\n"); 1449 return -EOPNOTSUPP; 1450 } 1451 1452 if (ucmd.flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS && 1453 !tunnel_offload_supported(dev->mdev)) { 1454 mlx5_ib_dbg(dev, "tunnel offloads isn't supported\n"); 1455 return -EOPNOTSUPP; 1456 } 1457 1458 if (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_INNER && 1459 !(ucmd.flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS)) { 1460 mlx5_ib_dbg(dev, "Tunnel offloads must be set for inner RSS\n"); 1461 return -EOPNOTSUPP; 1462 } 1463 1464 err = ib_copy_to_udata(udata, &resp, min(udata->outlen, sizeof(resp))); 1465 if (err) { 1466 mlx5_ib_dbg(dev, "copy failed\n"); 1467 return -EINVAL; 1468 } 1469 1470 inlen = MLX5_ST_SZ_BYTES(create_tir_in); 1471 in = kvzalloc(inlen, GFP_KERNEL); 1472 if (!in) 1473 return -ENOMEM; 1474 1475 tirc = MLX5_ADDR_OF(create_tir_in, in, ctx); 1476 MLX5_SET(tirc, tirc, disp_type, 1477 MLX5_TIRC_DISP_TYPE_INDIRECT); 1478 MLX5_SET(tirc, tirc, indirect_table, 1479 init_attr->rwq_ind_tbl->ind_tbl_num); 1480 MLX5_SET(tirc, tirc, transport_domain, tdn); 1481 1482 hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer); 1483 1484 if (ucmd.flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS) 1485 MLX5_SET(tirc, tirc, tunneled_offload_en, 1); 1486 1487 if (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_INNER) 1488 hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_inner); 1489 else 1490 hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer); 1491 1492 switch (ucmd.rx_hash_function) { 1493 case MLX5_RX_HASH_FUNC_TOEPLITZ: 1494 { 1495 void *rss_key = MLX5_ADDR_OF(tirc, tirc, rx_hash_toeplitz_key); 1496 size_t len = MLX5_FLD_SZ_BYTES(tirc, rx_hash_toeplitz_key); 1497 1498 if (len != ucmd.rx_key_len) { 1499 err = -EINVAL; 1500 goto err; 1501 } 1502 1503 MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_TOEPLITZ); 1504 MLX5_SET(tirc, tirc, rx_hash_symmetric, 1); 1505 memcpy(rss_key, ucmd.rx_hash_key, len); 1506 break; 1507 } 1508 default: 1509 err = -EOPNOTSUPP; 1510 goto err; 1511 } 1512 1513 if (!ucmd.rx_hash_fields_mask) { 1514 /* special case when this TIR serves as steering entry without hashing */ 1515 if (!init_attr->rwq_ind_tbl->log_ind_tbl_size) 1516 goto create_tir; 1517 err = -EINVAL; 1518 goto err; 1519 } 1520 1521 if (((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV4) || 1522 (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV4)) && 1523 ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV6) || 1524 (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV6))) { 1525 err = -EINVAL; 1526 goto err; 1527 } 1528 1529 /* If none of IPV4 & IPV6 SRC/DST was set - this bit field is ignored */ 1530 if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV4) || 1531 (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV4)) 1532 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, 1533 MLX5_L3_PROT_TYPE_IPV4); 1534 else if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV6) || 1535 (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV6)) 1536 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, 1537 MLX5_L3_PROT_TYPE_IPV6); 1538 1539 outer_l4 = ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_TCP) || 1540 (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_TCP)) << 0 | 1541 ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_UDP) || 1542 (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_UDP)) << 1 | 1543 (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_IPSEC_SPI) << 2; 1544 1545 /* Check that only one l4 protocol is set */ 1546 if (outer_l4 & (outer_l4 - 1)) { 1547 err = -EINVAL; 1548 goto err; 1549 } 1550 1551 /* If none of TCP & UDP SRC/DST was set - this bit field is ignored */ 1552 if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_TCP) || 1553 (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_TCP)) 1554 MLX5_SET(rx_hash_field_select, hfso, l4_prot_type, 1555 MLX5_L4_PROT_TYPE_TCP); 1556 else if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_UDP) || 1557 (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_UDP)) 1558 MLX5_SET(rx_hash_field_select, hfso, l4_prot_type, 1559 MLX5_L4_PROT_TYPE_UDP); 1560 1561 if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV4) || 1562 (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV6)) 1563 selected_fields |= MLX5_HASH_FIELD_SEL_SRC_IP; 1564 1565 if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV4) || 1566 (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV6)) 1567 selected_fields |= MLX5_HASH_FIELD_SEL_DST_IP; 1568 1569 if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_TCP) || 1570 (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_UDP)) 1571 selected_fields |= MLX5_HASH_FIELD_SEL_L4_SPORT; 1572 1573 if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_TCP) || 1574 (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_UDP)) 1575 selected_fields |= MLX5_HASH_FIELD_SEL_L4_DPORT; 1576 1577 if (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_IPSEC_SPI) 1578 selected_fields |= MLX5_HASH_FIELD_SEL_IPSEC_SPI; 1579 1580 MLX5_SET(rx_hash_field_select, hfso, selected_fields, selected_fields); 1581 1582 create_tir: 1583 if (dev->rep) 1584 MLX5_SET(tirc, tirc, self_lb_block, 1585 MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST_); 1586 1587 err = mlx5_core_create_tir(dev->mdev, in, inlen, &qp->rss_qp.tirn); 1588 1589 if (err) 1590 goto err; 1591 1592 kvfree(in); 1593 /* qpn is reserved for that QP */ 1594 qp->trans_qp.base.mqp.qpn = 0; 1595 qp->flags |= MLX5_IB_QP_RSS; 1596 return 0; 1597 1598 err: 1599 kvfree(in); 1600 return err; 1601 } 1602 1603 static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd, 1604 struct ib_qp_init_attr *init_attr, 1605 struct ib_udata *udata, struct mlx5_ib_qp *qp) 1606 { 1607 struct mlx5_ib_resources *devr = &dev->devr; 1608 int inlen = MLX5_ST_SZ_BYTES(create_qp_in); 1609 struct mlx5_core_dev *mdev = dev->mdev; 1610 struct mlx5_ib_create_qp_resp resp = {}; 1611 struct mlx5_ib_cq *send_cq; 1612 struct mlx5_ib_cq *recv_cq; 1613 unsigned long flags; 1614 u32 uidx = MLX5_IB_DEFAULT_UIDX; 1615 struct mlx5_ib_create_qp ucmd; 1616 struct mlx5_ib_qp_base *base; 1617 int mlx5_st; 1618 void *qpc; 1619 u32 *in; 1620 int err; 1621 1622 mutex_init(&qp->mutex); 1623 spin_lock_init(&qp->sq.lock); 1624 spin_lock_init(&qp->rq.lock); 1625 1626 mlx5_st = to_mlx5_st(init_attr->qp_type); 1627 if (mlx5_st < 0) 1628 return -EINVAL; 1629 1630 if (init_attr->rwq_ind_tbl) { 1631 if (!udata) 1632 return -ENOSYS; 1633 1634 err = create_rss_raw_qp_tir(dev, qp, pd, init_attr, udata); 1635 return err; 1636 } 1637 1638 if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) { 1639 if (!MLX5_CAP_GEN(mdev, block_lb_mc)) { 1640 mlx5_ib_dbg(dev, "block multicast loopback isn't supported\n"); 1641 return -EINVAL; 1642 } else { 1643 qp->flags |= MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK; 1644 } 1645 } 1646 1647 if (init_attr->create_flags & 1648 (IB_QP_CREATE_CROSS_CHANNEL | 1649 IB_QP_CREATE_MANAGED_SEND | 1650 IB_QP_CREATE_MANAGED_RECV)) { 1651 if (!MLX5_CAP_GEN(mdev, cd)) { 1652 mlx5_ib_dbg(dev, "cross-channel isn't supported\n"); 1653 return -EINVAL; 1654 } 1655 if (init_attr->create_flags & IB_QP_CREATE_CROSS_CHANNEL) 1656 qp->flags |= MLX5_IB_QP_CROSS_CHANNEL; 1657 if (init_attr->create_flags & IB_QP_CREATE_MANAGED_SEND) 1658 qp->flags |= MLX5_IB_QP_MANAGED_SEND; 1659 if (init_attr->create_flags & IB_QP_CREATE_MANAGED_RECV) 1660 qp->flags |= MLX5_IB_QP_MANAGED_RECV; 1661 } 1662 1663 if (init_attr->qp_type == IB_QPT_UD && 1664 (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO)) 1665 if (!MLX5_CAP_GEN(mdev, ipoib_basic_offloads)) { 1666 mlx5_ib_dbg(dev, "ipoib UD lso qp isn't supported\n"); 1667 return -EOPNOTSUPP; 1668 } 1669 1670 if (init_attr->create_flags & IB_QP_CREATE_SCATTER_FCS) { 1671 if (init_attr->qp_type != IB_QPT_RAW_PACKET) { 1672 mlx5_ib_dbg(dev, "Scatter FCS is supported only for Raw Packet QPs"); 1673 return -EOPNOTSUPP; 1674 } 1675 if (!MLX5_CAP_GEN(dev->mdev, eth_net_offloads) || 1676 !MLX5_CAP_ETH(dev->mdev, scatter_fcs)) { 1677 mlx5_ib_dbg(dev, "Scatter FCS isn't supported\n"); 1678 return -EOPNOTSUPP; 1679 } 1680 qp->flags |= MLX5_IB_QP_CAP_SCATTER_FCS; 1681 } 1682 1683 if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) 1684 qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE; 1685 1686 if (init_attr->create_flags & IB_QP_CREATE_CVLAN_STRIPPING) { 1687 if (!(MLX5_CAP_GEN(dev->mdev, eth_net_offloads) && 1688 MLX5_CAP_ETH(dev->mdev, vlan_cap)) || 1689 (init_attr->qp_type != IB_QPT_RAW_PACKET)) 1690 return -EOPNOTSUPP; 1691 qp->flags |= MLX5_IB_QP_CVLAN_STRIPPING; 1692 } 1693 1694 if (pd && pd->uobject) { 1695 if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) { 1696 mlx5_ib_dbg(dev, "copy failed\n"); 1697 return -EFAULT; 1698 } 1699 1700 err = get_qp_user_index(to_mucontext(pd->uobject->context), 1701 &ucmd, udata->inlen, &uidx); 1702 if (err) 1703 return err; 1704 1705 qp->wq_sig = !!(ucmd.flags & MLX5_QP_FLAG_SIGNATURE); 1706 qp->scat_cqe = !!(ucmd.flags & MLX5_QP_FLAG_SCATTER_CQE); 1707 if (ucmd.flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS) { 1708 if (init_attr->qp_type != IB_QPT_RAW_PACKET || 1709 !tunnel_offload_supported(mdev)) { 1710 mlx5_ib_dbg(dev, "Tunnel offload isn't supported\n"); 1711 return -EOPNOTSUPP; 1712 } 1713 qp->tunnel_offload_en = true; 1714 } 1715 1716 if (init_attr->create_flags & IB_QP_CREATE_SOURCE_QPN) { 1717 if (init_attr->qp_type != IB_QPT_UD || 1718 (MLX5_CAP_GEN(dev->mdev, port_type) != 1719 MLX5_CAP_PORT_TYPE_IB) || 1720 !mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_BYPASS)) { 1721 mlx5_ib_dbg(dev, "Source QP option isn't supported\n"); 1722 return -EOPNOTSUPP; 1723 } 1724 1725 qp->flags |= MLX5_IB_QP_UNDERLAY; 1726 qp->underlay_qpn = init_attr->source_qpn; 1727 } 1728 } else { 1729 qp->wq_sig = !!wq_signature; 1730 } 1731 1732 base = (init_attr->qp_type == IB_QPT_RAW_PACKET || 1733 qp->flags & MLX5_IB_QP_UNDERLAY) ? 1734 &qp->raw_packet_qp.rq.base : 1735 &qp->trans_qp.base; 1736 1737 qp->has_rq = qp_has_rq(init_attr); 1738 err = set_rq_size(dev, &init_attr->cap, qp->has_rq, 1739 qp, (pd && pd->uobject) ? &ucmd : NULL); 1740 if (err) { 1741 mlx5_ib_dbg(dev, "err %d\n", err); 1742 return err; 1743 } 1744 1745 if (pd) { 1746 if (pd->uobject) { 1747 __u32 max_wqes = 1748 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz); 1749 mlx5_ib_dbg(dev, "requested sq_wqe_count (%d)\n", ucmd.sq_wqe_count); 1750 if (ucmd.rq_wqe_shift != qp->rq.wqe_shift || 1751 ucmd.rq_wqe_count != qp->rq.wqe_cnt) { 1752 mlx5_ib_dbg(dev, "invalid rq params\n"); 1753 return -EINVAL; 1754 } 1755 if (ucmd.sq_wqe_count > max_wqes) { 1756 mlx5_ib_dbg(dev, "requested sq_wqe_count (%d) > max allowed (%d)\n", 1757 ucmd.sq_wqe_count, max_wqes); 1758 return -EINVAL; 1759 } 1760 if (init_attr->create_flags & 1761 mlx5_ib_create_qp_sqpn_qp1()) { 1762 mlx5_ib_dbg(dev, "user-space is not allowed to create UD QPs spoofing as QP1\n"); 1763 return -EINVAL; 1764 } 1765 err = create_user_qp(dev, pd, qp, udata, init_attr, &in, 1766 &resp, &inlen, base); 1767 if (err) 1768 mlx5_ib_dbg(dev, "err %d\n", err); 1769 } else { 1770 err = create_kernel_qp(dev, init_attr, qp, &in, &inlen, 1771 base); 1772 if (err) 1773 mlx5_ib_dbg(dev, "err %d\n", err); 1774 } 1775 1776 if (err) 1777 return err; 1778 } else { 1779 in = kvzalloc(inlen, GFP_KERNEL); 1780 if (!in) 1781 return -ENOMEM; 1782 1783 qp->create_type = MLX5_QP_EMPTY; 1784 } 1785 1786 if (is_sqp(init_attr->qp_type)) 1787 qp->port = init_attr->port_num; 1788 1789 qpc = MLX5_ADDR_OF(create_qp_in, in, qpc); 1790 1791 MLX5_SET(qpc, qpc, st, mlx5_st); 1792 MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED); 1793 1794 if (init_attr->qp_type != MLX5_IB_QPT_REG_UMR) 1795 MLX5_SET(qpc, qpc, pd, to_mpd(pd ? pd : devr->p0)->pdn); 1796 else 1797 MLX5_SET(qpc, qpc, latency_sensitive, 1); 1798 1799 1800 if (qp->wq_sig) 1801 MLX5_SET(qpc, qpc, wq_signature, 1); 1802 1803 if (qp->flags & MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK) 1804 MLX5_SET(qpc, qpc, block_lb_mc, 1); 1805 1806 if (qp->flags & MLX5_IB_QP_CROSS_CHANNEL) 1807 MLX5_SET(qpc, qpc, cd_master, 1); 1808 if (qp->flags & MLX5_IB_QP_MANAGED_SEND) 1809 MLX5_SET(qpc, qpc, cd_slave_send, 1); 1810 if (qp->flags & MLX5_IB_QP_MANAGED_RECV) 1811 MLX5_SET(qpc, qpc, cd_slave_receive, 1); 1812 1813 if (qp->scat_cqe && is_connected(init_attr->qp_type)) { 1814 int rcqe_sz; 1815 int scqe_sz; 1816 1817 rcqe_sz = mlx5_ib_get_cqe_size(dev, init_attr->recv_cq); 1818 scqe_sz = mlx5_ib_get_cqe_size(dev, init_attr->send_cq); 1819 1820 if (rcqe_sz == 128) 1821 MLX5_SET(qpc, qpc, cs_res, MLX5_RES_SCAT_DATA64_CQE); 1822 else 1823 MLX5_SET(qpc, qpc, cs_res, MLX5_RES_SCAT_DATA32_CQE); 1824 1825 if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) { 1826 if (scqe_sz == 128) 1827 MLX5_SET(qpc, qpc, cs_req, MLX5_REQ_SCAT_DATA64_CQE); 1828 else 1829 MLX5_SET(qpc, qpc, cs_req, MLX5_REQ_SCAT_DATA32_CQE); 1830 } 1831 } 1832 1833 if (qp->rq.wqe_cnt) { 1834 MLX5_SET(qpc, qpc, log_rq_stride, qp->rq.wqe_shift - 4); 1835 MLX5_SET(qpc, qpc, log_rq_size, ilog2(qp->rq.wqe_cnt)); 1836 } 1837 1838 MLX5_SET(qpc, qpc, rq_type, get_rx_type(qp, init_attr)); 1839 1840 if (qp->sq.wqe_cnt) { 1841 MLX5_SET(qpc, qpc, log_sq_size, ilog2(qp->sq.wqe_cnt)); 1842 } else { 1843 MLX5_SET(qpc, qpc, no_sq, 1); 1844 if (init_attr->srq && 1845 init_attr->srq->srq_type == IB_SRQT_TM) 1846 MLX5_SET(qpc, qpc, offload_type, 1847 MLX5_QPC_OFFLOAD_TYPE_RNDV); 1848 } 1849 1850 /* Set default resources */ 1851 switch (init_attr->qp_type) { 1852 case IB_QPT_XRC_TGT: 1853 MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(devr->c0)->mcq.cqn); 1854 MLX5_SET(qpc, qpc, cqn_snd, to_mcq(devr->c0)->mcq.cqn); 1855 MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(devr->s0)->msrq.srqn); 1856 MLX5_SET(qpc, qpc, xrcd, to_mxrcd(init_attr->xrcd)->xrcdn); 1857 break; 1858 case IB_QPT_XRC_INI: 1859 MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(devr->c0)->mcq.cqn); 1860 MLX5_SET(qpc, qpc, xrcd, to_mxrcd(devr->x1)->xrcdn); 1861 MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(devr->s0)->msrq.srqn); 1862 break; 1863 default: 1864 if (init_attr->srq) { 1865 MLX5_SET(qpc, qpc, xrcd, to_mxrcd(devr->x0)->xrcdn); 1866 MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(init_attr->srq)->msrq.srqn); 1867 } else { 1868 MLX5_SET(qpc, qpc, xrcd, to_mxrcd(devr->x1)->xrcdn); 1869 MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(devr->s1)->msrq.srqn); 1870 } 1871 } 1872 1873 if (init_attr->send_cq) 1874 MLX5_SET(qpc, qpc, cqn_snd, to_mcq(init_attr->send_cq)->mcq.cqn); 1875 1876 if (init_attr->recv_cq) 1877 MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(init_attr->recv_cq)->mcq.cqn); 1878 1879 MLX5_SET64(qpc, qpc, dbr_addr, qp->db.dma); 1880 1881 /* 0xffffff means we ask to work with cqe version 0 */ 1882 if (MLX5_CAP_GEN(mdev, cqe_version) == MLX5_CQE_VERSION_V1) 1883 MLX5_SET(qpc, qpc, user_index, uidx); 1884 1885 /* we use IB_QP_CREATE_IPOIB_UD_LSO to indicates ipoib qp */ 1886 if (init_attr->qp_type == IB_QPT_UD && 1887 (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO)) { 1888 MLX5_SET(qpc, qpc, ulp_stateless_offload_mode, 1); 1889 qp->flags |= MLX5_IB_QP_LSO; 1890 } 1891 1892 if (init_attr->create_flags & IB_QP_CREATE_PCI_WRITE_END_PADDING) { 1893 if (!MLX5_CAP_GEN(dev->mdev, end_pad)) { 1894 mlx5_ib_dbg(dev, "scatter end padding is not supported\n"); 1895 err = -EOPNOTSUPP; 1896 goto err; 1897 } else if (init_attr->qp_type != IB_QPT_RAW_PACKET) { 1898 MLX5_SET(qpc, qpc, end_padding_mode, 1899 MLX5_WQ_END_PAD_MODE_ALIGN); 1900 } else { 1901 qp->flags |= MLX5_IB_QP_PCI_WRITE_END_PADDING; 1902 } 1903 } 1904 1905 if (inlen < 0) { 1906 err = -EINVAL; 1907 goto err; 1908 } 1909 1910 if (init_attr->qp_type == IB_QPT_RAW_PACKET || 1911 qp->flags & MLX5_IB_QP_UNDERLAY) { 1912 qp->raw_packet_qp.sq.ubuffer.buf_addr = ucmd.sq_buf_addr; 1913 raw_packet_qp_copy_info(qp, &qp->raw_packet_qp); 1914 err = create_raw_packet_qp(dev, qp, in, inlen, pd); 1915 } else { 1916 err = mlx5_core_create_qp(dev->mdev, &base->mqp, in, inlen); 1917 } 1918 1919 if (err) { 1920 mlx5_ib_dbg(dev, "create qp failed\n"); 1921 goto err_create; 1922 } 1923 1924 kvfree(in); 1925 1926 base->container_mibqp = qp; 1927 base->mqp.event = mlx5_ib_qp_event; 1928 1929 get_cqs(init_attr->qp_type, init_attr->send_cq, init_attr->recv_cq, 1930 &send_cq, &recv_cq); 1931 spin_lock_irqsave(&dev->reset_flow_resource_lock, flags); 1932 mlx5_ib_lock_cqs(send_cq, recv_cq); 1933 /* Maintain device to QPs access, needed for further handling via reset 1934 * flow 1935 */ 1936 list_add_tail(&qp->qps_list, &dev->qp_list); 1937 /* Maintain CQ to QPs access, needed for further handling via reset flow 1938 */ 1939 if (send_cq) 1940 list_add_tail(&qp->cq_send_list, &send_cq->list_send_qp); 1941 if (recv_cq) 1942 list_add_tail(&qp->cq_recv_list, &recv_cq->list_recv_qp); 1943 mlx5_ib_unlock_cqs(send_cq, recv_cq); 1944 spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags); 1945 1946 return 0; 1947 1948 err_create: 1949 if (qp->create_type == MLX5_QP_USER) 1950 destroy_qp_user(dev, pd, qp, base); 1951 else if (qp->create_type == MLX5_QP_KERNEL) 1952 destroy_qp_kernel(dev, qp); 1953 1954 err: 1955 kvfree(in); 1956 return err; 1957 } 1958 1959 static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv_cq) 1960 __acquires(&send_cq->lock) __acquires(&recv_cq->lock) 1961 { 1962 if (send_cq) { 1963 if (recv_cq) { 1964 if (send_cq->mcq.cqn < recv_cq->mcq.cqn) { 1965 spin_lock(&send_cq->lock); 1966 spin_lock_nested(&recv_cq->lock, 1967 SINGLE_DEPTH_NESTING); 1968 } else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) { 1969 spin_lock(&send_cq->lock); 1970 __acquire(&recv_cq->lock); 1971 } else { 1972 spin_lock(&recv_cq->lock); 1973 spin_lock_nested(&send_cq->lock, 1974 SINGLE_DEPTH_NESTING); 1975 } 1976 } else { 1977 spin_lock(&send_cq->lock); 1978 __acquire(&recv_cq->lock); 1979 } 1980 } else if (recv_cq) { 1981 spin_lock(&recv_cq->lock); 1982 __acquire(&send_cq->lock); 1983 } else { 1984 __acquire(&send_cq->lock); 1985 __acquire(&recv_cq->lock); 1986 } 1987 } 1988 1989 static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv_cq) 1990 __releases(&send_cq->lock) __releases(&recv_cq->lock) 1991 { 1992 if (send_cq) { 1993 if (recv_cq) { 1994 if (send_cq->mcq.cqn < recv_cq->mcq.cqn) { 1995 spin_unlock(&recv_cq->lock); 1996 spin_unlock(&send_cq->lock); 1997 } else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) { 1998 __release(&recv_cq->lock); 1999 spin_unlock(&send_cq->lock); 2000 } else { 2001 spin_unlock(&send_cq->lock); 2002 spin_unlock(&recv_cq->lock); 2003 } 2004 } else { 2005 __release(&recv_cq->lock); 2006 spin_unlock(&send_cq->lock); 2007 } 2008 } else if (recv_cq) { 2009 __release(&send_cq->lock); 2010 spin_unlock(&recv_cq->lock); 2011 } else { 2012 __release(&recv_cq->lock); 2013 __release(&send_cq->lock); 2014 } 2015 } 2016 2017 static struct mlx5_ib_pd *get_pd(struct mlx5_ib_qp *qp) 2018 { 2019 return to_mpd(qp->ibqp.pd); 2020 } 2021 2022 static void get_cqs(enum ib_qp_type qp_type, 2023 struct ib_cq *ib_send_cq, struct ib_cq *ib_recv_cq, 2024 struct mlx5_ib_cq **send_cq, struct mlx5_ib_cq **recv_cq) 2025 { 2026 switch (qp_type) { 2027 case IB_QPT_XRC_TGT: 2028 *send_cq = NULL; 2029 *recv_cq = NULL; 2030 break; 2031 case MLX5_IB_QPT_REG_UMR: 2032 case IB_QPT_XRC_INI: 2033 *send_cq = ib_send_cq ? to_mcq(ib_send_cq) : NULL; 2034 *recv_cq = NULL; 2035 break; 2036 2037 case IB_QPT_SMI: 2038 case MLX5_IB_QPT_HW_GSI: 2039 case IB_QPT_RC: 2040 case IB_QPT_UC: 2041 case IB_QPT_UD: 2042 case IB_QPT_RAW_IPV6: 2043 case IB_QPT_RAW_ETHERTYPE: 2044 case IB_QPT_RAW_PACKET: 2045 *send_cq = ib_send_cq ? to_mcq(ib_send_cq) : NULL; 2046 *recv_cq = ib_recv_cq ? to_mcq(ib_recv_cq) : NULL; 2047 break; 2048 2049 case IB_QPT_MAX: 2050 default: 2051 *send_cq = NULL; 2052 *recv_cq = NULL; 2053 break; 2054 } 2055 } 2056 2057 static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, 2058 const struct mlx5_modify_raw_qp_param *raw_qp_param, 2059 u8 lag_tx_affinity); 2060 2061 static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp) 2062 { 2063 struct mlx5_ib_cq *send_cq, *recv_cq; 2064 struct mlx5_ib_qp_base *base; 2065 unsigned long flags; 2066 int err; 2067 2068 if (qp->ibqp.rwq_ind_tbl) { 2069 destroy_rss_raw_qp_tir(dev, qp); 2070 return; 2071 } 2072 2073 base = (qp->ibqp.qp_type == IB_QPT_RAW_PACKET || 2074 qp->flags & MLX5_IB_QP_UNDERLAY) ? 2075 &qp->raw_packet_qp.rq.base : 2076 &qp->trans_qp.base; 2077 2078 if (qp->state != IB_QPS_RESET) { 2079 if (qp->ibqp.qp_type != IB_QPT_RAW_PACKET && 2080 !(qp->flags & MLX5_IB_QP_UNDERLAY)) { 2081 err = mlx5_core_qp_modify(dev->mdev, 2082 MLX5_CMD_OP_2RST_QP, 0, 2083 NULL, &base->mqp); 2084 } else { 2085 struct mlx5_modify_raw_qp_param raw_qp_param = { 2086 .operation = MLX5_CMD_OP_2RST_QP 2087 }; 2088 2089 err = modify_raw_packet_qp(dev, qp, &raw_qp_param, 0); 2090 } 2091 if (err) 2092 mlx5_ib_warn(dev, "mlx5_ib: modify QP 0x%06x to RESET failed\n", 2093 base->mqp.qpn); 2094 } 2095 2096 get_cqs(qp->ibqp.qp_type, qp->ibqp.send_cq, qp->ibqp.recv_cq, 2097 &send_cq, &recv_cq); 2098 2099 spin_lock_irqsave(&dev->reset_flow_resource_lock, flags); 2100 mlx5_ib_lock_cqs(send_cq, recv_cq); 2101 /* del from lists under both locks above to protect reset flow paths */ 2102 list_del(&qp->qps_list); 2103 if (send_cq) 2104 list_del(&qp->cq_send_list); 2105 2106 if (recv_cq) 2107 list_del(&qp->cq_recv_list); 2108 2109 if (qp->create_type == MLX5_QP_KERNEL) { 2110 __mlx5_ib_cq_clean(recv_cq, base->mqp.qpn, 2111 qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL); 2112 if (send_cq != recv_cq) 2113 __mlx5_ib_cq_clean(send_cq, base->mqp.qpn, 2114 NULL); 2115 } 2116 mlx5_ib_unlock_cqs(send_cq, recv_cq); 2117 spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags); 2118 2119 if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET || 2120 qp->flags & MLX5_IB_QP_UNDERLAY) { 2121 destroy_raw_packet_qp(dev, qp); 2122 } else { 2123 err = mlx5_core_destroy_qp(dev->mdev, &base->mqp); 2124 if (err) 2125 mlx5_ib_warn(dev, "failed to destroy QP 0x%x\n", 2126 base->mqp.qpn); 2127 } 2128 2129 if (qp->create_type == MLX5_QP_KERNEL) 2130 destroy_qp_kernel(dev, qp); 2131 else if (qp->create_type == MLX5_QP_USER) 2132 destroy_qp_user(dev, &get_pd(qp)->ibpd, qp, base); 2133 } 2134 2135 static const char *ib_qp_type_str(enum ib_qp_type type) 2136 { 2137 switch (type) { 2138 case IB_QPT_SMI: 2139 return "IB_QPT_SMI"; 2140 case IB_QPT_GSI: 2141 return "IB_QPT_GSI"; 2142 case IB_QPT_RC: 2143 return "IB_QPT_RC"; 2144 case IB_QPT_UC: 2145 return "IB_QPT_UC"; 2146 case IB_QPT_UD: 2147 return "IB_QPT_UD"; 2148 case IB_QPT_RAW_IPV6: 2149 return "IB_QPT_RAW_IPV6"; 2150 case IB_QPT_RAW_ETHERTYPE: 2151 return "IB_QPT_RAW_ETHERTYPE"; 2152 case IB_QPT_XRC_INI: 2153 return "IB_QPT_XRC_INI"; 2154 case IB_QPT_XRC_TGT: 2155 return "IB_QPT_XRC_TGT"; 2156 case IB_QPT_RAW_PACKET: 2157 return "IB_QPT_RAW_PACKET"; 2158 case MLX5_IB_QPT_REG_UMR: 2159 return "MLX5_IB_QPT_REG_UMR"; 2160 case IB_QPT_DRIVER: 2161 return "IB_QPT_DRIVER"; 2162 case IB_QPT_MAX: 2163 default: 2164 return "Invalid QP type"; 2165 } 2166 } 2167 2168 static struct ib_qp *mlx5_ib_create_dct(struct ib_pd *pd, 2169 struct ib_qp_init_attr *attr, 2170 struct mlx5_ib_create_qp *ucmd) 2171 { 2172 struct mlx5_ib_qp *qp; 2173 int err = 0; 2174 u32 uidx = MLX5_IB_DEFAULT_UIDX; 2175 void *dctc; 2176 2177 if (!attr->srq || !attr->recv_cq) 2178 return ERR_PTR(-EINVAL); 2179 2180 err = get_qp_user_index(to_mucontext(pd->uobject->context), 2181 ucmd, sizeof(*ucmd), &uidx); 2182 if (err) 2183 return ERR_PTR(err); 2184 2185 qp = kzalloc(sizeof(*qp), GFP_KERNEL); 2186 if (!qp) 2187 return ERR_PTR(-ENOMEM); 2188 2189 qp->dct.in = kzalloc(MLX5_ST_SZ_BYTES(create_dct_in), GFP_KERNEL); 2190 if (!qp->dct.in) { 2191 err = -ENOMEM; 2192 goto err_free; 2193 } 2194 2195 dctc = MLX5_ADDR_OF(create_dct_in, qp->dct.in, dct_context_entry); 2196 qp->qp_sub_type = MLX5_IB_QPT_DCT; 2197 MLX5_SET(dctc, dctc, pd, to_mpd(pd)->pdn); 2198 MLX5_SET(dctc, dctc, srqn_xrqn, to_msrq(attr->srq)->msrq.srqn); 2199 MLX5_SET(dctc, dctc, cqn, to_mcq(attr->recv_cq)->mcq.cqn); 2200 MLX5_SET64(dctc, dctc, dc_access_key, ucmd->access_key); 2201 MLX5_SET(dctc, dctc, user_index, uidx); 2202 2203 qp->state = IB_QPS_RESET; 2204 2205 return &qp->ibqp; 2206 err_free: 2207 kfree(qp); 2208 return ERR_PTR(err); 2209 } 2210 2211 static int set_mlx_qp_type(struct mlx5_ib_dev *dev, 2212 struct ib_qp_init_attr *init_attr, 2213 struct mlx5_ib_create_qp *ucmd, 2214 struct ib_udata *udata) 2215 { 2216 enum { MLX_QP_FLAGS = MLX5_QP_FLAG_TYPE_DCT | MLX5_QP_FLAG_TYPE_DCI }; 2217 int err; 2218 2219 if (!udata) 2220 return -EINVAL; 2221 2222 if (udata->inlen < sizeof(*ucmd)) { 2223 mlx5_ib_dbg(dev, "create_qp user command is smaller than expected\n"); 2224 return -EINVAL; 2225 } 2226 err = ib_copy_from_udata(ucmd, udata, sizeof(*ucmd)); 2227 if (err) 2228 return err; 2229 2230 if ((ucmd->flags & MLX_QP_FLAGS) == MLX5_QP_FLAG_TYPE_DCI) { 2231 init_attr->qp_type = MLX5_IB_QPT_DCI; 2232 } else { 2233 if ((ucmd->flags & MLX_QP_FLAGS) == MLX5_QP_FLAG_TYPE_DCT) { 2234 init_attr->qp_type = MLX5_IB_QPT_DCT; 2235 } else { 2236 mlx5_ib_dbg(dev, "Invalid QP flags\n"); 2237 return -EINVAL; 2238 } 2239 } 2240 2241 if (!MLX5_CAP_GEN(dev->mdev, dct)) { 2242 mlx5_ib_dbg(dev, "DC transport is not supported\n"); 2243 return -EOPNOTSUPP; 2244 } 2245 2246 return 0; 2247 } 2248 2249 struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd, 2250 struct ib_qp_init_attr *verbs_init_attr, 2251 struct ib_udata *udata) 2252 { 2253 struct mlx5_ib_dev *dev; 2254 struct mlx5_ib_qp *qp; 2255 u16 xrcdn = 0; 2256 int err; 2257 struct ib_qp_init_attr mlx_init_attr; 2258 struct ib_qp_init_attr *init_attr = verbs_init_attr; 2259 2260 if (pd) { 2261 dev = to_mdev(pd->device); 2262 2263 if (init_attr->qp_type == IB_QPT_RAW_PACKET) { 2264 if (!pd->uobject) { 2265 mlx5_ib_dbg(dev, "Raw Packet QP is not supported for kernel consumers\n"); 2266 return ERR_PTR(-EINVAL); 2267 } else if (!to_mucontext(pd->uobject->context)->cqe_version) { 2268 mlx5_ib_dbg(dev, "Raw Packet QP is only supported for CQE version > 0\n"); 2269 return ERR_PTR(-EINVAL); 2270 } 2271 } 2272 } else { 2273 /* being cautious here */ 2274 if (init_attr->qp_type != IB_QPT_XRC_TGT && 2275 init_attr->qp_type != MLX5_IB_QPT_REG_UMR) { 2276 pr_warn("%s: no PD for transport %s\n", __func__, 2277 ib_qp_type_str(init_attr->qp_type)); 2278 return ERR_PTR(-EINVAL); 2279 } 2280 dev = to_mdev(to_mxrcd(init_attr->xrcd)->ibxrcd.device); 2281 } 2282 2283 if (init_attr->qp_type == IB_QPT_DRIVER) { 2284 struct mlx5_ib_create_qp ucmd; 2285 2286 init_attr = &mlx_init_attr; 2287 memcpy(init_attr, verbs_init_attr, sizeof(*verbs_init_attr)); 2288 err = set_mlx_qp_type(dev, init_attr, &ucmd, udata); 2289 if (err) 2290 return ERR_PTR(err); 2291 2292 if (init_attr->qp_type == MLX5_IB_QPT_DCI) { 2293 if (init_attr->cap.max_recv_wr || 2294 init_attr->cap.max_recv_sge) { 2295 mlx5_ib_dbg(dev, "DCI QP requires zero size receive queue\n"); 2296 return ERR_PTR(-EINVAL); 2297 } 2298 } else { 2299 return mlx5_ib_create_dct(pd, init_attr, &ucmd); 2300 } 2301 } 2302 2303 switch (init_attr->qp_type) { 2304 case IB_QPT_XRC_TGT: 2305 case IB_QPT_XRC_INI: 2306 if (!MLX5_CAP_GEN(dev->mdev, xrc)) { 2307 mlx5_ib_dbg(dev, "XRC not supported\n"); 2308 return ERR_PTR(-ENOSYS); 2309 } 2310 init_attr->recv_cq = NULL; 2311 if (init_attr->qp_type == IB_QPT_XRC_TGT) { 2312 xrcdn = to_mxrcd(init_attr->xrcd)->xrcdn; 2313 init_attr->send_cq = NULL; 2314 } 2315 2316 /* fall through */ 2317 case IB_QPT_RAW_PACKET: 2318 case IB_QPT_RC: 2319 case IB_QPT_UC: 2320 case IB_QPT_UD: 2321 case IB_QPT_SMI: 2322 case MLX5_IB_QPT_HW_GSI: 2323 case MLX5_IB_QPT_REG_UMR: 2324 case MLX5_IB_QPT_DCI: 2325 qp = kzalloc(sizeof(*qp), GFP_KERNEL); 2326 if (!qp) 2327 return ERR_PTR(-ENOMEM); 2328 2329 err = create_qp_common(dev, pd, init_attr, udata, qp); 2330 if (err) { 2331 mlx5_ib_dbg(dev, "create_qp_common failed\n"); 2332 kfree(qp); 2333 return ERR_PTR(err); 2334 } 2335 2336 if (is_qp0(init_attr->qp_type)) 2337 qp->ibqp.qp_num = 0; 2338 else if (is_qp1(init_attr->qp_type)) 2339 qp->ibqp.qp_num = 1; 2340 else 2341 qp->ibqp.qp_num = qp->trans_qp.base.mqp.qpn; 2342 2343 mlx5_ib_dbg(dev, "ib qpnum 0x%x, mlx qpn 0x%x, rcqn 0x%x, scqn 0x%x\n", 2344 qp->ibqp.qp_num, qp->trans_qp.base.mqp.qpn, 2345 init_attr->recv_cq ? to_mcq(init_attr->recv_cq)->mcq.cqn : -1, 2346 init_attr->send_cq ? to_mcq(init_attr->send_cq)->mcq.cqn : -1); 2347 2348 qp->trans_qp.xrcdn = xrcdn; 2349 2350 break; 2351 2352 case IB_QPT_GSI: 2353 return mlx5_ib_gsi_create_qp(pd, init_attr); 2354 2355 case IB_QPT_RAW_IPV6: 2356 case IB_QPT_RAW_ETHERTYPE: 2357 case IB_QPT_MAX: 2358 default: 2359 mlx5_ib_dbg(dev, "unsupported qp type %d\n", 2360 init_attr->qp_type); 2361 /* Don't support raw QPs */ 2362 return ERR_PTR(-EINVAL); 2363 } 2364 2365 if (verbs_init_attr->qp_type == IB_QPT_DRIVER) 2366 qp->qp_sub_type = init_attr->qp_type; 2367 2368 return &qp->ibqp; 2369 } 2370 2371 static int mlx5_ib_destroy_dct(struct mlx5_ib_qp *mqp) 2372 { 2373 struct mlx5_ib_dev *dev = to_mdev(mqp->ibqp.device); 2374 2375 if (mqp->state == IB_QPS_RTR) { 2376 int err; 2377 2378 err = mlx5_core_destroy_dct(dev->mdev, &mqp->dct.mdct); 2379 if (err) { 2380 mlx5_ib_warn(dev, "failed to destroy DCT %d\n", err); 2381 return err; 2382 } 2383 } 2384 2385 kfree(mqp->dct.in); 2386 kfree(mqp); 2387 return 0; 2388 } 2389 2390 int mlx5_ib_destroy_qp(struct ib_qp *qp) 2391 { 2392 struct mlx5_ib_dev *dev = to_mdev(qp->device); 2393 struct mlx5_ib_qp *mqp = to_mqp(qp); 2394 2395 if (unlikely(qp->qp_type == IB_QPT_GSI)) 2396 return mlx5_ib_gsi_destroy_qp(qp); 2397 2398 if (mqp->qp_sub_type == MLX5_IB_QPT_DCT) 2399 return mlx5_ib_destroy_dct(mqp); 2400 2401 destroy_qp_common(dev, mqp); 2402 2403 kfree(mqp); 2404 2405 return 0; 2406 } 2407 2408 static __be32 to_mlx5_access_flags(struct mlx5_ib_qp *qp, const struct ib_qp_attr *attr, 2409 int attr_mask) 2410 { 2411 u32 hw_access_flags = 0; 2412 u8 dest_rd_atomic; 2413 u32 access_flags; 2414 2415 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) 2416 dest_rd_atomic = attr->max_dest_rd_atomic; 2417 else 2418 dest_rd_atomic = qp->trans_qp.resp_depth; 2419 2420 if (attr_mask & IB_QP_ACCESS_FLAGS) 2421 access_flags = attr->qp_access_flags; 2422 else 2423 access_flags = qp->trans_qp.atomic_rd_en; 2424 2425 if (!dest_rd_atomic) 2426 access_flags &= IB_ACCESS_REMOTE_WRITE; 2427 2428 if (access_flags & IB_ACCESS_REMOTE_READ) 2429 hw_access_flags |= MLX5_QP_BIT_RRE; 2430 if (access_flags & IB_ACCESS_REMOTE_ATOMIC) 2431 hw_access_flags |= (MLX5_QP_BIT_RAE | MLX5_ATOMIC_MODE_CX); 2432 if (access_flags & IB_ACCESS_REMOTE_WRITE) 2433 hw_access_flags |= MLX5_QP_BIT_RWE; 2434 2435 return cpu_to_be32(hw_access_flags); 2436 } 2437 2438 enum { 2439 MLX5_PATH_FLAG_FL = 1 << 0, 2440 MLX5_PATH_FLAG_FREE_AR = 1 << 1, 2441 MLX5_PATH_FLAG_COUNTER = 1 << 2, 2442 }; 2443 2444 static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate) 2445 { 2446 if (rate == IB_RATE_PORT_CURRENT) 2447 return 0; 2448 2449 if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_300_GBPS) 2450 return -EINVAL; 2451 2452 while (rate != IB_RATE_PORT_CURRENT && 2453 !(1 << (rate + MLX5_STAT_RATE_OFFSET) & 2454 MLX5_CAP_GEN(dev->mdev, stat_rate_support))) 2455 --rate; 2456 2457 return rate ? rate + MLX5_STAT_RATE_OFFSET : rate; 2458 } 2459 2460 static int modify_raw_packet_eth_prio(struct mlx5_core_dev *dev, 2461 struct mlx5_ib_sq *sq, u8 sl) 2462 { 2463 void *in; 2464 void *tisc; 2465 int inlen; 2466 int err; 2467 2468 inlen = MLX5_ST_SZ_BYTES(modify_tis_in); 2469 in = kvzalloc(inlen, GFP_KERNEL); 2470 if (!in) 2471 return -ENOMEM; 2472 2473 MLX5_SET(modify_tis_in, in, bitmask.prio, 1); 2474 2475 tisc = MLX5_ADDR_OF(modify_tis_in, in, ctx); 2476 MLX5_SET(tisc, tisc, prio, ((sl & 0x7) << 1)); 2477 2478 err = mlx5_core_modify_tis(dev, sq->tisn, in, inlen); 2479 2480 kvfree(in); 2481 2482 return err; 2483 } 2484 2485 static int modify_raw_packet_tx_affinity(struct mlx5_core_dev *dev, 2486 struct mlx5_ib_sq *sq, u8 tx_affinity) 2487 { 2488 void *in; 2489 void *tisc; 2490 int inlen; 2491 int err; 2492 2493 inlen = MLX5_ST_SZ_BYTES(modify_tis_in); 2494 in = kvzalloc(inlen, GFP_KERNEL); 2495 if (!in) 2496 return -ENOMEM; 2497 2498 MLX5_SET(modify_tis_in, in, bitmask.lag_tx_port_affinity, 1); 2499 2500 tisc = MLX5_ADDR_OF(modify_tis_in, in, ctx); 2501 MLX5_SET(tisc, tisc, lag_tx_port_affinity, tx_affinity); 2502 2503 err = mlx5_core_modify_tis(dev, sq->tisn, in, inlen); 2504 2505 kvfree(in); 2506 2507 return err; 2508 } 2509 2510 static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, 2511 const struct rdma_ah_attr *ah, 2512 struct mlx5_qp_path *path, u8 port, int attr_mask, 2513 u32 path_flags, const struct ib_qp_attr *attr, 2514 bool alt) 2515 { 2516 const struct ib_global_route *grh = rdma_ah_read_grh(ah); 2517 int err; 2518 enum ib_gid_type gid_type; 2519 u8 ah_flags = rdma_ah_get_ah_flags(ah); 2520 u8 sl = rdma_ah_get_sl(ah); 2521 2522 if (attr_mask & IB_QP_PKEY_INDEX) 2523 path->pkey_index = cpu_to_be16(alt ? attr->alt_pkey_index : 2524 attr->pkey_index); 2525 2526 if (ah_flags & IB_AH_GRH) { 2527 if (grh->sgid_index >= 2528 dev->mdev->port_caps[port - 1].gid_table_len) { 2529 pr_err("sgid_index (%u) too large. max is %d\n", 2530 grh->sgid_index, 2531 dev->mdev->port_caps[port - 1].gid_table_len); 2532 return -EINVAL; 2533 } 2534 } 2535 2536 if (ah->type == RDMA_AH_ATTR_TYPE_ROCE) { 2537 if (!(ah_flags & IB_AH_GRH)) 2538 return -EINVAL; 2539 2540 memcpy(path->rmac, ah->roce.dmac, sizeof(ah->roce.dmac)); 2541 if (qp->ibqp.qp_type == IB_QPT_RC || 2542 qp->ibqp.qp_type == IB_QPT_UC || 2543 qp->ibqp.qp_type == IB_QPT_XRC_INI || 2544 qp->ibqp.qp_type == IB_QPT_XRC_TGT) 2545 path->udp_sport = 2546 mlx5_get_roce_udp_sport(dev, ah->grh.sgid_attr); 2547 path->dci_cfi_prio_sl = (sl & 0x7) << 4; 2548 gid_type = ah->grh.sgid_attr->gid_type; 2549 if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) 2550 path->ecn_dscp = (grh->traffic_class >> 2) & 0x3f; 2551 } else { 2552 path->fl_free_ar = (path_flags & MLX5_PATH_FLAG_FL) ? 0x80 : 0; 2553 path->fl_free_ar |= 2554 (path_flags & MLX5_PATH_FLAG_FREE_AR) ? 0x40 : 0; 2555 path->rlid = cpu_to_be16(rdma_ah_get_dlid(ah)); 2556 path->grh_mlid = rdma_ah_get_path_bits(ah) & 0x7f; 2557 if (ah_flags & IB_AH_GRH) 2558 path->grh_mlid |= 1 << 7; 2559 path->dci_cfi_prio_sl = sl & 0xf; 2560 } 2561 2562 if (ah_flags & IB_AH_GRH) { 2563 path->mgid_index = grh->sgid_index; 2564 path->hop_limit = grh->hop_limit; 2565 path->tclass_flowlabel = 2566 cpu_to_be32((grh->traffic_class << 20) | 2567 (grh->flow_label)); 2568 memcpy(path->rgid, grh->dgid.raw, 16); 2569 } 2570 2571 err = ib_rate_to_mlx5(dev, rdma_ah_get_static_rate(ah)); 2572 if (err < 0) 2573 return err; 2574 path->static_rate = err; 2575 path->port = port; 2576 2577 if (attr_mask & IB_QP_TIMEOUT) 2578 path->ackto_lt = (alt ? attr->alt_timeout : attr->timeout) << 3; 2579 2580 if ((qp->ibqp.qp_type == IB_QPT_RAW_PACKET) && qp->sq.wqe_cnt) 2581 return modify_raw_packet_eth_prio(dev->mdev, 2582 &qp->raw_packet_qp.sq, 2583 sl & 0xf); 2584 2585 return 0; 2586 } 2587 2588 static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_QP_ST_MAX] = { 2589 [MLX5_QP_STATE_INIT] = { 2590 [MLX5_QP_STATE_INIT] = { 2591 [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RRE | 2592 MLX5_QP_OPTPAR_RAE | 2593 MLX5_QP_OPTPAR_RWE | 2594 MLX5_QP_OPTPAR_PKEY_INDEX | 2595 MLX5_QP_OPTPAR_PRI_PORT, 2596 [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE | 2597 MLX5_QP_OPTPAR_PKEY_INDEX | 2598 MLX5_QP_OPTPAR_PRI_PORT, 2599 [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_PKEY_INDEX | 2600 MLX5_QP_OPTPAR_Q_KEY | 2601 MLX5_QP_OPTPAR_PRI_PORT, 2602 }, 2603 [MLX5_QP_STATE_RTR] = { 2604 [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH | 2605 MLX5_QP_OPTPAR_RRE | 2606 MLX5_QP_OPTPAR_RAE | 2607 MLX5_QP_OPTPAR_RWE | 2608 MLX5_QP_OPTPAR_PKEY_INDEX, 2609 [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH | 2610 MLX5_QP_OPTPAR_RWE | 2611 MLX5_QP_OPTPAR_PKEY_INDEX, 2612 [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_PKEY_INDEX | 2613 MLX5_QP_OPTPAR_Q_KEY, 2614 [MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_PKEY_INDEX | 2615 MLX5_QP_OPTPAR_Q_KEY, 2616 [MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH | 2617 MLX5_QP_OPTPAR_RRE | 2618 MLX5_QP_OPTPAR_RAE | 2619 MLX5_QP_OPTPAR_RWE | 2620 MLX5_QP_OPTPAR_PKEY_INDEX, 2621 }, 2622 }, 2623 [MLX5_QP_STATE_RTR] = { 2624 [MLX5_QP_STATE_RTS] = { 2625 [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH | 2626 MLX5_QP_OPTPAR_RRE | 2627 MLX5_QP_OPTPAR_RAE | 2628 MLX5_QP_OPTPAR_RWE | 2629 MLX5_QP_OPTPAR_PM_STATE | 2630 MLX5_QP_OPTPAR_RNR_TIMEOUT, 2631 [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH | 2632 MLX5_QP_OPTPAR_RWE | 2633 MLX5_QP_OPTPAR_PM_STATE, 2634 [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY, 2635 }, 2636 }, 2637 [MLX5_QP_STATE_RTS] = { 2638 [MLX5_QP_STATE_RTS] = { 2639 [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RRE | 2640 MLX5_QP_OPTPAR_RAE | 2641 MLX5_QP_OPTPAR_RWE | 2642 MLX5_QP_OPTPAR_RNR_TIMEOUT | 2643 MLX5_QP_OPTPAR_PM_STATE | 2644 MLX5_QP_OPTPAR_ALT_ADDR_PATH, 2645 [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE | 2646 MLX5_QP_OPTPAR_PM_STATE | 2647 MLX5_QP_OPTPAR_ALT_ADDR_PATH, 2648 [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY | 2649 MLX5_QP_OPTPAR_SRQN | 2650 MLX5_QP_OPTPAR_CQN_RCV, 2651 }, 2652 }, 2653 [MLX5_QP_STATE_SQER] = { 2654 [MLX5_QP_STATE_RTS] = { 2655 [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY, 2656 [MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_Q_KEY, 2657 [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE, 2658 [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RNR_TIMEOUT | 2659 MLX5_QP_OPTPAR_RWE | 2660 MLX5_QP_OPTPAR_RAE | 2661 MLX5_QP_OPTPAR_RRE, 2662 }, 2663 }, 2664 }; 2665 2666 static int ib_nr_to_mlx5_nr(int ib_mask) 2667 { 2668 switch (ib_mask) { 2669 case IB_QP_STATE: 2670 return 0; 2671 case IB_QP_CUR_STATE: 2672 return 0; 2673 case IB_QP_EN_SQD_ASYNC_NOTIFY: 2674 return 0; 2675 case IB_QP_ACCESS_FLAGS: 2676 return MLX5_QP_OPTPAR_RWE | MLX5_QP_OPTPAR_RRE | 2677 MLX5_QP_OPTPAR_RAE; 2678 case IB_QP_PKEY_INDEX: 2679 return MLX5_QP_OPTPAR_PKEY_INDEX; 2680 case IB_QP_PORT: 2681 return MLX5_QP_OPTPAR_PRI_PORT; 2682 case IB_QP_QKEY: 2683 return MLX5_QP_OPTPAR_Q_KEY; 2684 case IB_QP_AV: 2685 return MLX5_QP_OPTPAR_PRIMARY_ADDR_PATH | 2686 MLX5_QP_OPTPAR_PRI_PORT; 2687 case IB_QP_PATH_MTU: 2688 return 0; 2689 case IB_QP_TIMEOUT: 2690 return MLX5_QP_OPTPAR_ACK_TIMEOUT; 2691 case IB_QP_RETRY_CNT: 2692 return MLX5_QP_OPTPAR_RETRY_COUNT; 2693 case IB_QP_RNR_RETRY: 2694 return MLX5_QP_OPTPAR_RNR_RETRY; 2695 case IB_QP_RQ_PSN: 2696 return 0; 2697 case IB_QP_MAX_QP_RD_ATOMIC: 2698 return MLX5_QP_OPTPAR_SRA_MAX; 2699 case IB_QP_ALT_PATH: 2700 return MLX5_QP_OPTPAR_ALT_ADDR_PATH; 2701 case IB_QP_MIN_RNR_TIMER: 2702 return MLX5_QP_OPTPAR_RNR_TIMEOUT; 2703 case IB_QP_SQ_PSN: 2704 return 0; 2705 case IB_QP_MAX_DEST_RD_ATOMIC: 2706 return MLX5_QP_OPTPAR_RRA_MAX | MLX5_QP_OPTPAR_RWE | 2707 MLX5_QP_OPTPAR_RRE | MLX5_QP_OPTPAR_RAE; 2708 case IB_QP_PATH_MIG_STATE: 2709 return MLX5_QP_OPTPAR_PM_STATE; 2710 case IB_QP_CAP: 2711 return 0; 2712 case IB_QP_DEST_QPN: 2713 return 0; 2714 } 2715 return 0; 2716 } 2717 2718 static int ib_mask_to_mlx5_opt(int ib_mask) 2719 { 2720 int result = 0; 2721 int i; 2722 2723 for (i = 0; i < 8 * sizeof(int); i++) { 2724 if ((1 << i) & ib_mask) 2725 result |= ib_nr_to_mlx5_nr(1 << i); 2726 } 2727 2728 return result; 2729 } 2730 2731 static int modify_raw_packet_qp_rq(struct mlx5_ib_dev *dev, 2732 struct mlx5_ib_rq *rq, int new_state, 2733 const struct mlx5_modify_raw_qp_param *raw_qp_param) 2734 { 2735 void *in; 2736 void *rqc; 2737 int inlen; 2738 int err; 2739 2740 inlen = MLX5_ST_SZ_BYTES(modify_rq_in); 2741 in = kvzalloc(inlen, GFP_KERNEL); 2742 if (!in) 2743 return -ENOMEM; 2744 2745 MLX5_SET(modify_rq_in, in, rq_state, rq->state); 2746 2747 rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx); 2748 MLX5_SET(rqc, rqc, state, new_state); 2749 2750 if (raw_qp_param->set_mask & MLX5_RAW_QP_MOD_SET_RQ_Q_CTR_ID) { 2751 if (MLX5_CAP_GEN(dev->mdev, modify_rq_counter_set_id)) { 2752 MLX5_SET64(modify_rq_in, in, modify_bitmask, 2753 MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_RQ_COUNTER_SET_ID); 2754 MLX5_SET(rqc, rqc, counter_set_id, raw_qp_param->rq_q_ctr_id); 2755 } else 2756 pr_info_once("%s: RAW PACKET QP counters are not supported on current FW\n", 2757 dev->ib_dev.name); 2758 } 2759 2760 err = mlx5_core_modify_rq(dev->mdev, rq->base.mqp.qpn, in, inlen); 2761 if (err) 2762 goto out; 2763 2764 rq->state = new_state; 2765 2766 out: 2767 kvfree(in); 2768 return err; 2769 } 2770 2771 static int modify_raw_packet_qp_sq(struct mlx5_core_dev *dev, 2772 struct mlx5_ib_sq *sq, 2773 int new_state, 2774 const struct mlx5_modify_raw_qp_param *raw_qp_param) 2775 { 2776 struct mlx5_ib_qp *ibqp = sq->base.container_mibqp; 2777 struct mlx5_rate_limit old_rl = ibqp->rl; 2778 struct mlx5_rate_limit new_rl = old_rl; 2779 bool new_rate_added = false; 2780 u16 rl_index = 0; 2781 void *in; 2782 void *sqc; 2783 int inlen; 2784 int err; 2785 2786 inlen = MLX5_ST_SZ_BYTES(modify_sq_in); 2787 in = kvzalloc(inlen, GFP_KERNEL); 2788 if (!in) 2789 return -ENOMEM; 2790 2791 MLX5_SET(modify_sq_in, in, sq_state, sq->state); 2792 2793 sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx); 2794 MLX5_SET(sqc, sqc, state, new_state); 2795 2796 if (raw_qp_param->set_mask & MLX5_RAW_QP_RATE_LIMIT) { 2797 if (new_state != MLX5_SQC_STATE_RDY) 2798 pr_warn("%s: Rate limit can only be changed when SQ is moving to RDY\n", 2799 __func__); 2800 else 2801 new_rl = raw_qp_param->rl; 2802 } 2803 2804 if (!mlx5_rl_are_equal(&old_rl, &new_rl)) { 2805 if (new_rl.rate) { 2806 err = mlx5_rl_add_rate(dev, &rl_index, &new_rl); 2807 if (err) { 2808 pr_err("Failed configuring rate limit(err %d): \ 2809 rate %u, max_burst_sz %u, typical_pkt_sz %u\n", 2810 err, new_rl.rate, new_rl.max_burst_sz, 2811 new_rl.typical_pkt_sz); 2812 2813 goto out; 2814 } 2815 new_rate_added = true; 2816 } 2817 2818 MLX5_SET64(modify_sq_in, in, modify_bitmask, 1); 2819 /* index 0 means no limit */ 2820 MLX5_SET(sqc, sqc, packet_pacing_rate_limit_index, rl_index); 2821 } 2822 2823 err = mlx5_core_modify_sq(dev, sq->base.mqp.qpn, in, inlen); 2824 if (err) { 2825 /* Remove new rate from table if failed */ 2826 if (new_rate_added) 2827 mlx5_rl_remove_rate(dev, &new_rl); 2828 goto out; 2829 } 2830 2831 /* Only remove the old rate after new rate was set */ 2832 if ((old_rl.rate && 2833 !mlx5_rl_are_equal(&old_rl, &new_rl)) || 2834 (new_state != MLX5_SQC_STATE_RDY)) 2835 mlx5_rl_remove_rate(dev, &old_rl); 2836 2837 ibqp->rl = new_rl; 2838 sq->state = new_state; 2839 2840 out: 2841 kvfree(in); 2842 return err; 2843 } 2844 2845 static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, 2846 const struct mlx5_modify_raw_qp_param *raw_qp_param, 2847 u8 tx_affinity) 2848 { 2849 struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp; 2850 struct mlx5_ib_rq *rq = &raw_packet_qp->rq; 2851 struct mlx5_ib_sq *sq = &raw_packet_qp->sq; 2852 int modify_rq = !!qp->rq.wqe_cnt; 2853 int modify_sq = !!qp->sq.wqe_cnt; 2854 int rq_state; 2855 int sq_state; 2856 int err; 2857 2858 switch (raw_qp_param->operation) { 2859 case MLX5_CMD_OP_RST2INIT_QP: 2860 rq_state = MLX5_RQC_STATE_RDY; 2861 sq_state = MLX5_SQC_STATE_RDY; 2862 break; 2863 case MLX5_CMD_OP_2ERR_QP: 2864 rq_state = MLX5_RQC_STATE_ERR; 2865 sq_state = MLX5_SQC_STATE_ERR; 2866 break; 2867 case MLX5_CMD_OP_2RST_QP: 2868 rq_state = MLX5_RQC_STATE_RST; 2869 sq_state = MLX5_SQC_STATE_RST; 2870 break; 2871 case MLX5_CMD_OP_RTR2RTS_QP: 2872 case MLX5_CMD_OP_RTS2RTS_QP: 2873 if (raw_qp_param->set_mask == 2874 MLX5_RAW_QP_RATE_LIMIT) { 2875 modify_rq = 0; 2876 sq_state = sq->state; 2877 } else { 2878 return raw_qp_param->set_mask ? -EINVAL : 0; 2879 } 2880 break; 2881 case MLX5_CMD_OP_INIT2INIT_QP: 2882 case MLX5_CMD_OP_INIT2RTR_QP: 2883 if (raw_qp_param->set_mask) 2884 return -EINVAL; 2885 else 2886 return 0; 2887 default: 2888 WARN_ON(1); 2889 return -EINVAL; 2890 } 2891 2892 if (modify_rq) { 2893 err = modify_raw_packet_qp_rq(dev, rq, rq_state, raw_qp_param); 2894 if (err) 2895 return err; 2896 } 2897 2898 if (modify_sq) { 2899 if (tx_affinity) { 2900 err = modify_raw_packet_tx_affinity(dev->mdev, sq, 2901 tx_affinity); 2902 if (err) 2903 return err; 2904 } 2905 2906 return modify_raw_packet_qp_sq(dev->mdev, sq, sq_state, raw_qp_param); 2907 } 2908 2909 return 0; 2910 } 2911 2912 static int __mlx5_ib_modify_qp(struct ib_qp *ibqp, 2913 const struct ib_qp_attr *attr, int attr_mask, 2914 enum ib_qp_state cur_state, enum ib_qp_state new_state, 2915 const struct mlx5_ib_modify_qp *ucmd) 2916 { 2917 static const u16 optab[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE] = { 2918 [MLX5_QP_STATE_RST] = { 2919 [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP, 2920 [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP, 2921 [MLX5_QP_STATE_INIT] = MLX5_CMD_OP_RST2INIT_QP, 2922 }, 2923 [MLX5_QP_STATE_INIT] = { 2924 [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP, 2925 [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP, 2926 [MLX5_QP_STATE_INIT] = MLX5_CMD_OP_INIT2INIT_QP, 2927 [MLX5_QP_STATE_RTR] = MLX5_CMD_OP_INIT2RTR_QP, 2928 }, 2929 [MLX5_QP_STATE_RTR] = { 2930 [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP, 2931 [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP, 2932 [MLX5_QP_STATE_RTS] = MLX5_CMD_OP_RTR2RTS_QP, 2933 }, 2934 [MLX5_QP_STATE_RTS] = { 2935 [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP, 2936 [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP, 2937 [MLX5_QP_STATE_RTS] = MLX5_CMD_OP_RTS2RTS_QP, 2938 }, 2939 [MLX5_QP_STATE_SQD] = { 2940 [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP, 2941 [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP, 2942 }, 2943 [MLX5_QP_STATE_SQER] = { 2944 [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP, 2945 [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP, 2946 [MLX5_QP_STATE_RTS] = MLX5_CMD_OP_SQERR2RTS_QP, 2947 }, 2948 [MLX5_QP_STATE_ERR] = { 2949 [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP, 2950 [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP, 2951 } 2952 }; 2953 2954 struct mlx5_ib_dev *dev = to_mdev(ibqp->device); 2955 struct mlx5_ib_qp *qp = to_mqp(ibqp); 2956 struct mlx5_ib_qp_base *base = &qp->trans_qp.base; 2957 struct mlx5_ib_cq *send_cq, *recv_cq; 2958 struct mlx5_qp_context *context; 2959 struct mlx5_ib_pd *pd; 2960 struct mlx5_ib_port *mibport = NULL; 2961 enum mlx5_qp_state mlx5_cur, mlx5_new; 2962 enum mlx5_qp_optpar optpar; 2963 int mlx5_st; 2964 int err; 2965 u16 op; 2966 u8 tx_affinity = 0; 2967 2968 mlx5_st = to_mlx5_st(ibqp->qp_type == IB_QPT_DRIVER ? 2969 qp->qp_sub_type : ibqp->qp_type); 2970 if (mlx5_st < 0) 2971 return -EINVAL; 2972 2973 context = kzalloc(sizeof(*context), GFP_KERNEL); 2974 if (!context) 2975 return -ENOMEM; 2976 2977 context->flags = cpu_to_be32(mlx5_st << 16); 2978 2979 if (!(attr_mask & IB_QP_PATH_MIG_STATE)) { 2980 context->flags |= cpu_to_be32(MLX5_QP_PM_MIGRATED << 11); 2981 } else { 2982 switch (attr->path_mig_state) { 2983 case IB_MIG_MIGRATED: 2984 context->flags |= cpu_to_be32(MLX5_QP_PM_MIGRATED << 11); 2985 break; 2986 case IB_MIG_REARM: 2987 context->flags |= cpu_to_be32(MLX5_QP_PM_REARM << 11); 2988 break; 2989 case IB_MIG_ARMED: 2990 context->flags |= cpu_to_be32(MLX5_QP_PM_ARMED << 11); 2991 break; 2992 } 2993 } 2994 2995 if ((cur_state == IB_QPS_RESET) && (new_state == IB_QPS_INIT)) { 2996 if ((ibqp->qp_type == IB_QPT_RC) || 2997 (ibqp->qp_type == IB_QPT_UD && 2998 !(qp->flags & MLX5_IB_QP_SQPN_QP1)) || 2999 (ibqp->qp_type == IB_QPT_UC) || 3000 (ibqp->qp_type == IB_QPT_RAW_PACKET) || 3001 (ibqp->qp_type == IB_QPT_XRC_INI) || 3002 (ibqp->qp_type == IB_QPT_XRC_TGT)) { 3003 if (mlx5_lag_is_active(dev->mdev)) { 3004 u8 p = mlx5_core_native_port_num(dev->mdev); 3005 tx_affinity = (unsigned int)atomic_add_return(1, 3006 &dev->roce[p].next_port) % 3007 MLX5_MAX_PORTS + 1; 3008 context->flags |= cpu_to_be32(tx_affinity << 24); 3009 } 3010 } 3011 } 3012 3013 if (is_sqp(ibqp->qp_type)) { 3014 context->mtu_msgmax = (IB_MTU_256 << 5) | 8; 3015 } else if ((ibqp->qp_type == IB_QPT_UD && 3016 !(qp->flags & MLX5_IB_QP_UNDERLAY)) || 3017 ibqp->qp_type == MLX5_IB_QPT_REG_UMR) { 3018 context->mtu_msgmax = (IB_MTU_4096 << 5) | 12; 3019 } else if (attr_mask & IB_QP_PATH_MTU) { 3020 if (attr->path_mtu < IB_MTU_256 || 3021 attr->path_mtu > IB_MTU_4096) { 3022 mlx5_ib_warn(dev, "invalid mtu %d\n", attr->path_mtu); 3023 err = -EINVAL; 3024 goto out; 3025 } 3026 context->mtu_msgmax = (attr->path_mtu << 5) | 3027 (u8)MLX5_CAP_GEN(dev->mdev, log_max_msg); 3028 } 3029 3030 if (attr_mask & IB_QP_DEST_QPN) 3031 context->log_pg_sz_remote_qpn = cpu_to_be32(attr->dest_qp_num); 3032 3033 if (attr_mask & IB_QP_PKEY_INDEX) 3034 context->pri_path.pkey_index = cpu_to_be16(attr->pkey_index); 3035 3036 /* todo implement counter_index functionality */ 3037 3038 if (is_sqp(ibqp->qp_type)) 3039 context->pri_path.port = qp->port; 3040 3041 if (attr_mask & IB_QP_PORT) 3042 context->pri_path.port = attr->port_num; 3043 3044 if (attr_mask & IB_QP_AV) { 3045 err = mlx5_set_path(dev, qp, &attr->ah_attr, &context->pri_path, 3046 attr_mask & IB_QP_PORT ? attr->port_num : qp->port, 3047 attr_mask, 0, attr, false); 3048 if (err) 3049 goto out; 3050 } 3051 3052 if (attr_mask & IB_QP_TIMEOUT) 3053 context->pri_path.ackto_lt |= attr->timeout << 3; 3054 3055 if (attr_mask & IB_QP_ALT_PATH) { 3056 err = mlx5_set_path(dev, qp, &attr->alt_ah_attr, 3057 &context->alt_path, 3058 attr->alt_port_num, 3059 attr_mask | IB_QP_PKEY_INDEX | IB_QP_TIMEOUT, 3060 0, attr, true); 3061 if (err) 3062 goto out; 3063 } 3064 3065 pd = get_pd(qp); 3066 get_cqs(qp->ibqp.qp_type, qp->ibqp.send_cq, qp->ibqp.recv_cq, 3067 &send_cq, &recv_cq); 3068 3069 context->flags_pd = cpu_to_be32(pd ? pd->pdn : to_mpd(dev->devr.p0)->pdn); 3070 context->cqn_send = send_cq ? cpu_to_be32(send_cq->mcq.cqn) : 0; 3071 context->cqn_recv = recv_cq ? cpu_to_be32(recv_cq->mcq.cqn) : 0; 3072 context->params1 = cpu_to_be32(MLX5_IB_ACK_REQ_FREQ << 28); 3073 3074 if (attr_mask & IB_QP_RNR_RETRY) 3075 context->params1 |= cpu_to_be32(attr->rnr_retry << 13); 3076 3077 if (attr_mask & IB_QP_RETRY_CNT) 3078 context->params1 |= cpu_to_be32(attr->retry_cnt << 16); 3079 3080 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) { 3081 if (attr->max_rd_atomic) 3082 context->params1 |= 3083 cpu_to_be32(fls(attr->max_rd_atomic - 1) << 21); 3084 } 3085 3086 if (attr_mask & IB_QP_SQ_PSN) 3087 context->next_send_psn = cpu_to_be32(attr->sq_psn); 3088 3089 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) { 3090 if (attr->max_dest_rd_atomic) 3091 context->params2 |= 3092 cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21); 3093 } 3094 3095 if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC)) 3096 context->params2 |= to_mlx5_access_flags(qp, attr, attr_mask); 3097 3098 if (attr_mask & IB_QP_MIN_RNR_TIMER) 3099 context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24); 3100 3101 if (attr_mask & IB_QP_RQ_PSN) 3102 context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn); 3103 3104 if (attr_mask & IB_QP_QKEY) 3105 context->qkey = cpu_to_be32(attr->qkey); 3106 3107 if (qp->rq.wqe_cnt && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) 3108 context->db_rec_addr = cpu_to_be64(qp->db.dma); 3109 3110 if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) { 3111 u8 port_num = (attr_mask & IB_QP_PORT ? attr->port_num : 3112 qp->port) - 1; 3113 3114 /* Underlay port should be used - index 0 function per port */ 3115 if (qp->flags & MLX5_IB_QP_UNDERLAY) 3116 port_num = 0; 3117 3118 mibport = &dev->port[port_num]; 3119 context->qp_counter_set_usr_page |= 3120 cpu_to_be32((u32)(mibport->cnts.set_id) << 24); 3121 } 3122 3123 if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) 3124 context->sq_crq_size |= cpu_to_be16(1 << 4); 3125 3126 if (qp->flags & MLX5_IB_QP_SQPN_QP1) 3127 context->deth_sqpn = cpu_to_be32(1); 3128 3129 mlx5_cur = to_mlx5_state(cur_state); 3130 mlx5_new = to_mlx5_state(new_state); 3131 3132 if (mlx5_cur >= MLX5_QP_NUM_STATE || mlx5_new >= MLX5_QP_NUM_STATE || 3133 !optab[mlx5_cur][mlx5_new]) { 3134 err = -EINVAL; 3135 goto out; 3136 } 3137 3138 op = optab[mlx5_cur][mlx5_new]; 3139 optpar = ib_mask_to_mlx5_opt(attr_mask); 3140 optpar &= opt_mask[mlx5_cur][mlx5_new][mlx5_st]; 3141 3142 if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET || 3143 qp->flags & MLX5_IB_QP_UNDERLAY) { 3144 struct mlx5_modify_raw_qp_param raw_qp_param = {}; 3145 3146 raw_qp_param.operation = op; 3147 if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) { 3148 raw_qp_param.rq_q_ctr_id = mibport->cnts.set_id; 3149 raw_qp_param.set_mask |= MLX5_RAW_QP_MOD_SET_RQ_Q_CTR_ID; 3150 } 3151 3152 if (attr_mask & IB_QP_RATE_LIMIT) { 3153 raw_qp_param.rl.rate = attr->rate_limit; 3154 3155 if (ucmd->burst_info.max_burst_sz) { 3156 if (attr->rate_limit && 3157 MLX5_CAP_QOS(dev->mdev, packet_pacing_burst_bound)) { 3158 raw_qp_param.rl.max_burst_sz = 3159 ucmd->burst_info.max_burst_sz; 3160 } else { 3161 err = -EINVAL; 3162 goto out; 3163 } 3164 } 3165 3166 if (ucmd->burst_info.typical_pkt_sz) { 3167 if (attr->rate_limit && 3168 MLX5_CAP_QOS(dev->mdev, packet_pacing_typical_size)) { 3169 raw_qp_param.rl.typical_pkt_sz = 3170 ucmd->burst_info.typical_pkt_sz; 3171 } else { 3172 err = -EINVAL; 3173 goto out; 3174 } 3175 } 3176 3177 raw_qp_param.set_mask |= MLX5_RAW_QP_RATE_LIMIT; 3178 } 3179 3180 err = modify_raw_packet_qp(dev, qp, &raw_qp_param, tx_affinity); 3181 } else { 3182 err = mlx5_core_qp_modify(dev->mdev, op, optpar, context, 3183 &base->mqp); 3184 } 3185 3186 if (err) 3187 goto out; 3188 3189 qp->state = new_state; 3190 3191 if (attr_mask & IB_QP_ACCESS_FLAGS) 3192 qp->trans_qp.atomic_rd_en = attr->qp_access_flags; 3193 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) 3194 qp->trans_qp.resp_depth = attr->max_dest_rd_atomic; 3195 if (attr_mask & IB_QP_PORT) 3196 qp->port = attr->port_num; 3197 if (attr_mask & IB_QP_ALT_PATH) 3198 qp->trans_qp.alt_port = attr->alt_port_num; 3199 3200 /* 3201 * If we moved a kernel QP to RESET, clean up all old CQ 3202 * entries and reinitialize the QP. 3203 */ 3204 if (new_state == IB_QPS_RESET && 3205 !ibqp->uobject && ibqp->qp_type != IB_QPT_XRC_TGT) { 3206 mlx5_ib_cq_clean(recv_cq, base->mqp.qpn, 3207 ibqp->srq ? to_msrq(ibqp->srq) : NULL); 3208 if (send_cq != recv_cq) 3209 mlx5_ib_cq_clean(send_cq, base->mqp.qpn, NULL); 3210 3211 qp->rq.head = 0; 3212 qp->rq.tail = 0; 3213 qp->sq.head = 0; 3214 qp->sq.tail = 0; 3215 qp->sq.cur_post = 0; 3216 qp->sq.last_poll = 0; 3217 qp->db.db[MLX5_RCV_DBR] = 0; 3218 qp->db.db[MLX5_SND_DBR] = 0; 3219 } 3220 3221 out: 3222 kfree(context); 3223 return err; 3224 } 3225 3226 static inline bool is_valid_mask(int mask, int req, int opt) 3227 { 3228 if ((mask & req) != req) 3229 return false; 3230 3231 if (mask & ~(req | opt)) 3232 return false; 3233 3234 return true; 3235 } 3236 3237 /* check valid transition for driver QP types 3238 * for now the only QP type that this function supports is DCI 3239 */ 3240 static bool modify_dci_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state new_state, 3241 enum ib_qp_attr_mask attr_mask) 3242 { 3243 int req = IB_QP_STATE; 3244 int opt = 0; 3245 3246 if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) { 3247 req |= IB_QP_PKEY_INDEX | IB_QP_PORT; 3248 return is_valid_mask(attr_mask, req, opt); 3249 } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) { 3250 opt = IB_QP_PKEY_INDEX | IB_QP_PORT; 3251 return is_valid_mask(attr_mask, req, opt); 3252 } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) { 3253 req |= IB_QP_PATH_MTU; 3254 opt = IB_QP_PKEY_INDEX; 3255 return is_valid_mask(attr_mask, req, opt); 3256 } else if (cur_state == IB_QPS_RTR && new_state == IB_QPS_RTS) { 3257 req |= IB_QP_TIMEOUT | IB_QP_RETRY_CNT | IB_QP_RNR_RETRY | 3258 IB_QP_MAX_QP_RD_ATOMIC | IB_QP_SQ_PSN; 3259 opt = IB_QP_MIN_RNR_TIMER; 3260 return is_valid_mask(attr_mask, req, opt); 3261 } else if (cur_state == IB_QPS_RTS && new_state == IB_QPS_RTS) { 3262 opt = IB_QP_MIN_RNR_TIMER; 3263 return is_valid_mask(attr_mask, req, opt); 3264 } else if (cur_state != IB_QPS_RESET && new_state == IB_QPS_ERR) { 3265 return is_valid_mask(attr_mask, req, opt); 3266 } 3267 return false; 3268 } 3269 3270 /* mlx5_ib_modify_dct: modify a DCT QP 3271 * valid transitions are: 3272 * RESET to INIT: must set access_flags, pkey_index and port 3273 * INIT to RTR : must set min_rnr_timer, tclass, flow_label, 3274 * mtu, gid_index and hop_limit 3275 * Other transitions and attributes are illegal 3276 */ 3277 static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr, 3278 int attr_mask, struct ib_udata *udata) 3279 { 3280 struct mlx5_ib_qp *qp = to_mqp(ibqp); 3281 struct mlx5_ib_dev *dev = to_mdev(ibqp->device); 3282 enum ib_qp_state cur_state, new_state; 3283 int err = 0; 3284 int required = IB_QP_STATE; 3285 void *dctc; 3286 3287 if (!(attr_mask & IB_QP_STATE)) 3288 return -EINVAL; 3289 3290 cur_state = qp->state; 3291 new_state = attr->qp_state; 3292 3293 dctc = MLX5_ADDR_OF(create_dct_in, qp->dct.in, dct_context_entry); 3294 if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) { 3295 required |= IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX | IB_QP_PORT; 3296 if (!is_valid_mask(attr_mask, required, 0)) 3297 return -EINVAL; 3298 3299 if (attr->port_num == 0 || 3300 attr->port_num > MLX5_CAP_GEN(dev->mdev, num_ports)) { 3301 mlx5_ib_dbg(dev, "invalid port number %d. number of ports is %d\n", 3302 attr->port_num, dev->num_ports); 3303 return -EINVAL; 3304 } 3305 if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ) 3306 MLX5_SET(dctc, dctc, rre, 1); 3307 if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE) 3308 MLX5_SET(dctc, dctc, rwe, 1); 3309 if (attr->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC) { 3310 if (!mlx5_ib_dc_atomic_is_supported(dev)) 3311 return -EOPNOTSUPP; 3312 MLX5_SET(dctc, dctc, rae, 1); 3313 MLX5_SET(dctc, dctc, atomic_mode, MLX5_ATOMIC_MODE_DCT_CX); 3314 } 3315 MLX5_SET(dctc, dctc, pkey_index, attr->pkey_index); 3316 MLX5_SET(dctc, dctc, port, attr->port_num); 3317 MLX5_SET(dctc, dctc, counter_set_id, dev->port[attr->port_num - 1].cnts.set_id); 3318 3319 } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) { 3320 struct mlx5_ib_modify_qp_resp resp = {}; 3321 u32 min_resp_len = offsetof(typeof(resp), dctn) + 3322 sizeof(resp.dctn); 3323 3324 if (udata->outlen < min_resp_len) 3325 return -EINVAL; 3326 resp.response_length = min_resp_len; 3327 3328 required |= IB_QP_MIN_RNR_TIMER | IB_QP_AV | IB_QP_PATH_MTU; 3329 if (!is_valid_mask(attr_mask, required, 0)) 3330 return -EINVAL; 3331 MLX5_SET(dctc, dctc, min_rnr_nak, attr->min_rnr_timer); 3332 MLX5_SET(dctc, dctc, tclass, attr->ah_attr.grh.traffic_class); 3333 MLX5_SET(dctc, dctc, flow_label, attr->ah_attr.grh.flow_label); 3334 MLX5_SET(dctc, dctc, mtu, attr->path_mtu); 3335 MLX5_SET(dctc, dctc, my_addr_index, attr->ah_attr.grh.sgid_index); 3336 MLX5_SET(dctc, dctc, hop_limit, attr->ah_attr.grh.hop_limit); 3337 3338 err = mlx5_core_create_dct(dev->mdev, &qp->dct.mdct, qp->dct.in, 3339 MLX5_ST_SZ_BYTES(create_dct_in)); 3340 if (err) 3341 return err; 3342 resp.dctn = qp->dct.mdct.mqp.qpn; 3343 err = ib_copy_to_udata(udata, &resp, resp.response_length); 3344 if (err) { 3345 mlx5_core_destroy_dct(dev->mdev, &qp->dct.mdct); 3346 return err; 3347 } 3348 } else { 3349 mlx5_ib_warn(dev, "Modify DCT: Invalid transition from %d to %d\n", cur_state, new_state); 3350 return -EINVAL; 3351 } 3352 if (err) 3353 qp->state = IB_QPS_ERR; 3354 else 3355 qp->state = new_state; 3356 return err; 3357 } 3358 3359 int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, 3360 int attr_mask, struct ib_udata *udata) 3361 { 3362 struct mlx5_ib_dev *dev = to_mdev(ibqp->device); 3363 struct mlx5_ib_qp *qp = to_mqp(ibqp); 3364 struct mlx5_ib_modify_qp ucmd = {}; 3365 enum ib_qp_type qp_type; 3366 enum ib_qp_state cur_state, new_state; 3367 size_t required_cmd_sz; 3368 int err = -EINVAL; 3369 int port; 3370 enum rdma_link_layer ll = IB_LINK_LAYER_UNSPECIFIED; 3371 3372 if (ibqp->rwq_ind_tbl) 3373 return -ENOSYS; 3374 3375 if (udata && udata->inlen) { 3376 required_cmd_sz = offsetof(typeof(ucmd), reserved) + 3377 sizeof(ucmd.reserved); 3378 if (udata->inlen < required_cmd_sz) 3379 return -EINVAL; 3380 3381 if (udata->inlen > sizeof(ucmd) && 3382 !ib_is_udata_cleared(udata, sizeof(ucmd), 3383 udata->inlen - sizeof(ucmd))) 3384 return -EOPNOTSUPP; 3385 3386 if (ib_copy_from_udata(&ucmd, udata, 3387 min(udata->inlen, sizeof(ucmd)))) 3388 return -EFAULT; 3389 3390 if (ucmd.comp_mask || 3391 memchr_inv(&ucmd.reserved, 0, sizeof(ucmd.reserved)) || 3392 memchr_inv(&ucmd.burst_info.reserved, 0, 3393 sizeof(ucmd.burst_info.reserved))) 3394 return -EOPNOTSUPP; 3395 } 3396 3397 if (unlikely(ibqp->qp_type == IB_QPT_GSI)) 3398 return mlx5_ib_gsi_modify_qp(ibqp, attr, attr_mask); 3399 3400 if (ibqp->qp_type == IB_QPT_DRIVER) 3401 qp_type = qp->qp_sub_type; 3402 else 3403 qp_type = (unlikely(ibqp->qp_type == MLX5_IB_QPT_HW_GSI)) ? 3404 IB_QPT_GSI : ibqp->qp_type; 3405 3406 if (qp_type == MLX5_IB_QPT_DCT) 3407 return mlx5_ib_modify_dct(ibqp, attr, attr_mask, udata); 3408 3409 mutex_lock(&qp->mutex); 3410 3411 cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state; 3412 new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state; 3413 3414 if (!(cur_state == new_state && cur_state == IB_QPS_RESET)) { 3415 port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port; 3416 ll = dev->ib_dev.get_link_layer(&dev->ib_dev, port); 3417 } 3418 3419 if (qp->flags & MLX5_IB_QP_UNDERLAY) { 3420 if (attr_mask & ~(IB_QP_STATE | IB_QP_CUR_STATE)) { 3421 mlx5_ib_dbg(dev, "invalid attr_mask 0x%x when underlay QP is used\n", 3422 attr_mask); 3423 goto out; 3424 } 3425 } else if (qp_type != MLX5_IB_QPT_REG_UMR && 3426 qp_type != MLX5_IB_QPT_DCI && 3427 !ib_modify_qp_is_ok(cur_state, new_state, qp_type, attr_mask, ll)) { 3428 mlx5_ib_dbg(dev, "invalid QP state transition from %d to %d, qp_type %d, attr_mask 0x%x\n", 3429 cur_state, new_state, ibqp->qp_type, attr_mask); 3430 goto out; 3431 } else if (qp_type == MLX5_IB_QPT_DCI && 3432 !modify_dci_qp_is_ok(cur_state, new_state, attr_mask)) { 3433 mlx5_ib_dbg(dev, "invalid QP state transition from %d to %d, qp_type %d, attr_mask 0x%x\n", 3434 cur_state, new_state, qp_type, attr_mask); 3435 goto out; 3436 } 3437 3438 if ((attr_mask & IB_QP_PORT) && 3439 (attr->port_num == 0 || 3440 attr->port_num > dev->num_ports)) { 3441 mlx5_ib_dbg(dev, "invalid port number %d. number of ports is %d\n", 3442 attr->port_num, dev->num_ports); 3443 goto out; 3444 } 3445 3446 if (attr_mask & IB_QP_PKEY_INDEX) { 3447 port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port; 3448 if (attr->pkey_index >= 3449 dev->mdev->port_caps[port - 1].pkey_table_len) { 3450 mlx5_ib_dbg(dev, "invalid pkey index %d\n", 3451 attr->pkey_index); 3452 goto out; 3453 } 3454 } 3455 3456 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC && 3457 attr->max_rd_atomic > 3458 (1 << MLX5_CAP_GEN(dev->mdev, log_max_ra_res_qp))) { 3459 mlx5_ib_dbg(dev, "invalid max_rd_atomic value %d\n", 3460 attr->max_rd_atomic); 3461 goto out; 3462 } 3463 3464 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC && 3465 attr->max_dest_rd_atomic > 3466 (1 << MLX5_CAP_GEN(dev->mdev, log_max_ra_req_qp))) { 3467 mlx5_ib_dbg(dev, "invalid max_dest_rd_atomic value %d\n", 3468 attr->max_dest_rd_atomic); 3469 goto out; 3470 } 3471 3472 if (cur_state == new_state && cur_state == IB_QPS_RESET) { 3473 err = 0; 3474 goto out; 3475 } 3476 3477 err = __mlx5_ib_modify_qp(ibqp, attr, attr_mask, cur_state, 3478 new_state, &ucmd); 3479 3480 out: 3481 mutex_unlock(&qp->mutex); 3482 return err; 3483 } 3484 3485 static int mlx5_wq_overflow(struct mlx5_ib_wq *wq, int nreq, struct ib_cq *ib_cq) 3486 { 3487 struct mlx5_ib_cq *cq; 3488 unsigned cur; 3489 3490 cur = wq->head - wq->tail; 3491 if (likely(cur + nreq < wq->max_post)) 3492 return 0; 3493 3494 cq = to_mcq(ib_cq); 3495 spin_lock(&cq->lock); 3496 cur = wq->head - wq->tail; 3497 spin_unlock(&cq->lock); 3498 3499 return cur + nreq >= wq->max_post; 3500 } 3501 3502 static __always_inline void set_raddr_seg(struct mlx5_wqe_raddr_seg *rseg, 3503 u64 remote_addr, u32 rkey) 3504 { 3505 rseg->raddr = cpu_to_be64(remote_addr); 3506 rseg->rkey = cpu_to_be32(rkey); 3507 rseg->reserved = 0; 3508 } 3509 3510 static void *set_eth_seg(struct mlx5_wqe_eth_seg *eseg, 3511 const struct ib_send_wr *wr, void *qend, 3512 struct mlx5_ib_qp *qp, int *size) 3513 { 3514 void *seg = eseg; 3515 3516 memset(eseg, 0, sizeof(struct mlx5_wqe_eth_seg)); 3517 3518 if (wr->send_flags & IB_SEND_IP_CSUM) 3519 eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM | 3520 MLX5_ETH_WQE_L4_CSUM; 3521 3522 seg += sizeof(struct mlx5_wqe_eth_seg); 3523 *size += sizeof(struct mlx5_wqe_eth_seg) / 16; 3524 3525 if (wr->opcode == IB_WR_LSO) { 3526 struct ib_ud_wr *ud_wr = container_of(wr, struct ib_ud_wr, wr); 3527 int size_of_inl_hdr_start = sizeof(eseg->inline_hdr.start); 3528 u64 left, leftlen, copysz; 3529 void *pdata = ud_wr->header; 3530 3531 left = ud_wr->hlen; 3532 eseg->mss = cpu_to_be16(ud_wr->mss); 3533 eseg->inline_hdr.sz = cpu_to_be16(left); 3534 3535 /* 3536 * check if there is space till the end of queue, if yes, 3537 * copy all in one shot, otherwise copy till the end of queue, 3538 * rollback and than the copy the left 3539 */ 3540 leftlen = qend - (void *)eseg->inline_hdr.start; 3541 copysz = min_t(u64, leftlen, left); 3542 3543 memcpy(seg - size_of_inl_hdr_start, pdata, copysz); 3544 3545 if (likely(copysz > size_of_inl_hdr_start)) { 3546 seg += ALIGN(copysz - size_of_inl_hdr_start, 16); 3547 *size += ALIGN(copysz - size_of_inl_hdr_start, 16) / 16; 3548 } 3549 3550 if (unlikely(copysz < left)) { /* the last wqe in the queue */ 3551 seg = mlx5_get_send_wqe(qp, 0); 3552 left -= copysz; 3553 pdata += copysz; 3554 memcpy(seg, pdata, left); 3555 seg += ALIGN(left, 16); 3556 *size += ALIGN(left, 16) / 16; 3557 } 3558 } 3559 3560 return seg; 3561 } 3562 3563 static void set_datagram_seg(struct mlx5_wqe_datagram_seg *dseg, 3564 const struct ib_send_wr *wr) 3565 { 3566 memcpy(&dseg->av, &to_mah(ud_wr(wr)->ah)->av, sizeof(struct mlx5_av)); 3567 dseg->av.dqp_dct = cpu_to_be32(ud_wr(wr)->remote_qpn | MLX5_EXTENDED_UD_AV); 3568 dseg->av.key.qkey.qkey = cpu_to_be32(ud_wr(wr)->remote_qkey); 3569 } 3570 3571 static void set_data_ptr_seg(struct mlx5_wqe_data_seg *dseg, struct ib_sge *sg) 3572 { 3573 dseg->byte_count = cpu_to_be32(sg->length); 3574 dseg->lkey = cpu_to_be32(sg->lkey); 3575 dseg->addr = cpu_to_be64(sg->addr); 3576 } 3577 3578 static u64 get_xlt_octo(u64 bytes) 3579 { 3580 return ALIGN(bytes, MLX5_IB_UMR_XLT_ALIGNMENT) / 3581 MLX5_IB_UMR_OCTOWORD; 3582 } 3583 3584 static __be64 frwr_mkey_mask(void) 3585 { 3586 u64 result; 3587 3588 result = MLX5_MKEY_MASK_LEN | 3589 MLX5_MKEY_MASK_PAGE_SIZE | 3590 MLX5_MKEY_MASK_START_ADDR | 3591 MLX5_MKEY_MASK_EN_RINVAL | 3592 MLX5_MKEY_MASK_KEY | 3593 MLX5_MKEY_MASK_LR | 3594 MLX5_MKEY_MASK_LW | 3595 MLX5_MKEY_MASK_RR | 3596 MLX5_MKEY_MASK_RW | 3597 MLX5_MKEY_MASK_A | 3598 MLX5_MKEY_MASK_SMALL_FENCE | 3599 MLX5_MKEY_MASK_FREE; 3600 3601 return cpu_to_be64(result); 3602 } 3603 3604 static __be64 sig_mkey_mask(void) 3605 { 3606 u64 result; 3607 3608 result = MLX5_MKEY_MASK_LEN | 3609 MLX5_MKEY_MASK_PAGE_SIZE | 3610 MLX5_MKEY_MASK_START_ADDR | 3611 MLX5_MKEY_MASK_EN_SIGERR | 3612 MLX5_MKEY_MASK_EN_RINVAL | 3613 MLX5_MKEY_MASK_KEY | 3614 MLX5_MKEY_MASK_LR | 3615 MLX5_MKEY_MASK_LW | 3616 MLX5_MKEY_MASK_RR | 3617 MLX5_MKEY_MASK_RW | 3618 MLX5_MKEY_MASK_SMALL_FENCE | 3619 MLX5_MKEY_MASK_FREE | 3620 MLX5_MKEY_MASK_BSF_EN; 3621 3622 return cpu_to_be64(result); 3623 } 3624 3625 static void set_reg_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr, 3626 struct mlx5_ib_mr *mr, bool umr_inline) 3627 { 3628 int size = mr->ndescs * mr->desc_size; 3629 3630 memset(umr, 0, sizeof(*umr)); 3631 3632 umr->flags = MLX5_UMR_CHECK_NOT_FREE; 3633 if (umr_inline) 3634 umr->flags |= MLX5_UMR_INLINE; 3635 umr->xlt_octowords = cpu_to_be16(get_xlt_octo(size)); 3636 umr->mkey_mask = frwr_mkey_mask(); 3637 } 3638 3639 static void set_linv_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr) 3640 { 3641 memset(umr, 0, sizeof(*umr)); 3642 umr->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE); 3643 umr->flags = MLX5_UMR_INLINE; 3644 } 3645 3646 static __be64 get_umr_enable_mr_mask(void) 3647 { 3648 u64 result; 3649 3650 result = MLX5_MKEY_MASK_KEY | 3651 MLX5_MKEY_MASK_FREE; 3652 3653 return cpu_to_be64(result); 3654 } 3655 3656 static __be64 get_umr_disable_mr_mask(void) 3657 { 3658 u64 result; 3659 3660 result = MLX5_MKEY_MASK_FREE; 3661 3662 return cpu_to_be64(result); 3663 } 3664 3665 static __be64 get_umr_update_translation_mask(void) 3666 { 3667 u64 result; 3668 3669 result = MLX5_MKEY_MASK_LEN | 3670 MLX5_MKEY_MASK_PAGE_SIZE | 3671 MLX5_MKEY_MASK_START_ADDR; 3672 3673 return cpu_to_be64(result); 3674 } 3675 3676 static __be64 get_umr_update_access_mask(int atomic) 3677 { 3678 u64 result; 3679 3680 result = MLX5_MKEY_MASK_LR | 3681 MLX5_MKEY_MASK_LW | 3682 MLX5_MKEY_MASK_RR | 3683 MLX5_MKEY_MASK_RW; 3684 3685 if (atomic) 3686 result |= MLX5_MKEY_MASK_A; 3687 3688 return cpu_to_be64(result); 3689 } 3690 3691 static __be64 get_umr_update_pd_mask(void) 3692 { 3693 u64 result; 3694 3695 result = MLX5_MKEY_MASK_PD; 3696 3697 return cpu_to_be64(result); 3698 } 3699 3700 static int umr_check_mkey_mask(struct mlx5_ib_dev *dev, u64 mask) 3701 { 3702 if ((mask & MLX5_MKEY_MASK_PAGE_SIZE && 3703 MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled)) || 3704 (mask & MLX5_MKEY_MASK_A && 3705 MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled))) 3706 return -EPERM; 3707 return 0; 3708 } 3709 3710 static int set_reg_umr_segment(struct mlx5_ib_dev *dev, 3711 struct mlx5_wqe_umr_ctrl_seg *umr, 3712 const struct ib_send_wr *wr, int atomic) 3713 { 3714 const struct mlx5_umr_wr *umrwr = umr_wr(wr); 3715 3716 memset(umr, 0, sizeof(*umr)); 3717 3718 if (wr->send_flags & MLX5_IB_SEND_UMR_FAIL_IF_FREE) 3719 umr->flags = MLX5_UMR_CHECK_FREE; /* fail if free */ 3720 else 3721 umr->flags = MLX5_UMR_CHECK_NOT_FREE; /* fail if not free */ 3722 3723 umr->xlt_octowords = cpu_to_be16(get_xlt_octo(umrwr->xlt_size)); 3724 if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_XLT) { 3725 u64 offset = get_xlt_octo(umrwr->offset); 3726 3727 umr->xlt_offset = cpu_to_be16(offset & 0xffff); 3728 umr->xlt_offset_47_16 = cpu_to_be32(offset >> 16); 3729 umr->flags |= MLX5_UMR_TRANSLATION_OFFSET_EN; 3730 } 3731 if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_TRANSLATION) 3732 umr->mkey_mask |= get_umr_update_translation_mask(); 3733 if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS) { 3734 umr->mkey_mask |= get_umr_update_access_mask(atomic); 3735 umr->mkey_mask |= get_umr_update_pd_mask(); 3736 } 3737 if (wr->send_flags & MLX5_IB_SEND_UMR_ENABLE_MR) 3738 umr->mkey_mask |= get_umr_enable_mr_mask(); 3739 if (wr->send_flags & MLX5_IB_SEND_UMR_DISABLE_MR) 3740 umr->mkey_mask |= get_umr_disable_mr_mask(); 3741 3742 if (!wr->num_sge) 3743 umr->flags |= MLX5_UMR_INLINE; 3744 3745 return umr_check_mkey_mask(dev, be64_to_cpu(umr->mkey_mask)); 3746 } 3747 3748 static u8 get_umr_flags(int acc) 3749 { 3750 return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX5_PERM_ATOMIC : 0) | 3751 (acc & IB_ACCESS_REMOTE_WRITE ? MLX5_PERM_REMOTE_WRITE : 0) | 3752 (acc & IB_ACCESS_REMOTE_READ ? MLX5_PERM_REMOTE_READ : 0) | 3753 (acc & IB_ACCESS_LOCAL_WRITE ? MLX5_PERM_LOCAL_WRITE : 0) | 3754 MLX5_PERM_LOCAL_READ | MLX5_PERM_UMR_EN; 3755 } 3756 3757 static void set_reg_mkey_seg(struct mlx5_mkey_seg *seg, 3758 struct mlx5_ib_mr *mr, 3759 u32 key, int access) 3760 { 3761 int ndescs = ALIGN(mr->ndescs, 8) >> 1; 3762 3763 memset(seg, 0, sizeof(*seg)); 3764 3765 if (mr->access_mode == MLX5_MKC_ACCESS_MODE_MTT) 3766 seg->log2_page_size = ilog2(mr->ibmr.page_size); 3767 else if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS) 3768 /* KLMs take twice the size of MTTs */ 3769 ndescs *= 2; 3770 3771 seg->flags = get_umr_flags(access) | mr->access_mode; 3772 seg->qpn_mkey7_0 = cpu_to_be32((key & 0xff) | 0xffffff00); 3773 seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL); 3774 seg->start_addr = cpu_to_be64(mr->ibmr.iova); 3775 seg->len = cpu_to_be64(mr->ibmr.length); 3776 seg->xlt_oct_size = cpu_to_be32(ndescs); 3777 } 3778 3779 static void set_linv_mkey_seg(struct mlx5_mkey_seg *seg) 3780 { 3781 memset(seg, 0, sizeof(*seg)); 3782 seg->status = MLX5_MKEY_STATUS_FREE; 3783 } 3784 3785 static void set_reg_mkey_segment(struct mlx5_mkey_seg *seg, 3786 const struct ib_send_wr *wr) 3787 { 3788 const struct mlx5_umr_wr *umrwr = umr_wr(wr); 3789 3790 memset(seg, 0, sizeof(*seg)); 3791 if (wr->send_flags & MLX5_IB_SEND_UMR_DISABLE_MR) 3792 seg->status = MLX5_MKEY_STATUS_FREE; 3793 3794 seg->flags = convert_access(umrwr->access_flags); 3795 if (umrwr->pd) 3796 seg->flags_pd = cpu_to_be32(to_mpd(umrwr->pd)->pdn); 3797 if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_TRANSLATION && 3798 !umrwr->length) 3799 seg->flags_pd |= cpu_to_be32(MLX5_MKEY_LEN64); 3800 3801 seg->start_addr = cpu_to_be64(umrwr->virt_addr); 3802 seg->len = cpu_to_be64(umrwr->length); 3803 seg->log2_page_size = umrwr->page_shift; 3804 seg->qpn_mkey7_0 = cpu_to_be32(0xffffff00 | 3805 mlx5_mkey_variant(umrwr->mkey)); 3806 } 3807 3808 static void set_reg_data_seg(struct mlx5_wqe_data_seg *dseg, 3809 struct mlx5_ib_mr *mr, 3810 struct mlx5_ib_pd *pd) 3811 { 3812 int bcount = mr->desc_size * mr->ndescs; 3813 3814 dseg->addr = cpu_to_be64(mr->desc_map); 3815 dseg->byte_count = cpu_to_be32(ALIGN(bcount, 64)); 3816 dseg->lkey = cpu_to_be32(pd->ibpd.local_dma_lkey); 3817 } 3818 3819 static void set_reg_umr_inline_seg(void *seg, struct mlx5_ib_qp *qp, 3820 struct mlx5_ib_mr *mr, int mr_list_size) 3821 { 3822 void *qend = qp->sq.qend; 3823 void *addr = mr->descs; 3824 int copy; 3825 3826 if (unlikely(seg + mr_list_size > qend)) { 3827 copy = qend - seg; 3828 memcpy(seg, addr, copy); 3829 addr += copy; 3830 mr_list_size -= copy; 3831 seg = mlx5_get_send_wqe(qp, 0); 3832 } 3833 memcpy(seg, addr, mr_list_size); 3834 seg += mr_list_size; 3835 } 3836 3837 static __be32 send_ieth(const struct ib_send_wr *wr) 3838 { 3839 switch (wr->opcode) { 3840 case IB_WR_SEND_WITH_IMM: 3841 case IB_WR_RDMA_WRITE_WITH_IMM: 3842 return wr->ex.imm_data; 3843 3844 case IB_WR_SEND_WITH_INV: 3845 return cpu_to_be32(wr->ex.invalidate_rkey); 3846 3847 default: 3848 return 0; 3849 } 3850 } 3851 3852 static u8 calc_sig(void *wqe, int size) 3853 { 3854 u8 *p = wqe; 3855 u8 res = 0; 3856 int i; 3857 3858 for (i = 0; i < size; i++) 3859 res ^= p[i]; 3860 3861 return ~res; 3862 } 3863 3864 static u8 wq_sig(void *wqe) 3865 { 3866 return calc_sig(wqe, (*((u8 *)wqe + 8) & 0x3f) << 4); 3867 } 3868 3869 static int set_data_inl_seg(struct mlx5_ib_qp *qp, const struct ib_send_wr *wr, 3870 void *wqe, int *sz) 3871 { 3872 struct mlx5_wqe_inline_seg *seg; 3873 void *qend = qp->sq.qend; 3874 void *addr; 3875 int inl = 0; 3876 int copy; 3877 int len; 3878 int i; 3879 3880 seg = wqe; 3881 wqe += sizeof(*seg); 3882 for (i = 0; i < wr->num_sge; i++) { 3883 addr = (void *)(unsigned long)(wr->sg_list[i].addr); 3884 len = wr->sg_list[i].length; 3885 inl += len; 3886 3887 if (unlikely(inl > qp->max_inline_data)) 3888 return -ENOMEM; 3889 3890 if (unlikely(wqe + len > qend)) { 3891 copy = qend - wqe; 3892 memcpy(wqe, addr, copy); 3893 addr += copy; 3894 len -= copy; 3895 wqe = mlx5_get_send_wqe(qp, 0); 3896 } 3897 memcpy(wqe, addr, len); 3898 wqe += len; 3899 } 3900 3901 seg->byte_count = cpu_to_be32(inl | MLX5_INLINE_SEG); 3902 3903 *sz = ALIGN(inl + sizeof(seg->byte_count), 16) / 16; 3904 3905 return 0; 3906 } 3907 3908 static u16 prot_field_size(enum ib_signature_type type) 3909 { 3910 switch (type) { 3911 case IB_SIG_TYPE_T10_DIF: 3912 return MLX5_DIF_SIZE; 3913 default: 3914 return 0; 3915 } 3916 } 3917 3918 static u8 bs_selector(int block_size) 3919 { 3920 switch (block_size) { 3921 case 512: return 0x1; 3922 case 520: return 0x2; 3923 case 4096: return 0x3; 3924 case 4160: return 0x4; 3925 case 1073741824: return 0x5; 3926 default: return 0; 3927 } 3928 } 3929 3930 static void mlx5_fill_inl_bsf(struct ib_sig_domain *domain, 3931 struct mlx5_bsf_inl *inl) 3932 { 3933 /* Valid inline section and allow BSF refresh */ 3934 inl->vld_refresh = cpu_to_be16(MLX5_BSF_INL_VALID | 3935 MLX5_BSF_REFRESH_DIF); 3936 inl->dif_apptag = cpu_to_be16(domain->sig.dif.app_tag); 3937 inl->dif_reftag = cpu_to_be32(domain->sig.dif.ref_tag); 3938 /* repeating block */ 3939 inl->rp_inv_seed = MLX5_BSF_REPEAT_BLOCK; 3940 inl->sig_type = domain->sig.dif.bg_type == IB_T10DIF_CRC ? 3941 MLX5_DIF_CRC : MLX5_DIF_IPCS; 3942 3943 if (domain->sig.dif.ref_remap) 3944 inl->dif_inc_ref_guard_check |= MLX5_BSF_INC_REFTAG; 3945 3946 if (domain->sig.dif.app_escape) { 3947 if (domain->sig.dif.ref_escape) 3948 inl->dif_inc_ref_guard_check |= MLX5_BSF_APPREF_ESCAPE; 3949 else 3950 inl->dif_inc_ref_guard_check |= MLX5_BSF_APPTAG_ESCAPE; 3951 } 3952 3953 inl->dif_app_bitmask_check = 3954 cpu_to_be16(domain->sig.dif.apptag_check_mask); 3955 } 3956 3957 static int mlx5_set_bsf(struct ib_mr *sig_mr, 3958 struct ib_sig_attrs *sig_attrs, 3959 struct mlx5_bsf *bsf, u32 data_size) 3960 { 3961 struct mlx5_core_sig_ctx *msig = to_mmr(sig_mr)->sig; 3962 struct mlx5_bsf_basic *basic = &bsf->basic; 3963 struct ib_sig_domain *mem = &sig_attrs->mem; 3964 struct ib_sig_domain *wire = &sig_attrs->wire; 3965 3966 memset(bsf, 0, sizeof(*bsf)); 3967 3968 /* Basic + Extended + Inline */ 3969 basic->bsf_size_sbs = 1 << 7; 3970 /* Input domain check byte mask */ 3971 basic->check_byte_mask = sig_attrs->check_mask; 3972 basic->raw_data_size = cpu_to_be32(data_size); 3973 3974 /* Memory domain */ 3975 switch (sig_attrs->mem.sig_type) { 3976 case IB_SIG_TYPE_NONE: 3977 break; 3978 case IB_SIG_TYPE_T10_DIF: 3979 basic->mem.bs_selector = bs_selector(mem->sig.dif.pi_interval); 3980 basic->m_bfs_psv = cpu_to_be32(msig->psv_memory.psv_idx); 3981 mlx5_fill_inl_bsf(mem, &bsf->m_inl); 3982 break; 3983 default: 3984 return -EINVAL; 3985 } 3986 3987 /* Wire domain */ 3988 switch (sig_attrs->wire.sig_type) { 3989 case IB_SIG_TYPE_NONE: 3990 break; 3991 case IB_SIG_TYPE_T10_DIF: 3992 if (mem->sig.dif.pi_interval == wire->sig.dif.pi_interval && 3993 mem->sig_type == wire->sig_type) { 3994 /* Same block structure */ 3995 basic->bsf_size_sbs |= 1 << 4; 3996 if (mem->sig.dif.bg_type == wire->sig.dif.bg_type) 3997 basic->wire.copy_byte_mask |= MLX5_CPY_GRD_MASK; 3998 if (mem->sig.dif.app_tag == wire->sig.dif.app_tag) 3999 basic->wire.copy_byte_mask |= MLX5_CPY_APP_MASK; 4000 if (mem->sig.dif.ref_tag == wire->sig.dif.ref_tag) 4001 basic->wire.copy_byte_mask |= MLX5_CPY_REF_MASK; 4002 } else 4003 basic->wire.bs_selector = bs_selector(wire->sig.dif.pi_interval); 4004 4005 basic->w_bfs_psv = cpu_to_be32(msig->psv_wire.psv_idx); 4006 mlx5_fill_inl_bsf(wire, &bsf->w_inl); 4007 break; 4008 default: 4009 return -EINVAL; 4010 } 4011 4012 return 0; 4013 } 4014 4015 static int set_sig_data_segment(const struct ib_sig_handover_wr *wr, 4016 struct mlx5_ib_qp *qp, void **seg, int *size) 4017 { 4018 struct ib_sig_attrs *sig_attrs = wr->sig_attrs; 4019 struct ib_mr *sig_mr = wr->sig_mr; 4020 struct mlx5_bsf *bsf; 4021 u32 data_len = wr->wr.sg_list->length; 4022 u32 data_key = wr->wr.sg_list->lkey; 4023 u64 data_va = wr->wr.sg_list->addr; 4024 int ret; 4025 int wqe_size; 4026 4027 if (!wr->prot || 4028 (data_key == wr->prot->lkey && 4029 data_va == wr->prot->addr && 4030 data_len == wr->prot->length)) { 4031 /** 4032 * Source domain doesn't contain signature information 4033 * or data and protection are interleaved in memory. 4034 * So need construct: 4035 * ------------------ 4036 * | data_klm | 4037 * ------------------ 4038 * | BSF | 4039 * ------------------ 4040 **/ 4041 struct mlx5_klm *data_klm = *seg; 4042 4043 data_klm->bcount = cpu_to_be32(data_len); 4044 data_klm->key = cpu_to_be32(data_key); 4045 data_klm->va = cpu_to_be64(data_va); 4046 wqe_size = ALIGN(sizeof(*data_klm), 64); 4047 } else { 4048 /** 4049 * Source domain contains signature information 4050 * So need construct a strided block format: 4051 * --------------------------- 4052 * | stride_block_ctrl | 4053 * --------------------------- 4054 * | data_klm | 4055 * --------------------------- 4056 * | prot_klm | 4057 * --------------------------- 4058 * | BSF | 4059 * --------------------------- 4060 **/ 4061 struct mlx5_stride_block_ctrl_seg *sblock_ctrl; 4062 struct mlx5_stride_block_entry *data_sentry; 4063 struct mlx5_stride_block_entry *prot_sentry; 4064 u32 prot_key = wr->prot->lkey; 4065 u64 prot_va = wr->prot->addr; 4066 u16 block_size = sig_attrs->mem.sig.dif.pi_interval; 4067 int prot_size; 4068 4069 sblock_ctrl = *seg; 4070 data_sentry = (void *)sblock_ctrl + sizeof(*sblock_ctrl); 4071 prot_sentry = (void *)data_sentry + sizeof(*data_sentry); 4072 4073 prot_size = prot_field_size(sig_attrs->mem.sig_type); 4074 if (!prot_size) { 4075 pr_err("Bad block size given: %u\n", block_size); 4076 return -EINVAL; 4077 } 4078 sblock_ctrl->bcount_per_cycle = cpu_to_be32(block_size + 4079 prot_size); 4080 sblock_ctrl->op = cpu_to_be32(MLX5_STRIDE_BLOCK_OP); 4081 sblock_ctrl->repeat_count = cpu_to_be32(data_len / block_size); 4082 sblock_ctrl->num_entries = cpu_to_be16(2); 4083 4084 data_sentry->bcount = cpu_to_be16(block_size); 4085 data_sentry->key = cpu_to_be32(data_key); 4086 data_sentry->va = cpu_to_be64(data_va); 4087 data_sentry->stride = cpu_to_be16(block_size); 4088 4089 prot_sentry->bcount = cpu_to_be16(prot_size); 4090 prot_sentry->key = cpu_to_be32(prot_key); 4091 prot_sentry->va = cpu_to_be64(prot_va); 4092 prot_sentry->stride = cpu_to_be16(prot_size); 4093 4094 wqe_size = ALIGN(sizeof(*sblock_ctrl) + sizeof(*data_sentry) + 4095 sizeof(*prot_sentry), 64); 4096 } 4097 4098 *seg += wqe_size; 4099 *size += wqe_size / 16; 4100 if (unlikely((*seg == qp->sq.qend))) 4101 *seg = mlx5_get_send_wqe(qp, 0); 4102 4103 bsf = *seg; 4104 ret = mlx5_set_bsf(sig_mr, sig_attrs, bsf, data_len); 4105 if (ret) 4106 return -EINVAL; 4107 4108 *seg += sizeof(*bsf); 4109 *size += sizeof(*bsf) / 16; 4110 if (unlikely((*seg == qp->sq.qend))) 4111 *seg = mlx5_get_send_wqe(qp, 0); 4112 4113 return 0; 4114 } 4115 4116 static void set_sig_mkey_segment(struct mlx5_mkey_seg *seg, 4117 const struct ib_sig_handover_wr *wr, u32 size, 4118 u32 length, u32 pdn) 4119 { 4120 struct ib_mr *sig_mr = wr->sig_mr; 4121 u32 sig_key = sig_mr->rkey; 4122 u8 sigerr = to_mmr(sig_mr)->sig->sigerr_count & 1; 4123 4124 memset(seg, 0, sizeof(*seg)); 4125 4126 seg->flags = get_umr_flags(wr->access_flags) | 4127 MLX5_MKC_ACCESS_MODE_KLMS; 4128 seg->qpn_mkey7_0 = cpu_to_be32((sig_key & 0xff) | 0xffffff00); 4129 seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL | sigerr << 26 | 4130 MLX5_MKEY_BSF_EN | pdn); 4131 seg->len = cpu_to_be64(length); 4132 seg->xlt_oct_size = cpu_to_be32(get_xlt_octo(size)); 4133 seg->bsfs_octo_size = cpu_to_be32(MLX5_MKEY_BSF_OCTO_SIZE); 4134 } 4135 4136 static void set_sig_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr, 4137 u32 size) 4138 { 4139 memset(umr, 0, sizeof(*umr)); 4140 4141 umr->flags = MLX5_FLAGS_INLINE | MLX5_FLAGS_CHECK_FREE; 4142 umr->xlt_octowords = cpu_to_be16(get_xlt_octo(size)); 4143 umr->bsf_octowords = cpu_to_be16(MLX5_MKEY_BSF_OCTO_SIZE); 4144 umr->mkey_mask = sig_mkey_mask(); 4145 } 4146 4147 4148 static int set_sig_umr_wr(const struct ib_send_wr *send_wr, 4149 struct mlx5_ib_qp *qp, void **seg, int *size) 4150 { 4151 const struct ib_sig_handover_wr *wr = sig_handover_wr(send_wr); 4152 struct mlx5_ib_mr *sig_mr = to_mmr(wr->sig_mr); 4153 u32 pdn = get_pd(qp)->pdn; 4154 u32 xlt_size; 4155 int region_len, ret; 4156 4157 if (unlikely(wr->wr.num_sge != 1) || 4158 unlikely(wr->access_flags & IB_ACCESS_REMOTE_ATOMIC) || 4159 unlikely(!sig_mr->sig) || unlikely(!qp->signature_en) || 4160 unlikely(!sig_mr->sig->sig_status_checked)) 4161 return -EINVAL; 4162 4163 /* length of the protected region, data + protection */ 4164 region_len = wr->wr.sg_list->length; 4165 if (wr->prot && 4166 (wr->prot->lkey != wr->wr.sg_list->lkey || 4167 wr->prot->addr != wr->wr.sg_list->addr || 4168 wr->prot->length != wr->wr.sg_list->length)) 4169 region_len += wr->prot->length; 4170 4171 /** 4172 * KLM octoword size - if protection was provided 4173 * then we use strided block format (3 octowords), 4174 * else we use single KLM (1 octoword) 4175 **/ 4176 xlt_size = wr->prot ? 0x30 : sizeof(struct mlx5_klm); 4177 4178 set_sig_umr_segment(*seg, xlt_size); 4179 *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg); 4180 *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16; 4181 if (unlikely((*seg == qp->sq.qend))) 4182 *seg = mlx5_get_send_wqe(qp, 0); 4183 4184 set_sig_mkey_segment(*seg, wr, xlt_size, region_len, pdn); 4185 *seg += sizeof(struct mlx5_mkey_seg); 4186 *size += sizeof(struct mlx5_mkey_seg) / 16; 4187 if (unlikely((*seg == qp->sq.qend))) 4188 *seg = mlx5_get_send_wqe(qp, 0); 4189 4190 ret = set_sig_data_segment(wr, qp, seg, size); 4191 if (ret) 4192 return ret; 4193 4194 sig_mr->sig->sig_status_checked = false; 4195 return 0; 4196 } 4197 4198 static int set_psv_wr(struct ib_sig_domain *domain, 4199 u32 psv_idx, void **seg, int *size) 4200 { 4201 struct mlx5_seg_set_psv *psv_seg = *seg; 4202 4203 memset(psv_seg, 0, sizeof(*psv_seg)); 4204 psv_seg->psv_num = cpu_to_be32(psv_idx); 4205 switch (domain->sig_type) { 4206 case IB_SIG_TYPE_NONE: 4207 break; 4208 case IB_SIG_TYPE_T10_DIF: 4209 psv_seg->transient_sig = cpu_to_be32(domain->sig.dif.bg << 16 | 4210 domain->sig.dif.app_tag); 4211 psv_seg->ref_tag = cpu_to_be32(domain->sig.dif.ref_tag); 4212 break; 4213 default: 4214 pr_err("Bad signature type (%d) is given.\n", 4215 domain->sig_type); 4216 return -EINVAL; 4217 } 4218 4219 *seg += sizeof(*psv_seg); 4220 *size += sizeof(*psv_seg) / 16; 4221 4222 return 0; 4223 } 4224 4225 static int set_reg_wr(struct mlx5_ib_qp *qp, 4226 const struct ib_reg_wr *wr, 4227 void **seg, int *size) 4228 { 4229 struct mlx5_ib_mr *mr = to_mmr(wr->mr); 4230 struct mlx5_ib_pd *pd = to_mpd(qp->ibqp.pd); 4231 int mr_list_size = mr->ndescs * mr->desc_size; 4232 bool umr_inline = mr_list_size <= MLX5_IB_SQ_UMR_INLINE_THRESHOLD; 4233 4234 if (unlikely(wr->wr.send_flags & IB_SEND_INLINE)) { 4235 mlx5_ib_warn(to_mdev(qp->ibqp.device), 4236 "Invalid IB_SEND_INLINE send flag\n"); 4237 return -EINVAL; 4238 } 4239 4240 set_reg_umr_seg(*seg, mr, umr_inline); 4241 *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg); 4242 *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16; 4243 if (unlikely((*seg == qp->sq.qend))) 4244 *seg = mlx5_get_send_wqe(qp, 0); 4245 4246 set_reg_mkey_seg(*seg, mr, wr->key, wr->access); 4247 *seg += sizeof(struct mlx5_mkey_seg); 4248 *size += sizeof(struct mlx5_mkey_seg) / 16; 4249 if (unlikely((*seg == qp->sq.qend))) 4250 *seg = mlx5_get_send_wqe(qp, 0); 4251 4252 if (umr_inline) { 4253 set_reg_umr_inline_seg(*seg, qp, mr, mr_list_size); 4254 *size += get_xlt_octo(mr_list_size); 4255 } else { 4256 set_reg_data_seg(*seg, mr, pd); 4257 *seg += sizeof(struct mlx5_wqe_data_seg); 4258 *size += (sizeof(struct mlx5_wqe_data_seg) / 16); 4259 } 4260 return 0; 4261 } 4262 4263 static void set_linv_wr(struct mlx5_ib_qp *qp, void **seg, int *size) 4264 { 4265 set_linv_umr_seg(*seg); 4266 *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg); 4267 *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16; 4268 if (unlikely((*seg == qp->sq.qend))) 4269 *seg = mlx5_get_send_wqe(qp, 0); 4270 set_linv_mkey_seg(*seg); 4271 *seg += sizeof(struct mlx5_mkey_seg); 4272 *size += sizeof(struct mlx5_mkey_seg) / 16; 4273 if (unlikely((*seg == qp->sq.qend))) 4274 *seg = mlx5_get_send_wqe(qp, 0); 4275 } 4276 4277 static void dump_wqe(struct mlx5_ib_qp *qp, int idx, int size_16) 4278 { 4279 __be32 *p = NULL; 4280 int tidx = idx; 4281 int i, j; 4282 4283 pr_debug("dump wqe at %p\n", mlx5_get_send_wqe(qp, tidx)); 4284 for (i = 0, j = 0; i < size_16 * 4; i += 4, j += 4) { 4285 if ((i & 0xf) == 0) { 4286 void *buf = mlx5_get_send_wqe(qp, tidx); 4287 tidx = (tidx + 1) & (qp->sq.wqe_cnt - 1); 4288 p = buf; 4289 j = 0; 4290 } 4291 pr_debug("%08x %08x %08x %08x\n", be32_to_cpu(p[j]), 4292 be32_to_cpu(p[j + 1]), be32_to_cpu(p[j + 2]), 4293 be32_to_cpu(p[j + 3])); 4294 } 4295 } 4296 4297 static int __begin_wqe(struct mlx5_ib_qp *qp, void **seg, 4298 struct mlx5_wqe_ctrl_seg **ctrl, 4299 const struct ib_send_wr *wr, unsigned *idx, 4300 int *size, int nreq, bool send_signaled, bool solicited) 4301 { 4302 if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq))) 4303 return -ENOMEM; 4304 4305 *idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1); 4306 *seg = mlx5_get_send_wqe(qp, *idx); 4307 *ctrl = *seg; 4308 *(uint32_t *)(*seg + 8) = 0; 4309 (*ctrl)->imm = send_ieth(wr); 4310 (*ctrl)->fm_ce_se = qp->sq_signal_bits | 4311 (send_signaled ? MLX5_WQE_CTRL_CQ_UPDATE : 0) | 4312 (solicited ? MLX5_WQE_CTRL_SOLICITED : 0); 4313 4314 *seg += sizeof(**ctrl); 4315 *size = sizeof(**ctrl) / 16; 4316 4317 return 0; 4318 } 4319 4320 static int begin_wqe(struct mlx5_ib_qp *qp, void **seg, 4321 struct mlx5_wqe_ctrl_seg **ctrl, 4322 const struct ib_send_wr *wr, unsigned *idx, 4323 int *size, int nreq) 4324 { 4325 return __begin_wqe(qp, seg, ctrl, wr, idx, size, nreq, 4326 wr->send_flags & IB_SEND_SIGNALED, 4327 wr->send_flags & IB_SEND_SOLICITED); 4328 } 4329 4330 static void finish_wqe(struct mlx5_ib_qp *qp, 4331 struct mlx5_wqe_ctrl_seg *ctrl, 4332 u8 size, unsigned idx, u64 wr_id, 4333 int nreq, u8 fence, u32 mlx5_opcode) 4334 { 4335 u8 opmod = 0; 4336 4337 ctrl->opmod_idx_opcode = cpu_to_be32(((u32)(qp->sq.cur_post) << 8) | 4338 mlx5_opcode | ((u32)opmod << 24)); 4339 ctrl->qpn_ds = cpu_to_be32(size | (qp->trans_qp.base.mqp.qpn << 8)); 4340 ctrl->fm_ce_se |= fence; 4341 if (unlikely(qp->wq_sig)) 4342 ctrl->signature = wq_sig(ctrl); 4343 4344 qp->sq.wrid[idx] = wr_id; 4345 qp->sq.w_list[idx].opcode = mlx5_opcode; 4346 qp->sq.wqe_head[idx] = qp->sq.head + nreq; 4347 qp->sq.cur_post += DIV_ROUND_UP(size * 16, MLX5_SEND_WQE_BB); 4348 qp->sq.w_list[idx].next = qp->sq.cur_post; 4349 } 4350 4351 static int _mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, 4352 const struct ib_send_wr **bad_wr, bool drain) 4353 { 4354 struct mlx5_wqe_ctrl_seg *ctrl = NULL; /* compiler warning */ 4355 struct mlx5_ib_dev *dev = to_mdev(ibqp->device); 4356 struct mlx5_core_dev *mdev = dev->mdev; 4357 struct mlx5_ib_qp *qp; 4358 struct mlx5_ib_mr *mr; 4359 struct mlx5_wqe_data_seg *dpseg; 4360 struct mlx5_wqe_xrc_seg *xrc; 4361 struct mlx5_bf *bf; 4362 int uninitialized_var(size); 4363 void *qend; 4364 unsigned long flags; 4365 unsigned idx; 4366 int err = 0; 4367 int num_sge; 4368 void *seg; 4369 int nreq; 4370 int i; 4371 u8 next_fence = 0; 4372 u8 fence; 4373 4374 if (unlikely(ibqp->qp_type == IB_QPT_GSI)) 4375 return mlx5_ib_gsi_post_send(ibqp, wr, bad_wr); 4376 4377 qp = to_mqp(ibqp); 4378 bf = &qp->bf; 4379 qend = qp->sq.qend; 4380 4381 spin_lock_irqsave(&qp->sq.lock, flags); 4382 4383 if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR && !drain) { 4384 err = -EIO; 4385 *bad_wr = wr; 4386 nreq = 0; 4387 goto out; 4388 } 4389 4390 for (nreq = 0; wr; nreq++, wr = wr->next) { 4391 if (unlikely(wr->opcode >= ARRAY_SIZE(mlx5_ib_opcode))) { 4392 mlx5_ib_warn(dev, "\n"); 4393 err = -EINVAL; 4394 *bad_wr = wr; 4395 goto out; 4396 } 4397 4398 num_sge = wr->num_sge; 4399 if (unlikely(num_sge > qp->sq.max_gs)) { 4400 mlx5_ib_warn(dev, "\n"); 4401 err = -EINVAL; 4402 *bad_wr = wr; 4403 goto out; 4404 } 4405 4406 err = begin_wqe(qp, &seg, &ctrl, wr, &idx, &size, nreq); 4407 if (err) { 4408 mlx5_ib_warn(dev, "\n"); 4409 err = -ENOMEM; 4410 *bad_wr = wr; 4411 goto out; 4412 } 4413 4414 if (wr->opcode == IB_WR_LOCAL_INV || 4415 wr->opcode == IB_WR_REG_MR) { 4416 fence = dev->umr_fence; 4417 next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL; 4418 } else if (wr->send_flags & IB_SEND_FENCE) { 4419 if (qp->next_fence) 4420 fence = MLX5_FENCE_MODE_SMALL_AND_FENCE; 4421 else 4422 fence = MLX5_FENCE_MODE_FENCE; 4423 } else { 4424 fence = qp->next_fence; 4425 } 4426 4427 switch (ibqp->qp_type) { 4428 case IB_QPT_XRC_INI: 4429 xrc = seg; 4430 seg += sizeof(*xrc); 4431 size += sizeof(*xrc) / 16; 4432 /* fall through */ 4433 case IB_QPT_RC: 4434 switch (wr->opcode) { 4435 case IB_WR_RDMA_READ: 4436 case IB_WR_RDMA_WRITE: 4437 case IB_WR_RDMA_WRITE_WITH_IMM: 4438 set_raddr_seg(seg, rdma_wr(wr)->remote_addr, 4439 rdma_wr(wr)->rkey); 4440 seg += sizeof(struct mlx5_wqe_raddr_seg); 4441 size += sizeof(struct mlx5_wqe_raddr_seg) / 16; 4442 break; 4443 4444 case IB_WR_ATOMIC_CMP_AND_SWP: 4445 case IB_WR_ATOMIC_FETCH_AND_ADD: 4446 case IB_WR_MASKED_ATOMIC_CMP_AND_SWP: 4447 mlx5_ib_warn(dev, "Atomic operations are not supported yet\n"); 4448 err = -ENOSYS; 4449 *bad_wr = wr; 4450 goto out; 4451 4452 case IB_WR_LOCAL_INV: 4453 qp->sq.wr_data[idx] = IB_WR_LOCAL_INV; 4454 ctrl->imm = cpu_to_be32(wr->ex.invalidate_rkey); 4455 set_linv_wr(qp, &seg, &size); 4456 num_sge = 0; 4457 break; 4458 4459 case IB_WR_REG_MR: 4460 qp->sq.wr_data[idx] = IB_WR_REG_MR; 4461 ctrl->imm = cpu_to_be32(reg_wr(wr)->key); 4462 err = set_reg_wr(qp, reg_wr(wr), &seg, &size); 4463 if (err) { 4464 *bad_wr = wr; 4465 goto out; 4466 } 4467 num_sge = 0; 4468 break; 4469 4470 case IB_WR_REG_SIG_MR: 4471 qp->sq.wr_data[idx] = IB_WR_REG_SIG_MR; 4472 mr = to_mmr(sig_handover_wr(wr)->sig_mr); 4473 4474 ctrl->imm = cpu_to_be32(mr->ibmr.rkey); 4475 err = set_sig_umr_wr(wr, qp, &seg, &size); 4476 if (err) { 4477 mlx5_ib_warn(dev, "\n"); 4478 *bad_wr = wr; 4479 goto out; 4480 } 4481 4482 finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq, 4483 fence, MLX5_OPCODE_UMR); 4484 /* 4485 * SET_PSV WQEs are not signaled and solicited 4486 * on error 4487 */ 4488 err = __begin_wqe(qp, &seg, &ctrl, wr, &idx, 4489 &size, nreq, false, true); 4490 if (err) { 4491 mlx5_ib_warn(dev, "\n"); 4492 err = -ENOMEM; 4493 *bad_wr = wr; 4494 goto out; 4495 } 4496 4497 err = set_psv_wr(&sig_handover_wr(wr)->sig_attrs->mem, 4498 mr->sig->psv_memory.psv_idx, &seg, 4499 &size); 4500 if (err) { 4501 mlx5_ib_warn(dev, "\n"); 4502 *bad_wr = wr; 4503 goto out; 4504 } 4505 4506 finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq, 4507 fence, MLX5_OPCODE_SET_PSV); 4508 err = __begin_wqe(qp, &seg, &ctrl, wr, &idx, 4509 &size, nreq, false, true); 4510 if (err) { 4511 mlx5_ib_warn(dev, "\n"); 4512 err = -ENOMEM; 4513 *bad_wr = wr; 4514 goto out; 4515 } 4516 4517 err = set_psv_wr(&sig_handover_wr(wr)->sig_attrs->wire, 4518 mr->sig->psv_wire.psv_idx, &seg, 4519 &size); 4520 if (err) { 4521 mlx5_ib_warn(dev, "\n"); 4522 *bad_wr = wr; 4523 goto out; 4524 } 4525 4526 finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq, 4527 fence, MLX5_OPCODE_SET_PSV); 4528 qp->next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL; 4529 num_sge = 0; 4530 goto skip_psv; 4531 4532 default: 4533 break; 4534 } 4535 break; 4536 4537 case IB_QPT_UC: 4538 switch (wr->opcode) { 4539 case IB_WR_RDMA_WRITE: 4540 case IB_WR_RDMA_WRITE_WITH_IMM: 4541 set_raddr_seg(seg, rdma_wr(wr)->remote_addr, 4542 rdma_wr(wr)->rkey); 4543 seg += sizeof(struct mlx5_wqe_raddr_seg); 4544 size += sizeof(struct mlx5_wqe_raddr_seg) / 16; 4545 break; 4546 4547 default: 4548 break; 4549 } 4550 break; 4551 4552 case IB_QPT_SMI: 4553 if (unlikely(!mdev->port_caps[qp->port - 1].has_smi)) { 4554 mlx5_ib_warn(dev, "Send SMP MADs is not allowed\n"); 4555 err = -EPERM; 4556 *bad_wr = wr; 4557 goto out; 4558 } 4559 /* fall through */ 4560 case MLX5_IB_QPT_HW_GSI: 4561 set_datagram_seg(seg, wr); 4562 seg += sizeof(struct mlx5_wqe_datagram_seg); 4563 size += sizeof(struct mlx5_wqe_datagram_seg) / 16; 4564 if (unlikely((seg == qend))) 4565 seg = mlx5_get_send_wqe(qp, 0); 4566 break; 4567 case IB_QPT_UD: 4568 set_datagram_seg(seg, wr); 4569 seg += sizeof(struct mlx5_wqe_datagram_seg); 4570 size += sizeof(struct mlx5_wqe_datagram_seg) / 16; 4571 4572 if (unlikely((seg == qend))) 4573 seg = mlx5_get_send_wqe(qp, 0); 4574 4575 /* handle qp that supports ud offload */ 4576 if (qp->flags & IB_QP_CREATE_IPOIB_UD_LSO) { 4577 struct mlx5_wqe_eth_pad *pad; 4578 4579 pad = seg; 4580 memset(pad, 0, sizeof(struct mlx5_wqe_eth_pad)); 4581 seg += sizeof(struct mlx5_wqe_eth_pad); 4582 size += sizeof(struct mlx5_wqe_eth_pad) / 16; 4583 4584 seg = set_eth_seg(seg, wr, qend, qp, &size); 4585 4586 if (unlikely((seg == qend))) 4587 seg = mlx5_get_send_wqe(qp, 0); 4588 } 4589 break; 4590 case MLX5_IB_QPT_REG_UMR: 4591 if (wr->opcode != MLX5_IB_WR_UMR) { 4592 err = -EINVAL; 4593 mlx5_ib_warn(dev, "bad opcode\n"); 4594 goto out; 4595 } 4596 qp->sq.wr_data[idx] = MLX5_IB_WR_UMR; 4597 ctrl->imm = cpu_to_be32(umr_wr(wr)->mkey); 4598 err = set_reg_umr_segment(dev, seg, wr, !!(MLX5_CAP_GEN(mdev, atomic))); 4599 if (unlikely(err)) 4600 goto out; 4601 seg += sizeof(struct mlx5_wqe_umr_ctrl_seg); 4602 size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16; 4603 if (unlikely((seg == qend))) 4604 seg = mlx5_get_send_wqe(qp, 0); 4605 set_reg_mkey_segment(seg, wr); 4606 seg += sizeof(struct mlx5_mkey_seg); 4607 size += sizeof(struct mlx5_mkey_seg) / 16; 4608 if (unlikely((seg == qend))) 4609 seg = mlx5_get_send_wqe(qp, 0); 4610 break; 4611 4612 default: 4613 break; 4614 } 4615 4616 if (wr->send_flags & IB_SEND_INLINE && num_sge) { 4617 int uninitialized_var(sz); 4618 4619 err = set_data_inl_seg(qp, wr, seg, &sz); 4620 if (unlikely(err)) { 4621 mlx5_ib_warn(dev, "\n"); 4622 *bad_wr = wr; 4623 goto out; 4624 } 4625 size += sz; 4626 } else { 4627 dpseg = seg; 4628 for (i = 0; i < num_sge; i++) { 4629 if (unlikely(dpseg == qend)) { 4630 seg = mlx5_get_send_wqe(qp, 0); 4631 dpseg = seg; 4632 } 4633 if (likely(wr->sg_list[i].length)) { 4634 set_data_ptr_seg(dpseg, wr->sg_list + i); 4635 size += sizeof(struct mlx5_wqe_data_seg) / 16; 4636 dpseg++; 4637 } 4638 } 4639 } 4640 4641 qp->next_fence = next_fence; 4642 finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq, fence, 4643 mlx5_ib_opcode[wr->opcode]); 4644 skip_psv: 4645 if (0) 4646 dump_wqe(qp, idx, size); 4647 } 4648 4649 out: 4650 if (likely(nreq)) { 4651 qp->sq.head += nreq; 4652 4653 /* Make sure that descriptors are written before 4654 * updating doorbell record and ringing the doorbell 4655 */ 4656 wmb(); 4657 4658 qp->db.db[MLX5_SND_DBR] = cpu_to_be32(qp->sq.cur_post); 4659 4660 /* Make sure doorbell record is visible to the HCA before 4661 * we hit doorbell */ 4662 wmb(); 4663 4664 /* currently we support only regular doorbells */ 4665 mlx5_write64((__be32 *)ctrl, bf->bfreg->map + bf->offset, NULL); 4666 /* Make sure doorbells don't leak out of SQ spinlock 4667 * and reach the HCA out of order. 4668 */ 4669 mmiowb(); 4670 bf->offset ^= bf->buf_size; 4671 } 4672 4673 spin_unlock_irqrestore(&qp->sq.lock, flags); 4674 4675 return err; 4676 } 4677 4678 int mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, 4679 const struct ib_send_wr **bad_wr) 4680 { 4681 return _mlx5_ib_post_send(ibqp, wr, bad_wr, false); 4682 } 4683 4684 static void set_sig_seg(struct mlx5_rwqe_sig *sig, int size) 4685 { 4686 sig->signature = calc_sig(sig, size); 4687 } 4688 4689 static int _mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr, 4690 const struct ib_recv_wr **bad_wr, bool drain) 4691 { 4692 struct mlx5_ib_qp *qp = to_mqp(ibqp); 4693 struct mlx5_wqe_data_seg *scat; 4694 struct mlx5_rwqe_sig *sig; 4695 struct mlx5_ib_dev *dev = to_mdev(ibqp->device); 4696 struct mlx5_core_dev *mdev = dev->mdev; 4697 unsigned long flags; 4698 int err = 0; 4699 int nreq; 4700 int ind; 4701 int i; 4702 4703 if (unlikely(ibqp->qp_type == IB_QPT_GSI)) 4704 return mlx5_ib_gsi_post_recv(ibqp, wr, bad_wr); 4705 4706 spin_lock_irqsave(&qp->rq.lock, flags); 4707 4708 if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR && !drain) { 4709 err = -EIO; 4710 *bad_wr = wr; 4711 nreq = 0; 4712 goto out; 4713 } 4714 4715 ind = qp->rq.head & (qp->rq.wqe_cnt - 1); 4716 4717 for (nreq = 0; wr; nreq++, wr = wr->next) { 4718 if (mlx5_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) { 4719 err = -ENOMEM; 4720 *bad_wr = wr; 4721 goto out; 4722 } 4723 4724 if (unlikely(wr->num_sge > qp->rq.max_gs)) { 4725 err = -EINVAL; 4726 *bad_wr = wr; 4727 goto out; 4728 } 4729 4730 scat = get_recv_wqe(qp, ind); 4731 if (qp->wq_sig) 4732 scat++; 4733 4734 for (i = 0; i < wr->num_sge; i++) 4735 set_data_ptr_seg(scat + i, wr->sg_list + i); 4736 4737 if (i < qp->rq.max_gs) { 4738 scat[i].byte_count = 0; 4739 scat[i].lkey = cpu_to_be32(MLX5_INVALID_LKEY); 4740 scat[i].addr = 0; 4741 } 4742 4743 if (qp->wq_sig) { 4744 sig = (struct mlx5_rwqe_sig *)scat; 4745 set_sig_seg(sig, (qp->rq.max_gs + 1) << 2); 4746 } 4747 4748 qp->rq.wrid[ind] = wr->wr_id; 4749 4750 ind = (ind + 1) & (qp->rq.wqe_cnt - 1); 4751 } 4752 4753 out: 4754 if (likely(nreq)) { 4755 qp->rq.head += nreq; 4756 4757 /* Make sure that descriptors are written before 4758 * doorbell record. 4759 */ 4760 wmb(); 4761 4762 *qp->db.db = cpu_to_be32(qp->rq.head & 0xffff); 4763 } 4764 4765 spin_unlock_irqrestore(&qp->rq.lock, flags); 4766 4767 return err; 4768 } 4769 4770 int mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr, 4771 const struct ib_recv_wr **bad_wr) 4772 { 4773 return _mlx5_ib_post_recv(ibqp, wr, bad_wr, false); 4774 } 4775 4776 static inline enum ib_qp_state to_ib_qp_state(enum mlx5_qp_state mlx5_state) 4777 { 4778 switch (mlx5_state) { 4779 case MLX5_QP_STATE_RST: return IB_QPS_RESET; 4780 case MLX5_QP_STATE_INIT: return IB_QPS_INIT; 4781 case MLX5_QP_STATE_RTR: return IB_QPS_RTR; 4782 case MLX5_QP_STATE_RTS: return IB_QPS_RTS; 4783 case MLX5_QP_STATE_SQ_DRAINING: 4784 case MLX5_QP_STATE_SQD: return IB_QPS_SQD; 4785 case MLX5_QP_STATE_SQER: return IB_QPS_SQE; 4786 case MLX5_QP_STATE_ERR: return IB_QPS_ERR; 4787 default: return -1; 4788 } 4789 } 4790 4791 static inline enum ib_mig_state to_ib_mig_state(int mlx5_mig_state) 4792 { 4793 switch (mlx5_mig_state) { 4794 case MLX5_QP_PM_ARMED: return IB_MIG_ARMED; 4795 case MLX5_QP_PM_REARM: return IB_MIG_REARM; 4796 case MLX5_QP_PM_MIGRATED: return IB_MIG_MIGRATED; 4797 default: return -1; 4798 } 4799 } 4800 4801 static int to_ib_qp_access_flags(int mlx5_flags) 4802 { 4803 int ib_flags = 0; 4804 4805 if (mlx5_flags & MLX5_QP_BIT_RRE) 4806 ib_flags |= IB_ACCESS_REMOTE_READ; 4807 if (mlx5_flags & MLX5_QP_BIT_RWE) 4808 ib_flags |= IB_ACCESS_REMOTE_WRITE; 4809 if (mlx5_flags & MLX5_QP_BIT_RAE) 4810 ib_flags |= IB_ACCESS_REMOTE_ATOMIC; 4811 4812 return ib_flags; 4813 } 4814 4815 static void to_rdma_ah_attr(struct mlx5_ib_dev *ibdev, 4816 struct rdma_ah_attr *ah_attr, 4817 struct mlx5_qp_path *path) 4818 { 4819 4820 memset(ah_attr, 0, sizeof(*ah_attr)); 4821 4822 if (!path->port || path->port > ibdev->num_ports) 4823 return; 4824 4825 ah_attr->type = rdma_ah_find_type(&ibdev->ib_dev, path->port); 4826 4827 rdma_ah_set_port_num(ah_attr, path->port); 4828 rdma_ah_set_sl(ah_attr, path->dci_cfi_prio_sl & 0xf); 4829 4830 rdma_ah_set_dlid(ah_attr, be16_to_cpu(path->rlid)); 4831 rdma_ah_set_path_bits(ah_attr, path->grh_mlid & 0x7f); 4832 rdma_ah_set_static_rate(ah_attr, 4833 path->static_rate ? path->static_rate - 5 : 0); 4834 if (path->grh_mlid & (1 << 7)) { 4835 u32 tc_fl = be32_to_cpu(path->tclass_flowlabel); 4836 4837 rdma_ah_set_grh(ah_attr, NULL, 4838 tc_fl & 0xfffff, 4839 path->mgid_index, 4840 path->hop_limit, 4841 (tc_fl >> 20) & 0xff); 4842 rdma_ah_set_dgid_raw(ah_attr, path->rgid); 4843 } 4844 } 4845 4846 static int query_raw_packet_qp_sq_state(struct mlx5_ib_dev *dev, 4847 struct mlx5_ib_sq *sq, 4848 u8 *sq_state) 4849 { 4850 int err; 4851 4852 err = mlx5_core_query_sq_state(dev->mdev, sq->base.mqp.qpn, sq_state); 4853 if (err) 4854 goto out; 4855 sq->state = *sq_state; 4856 4857 out: 4858 return err; 4859 } 4860 4861 static int query_raw_packet_qp_rq_state(struct mlx5_ib_dev *dev, 4862 struct mlx5_ib_rq *rq, 4863 u8 *rq_state) 4864 { 4865 void *out; 4866 void *rqc; 4867 int inlen; 4868 int err; 4869 4870 inlen = MLX5_ST_SZ_BYTES(query_rq_out); 4871 out = kvzalloc(inlen, GFP_KERNEL); 4872 if (!out) 4873 return -ENOMEM; 4874 4875 err = mlx5_core_query_rq(dev->mdev, rq->base.mqp.qpn, out); 4876 if (err) 4877 goto out; 4878 4879 rqc = MLX5_ADDR_OF(query_rq_out, out, rq_context); 4880 *rq_state = MLX5_GET(rqc, rqc, state); 4881 rq->state = *rq_state; 4882 4883 out: 4884 kvfree(out); 4885 return err; 4886 } 4887 4888 static int sqrq_state_to_qp_state(u8 sq_state, u8 rq_state, 4889 struct mlx5_ib_qp *qp, u8 *qp_state) 4890 { 4891 static const u8 sqrq_trans[MLX5_RQ_NUM_STATE][MLX5_SQ_NUM_STATE] = { 4892 [MLX5_RQC_STATE_RST] = { 4893 [MLX5_SQC_STATE_RST] = IB_QPS_RESET, 4894 [MLX5_SQC_STATE_RDY] = MLX5_QP_STATE_BAD, 4895 [MLX5_SQC_STATE_ERR] = MLX5_QP_STATE_BAD, 4896 [MLX5_SQ_STATE_NA] = IB_QPS_RESET, 4897 }, 4898 [MLX5_RQC_STATE_RDY] = { 4899 [MLX5_SQC_STATE_RST] = MLX5_QP_STATE_BAD, 4900 [MLX5_SQC_STATE_RDY] = MLX5_QP_STATE, 4901 [MLX5_SQC_STATE_ERR] = IB_QPS_SQE, 4902 [MLX5_SQ_STATE_NA] = MLX5_QP_STATE, 4903 }, 4904 [MLX5_RQC_STATE_ERR] = { 4905 [MLX5_SQC_STATE_RST] = MLX5_QP_STATE_BAD, 4906 [MLX5_SQC_STATE_RDY] = MLX5_QP_STATE_BAD, 4907 [MLX5_SQC_STATE_ERR] = IB_QPS_ERR, 4908 [MLX5_SQ_STATE_NA] = IB_QPS_ERR, 4909 }, 4910 [MLX5_RQ_STATE_NA] = { 4911 [MLX5_SQC_STATE_RST] = IB_QPS_RESET, 4912 [MLX5_SQC_STATE_RDY] = MLX5_QP_STATE, 4913 [MLX5_SQC_STATE_ERR] = MLX5_QP_STATE, 4914 [MLX5_SQ_STATE_NA] = MLX5_QP_STATE_BAD, 4915 }, 4916 }; 4917 4918 *qp_state = sqrq_trans[rq_state][sq_state]; 4919 4920 if (*qp_state == MLX5_QP_STATE_BAD) { 4921 WARN(1, "Buggy Raw Packet QP state, SQ 0x%x state: 0x%x, RQ 0x%x state: 0x%x", 4922 qp->raw_packet_qp.sq.base.mqp.qpn, sq_state, 4923 qp->raw_packet_qp.rq.base.mqp.qpn, rq_state); 4924 return -EINVAL; 4925 } 4926 4927 if (*qp_state == MLX5_QP_STATE) 4928 *qp_state = qp->state; 4929 4930 return 0; 4931 } 4932 4933 static int query_raw_packet_qp_state(struct mlx5_ib_dev *dev, 4934 struct mlx5_ib_qp *qp, 4935 u8 *raw_packet_qp_state) 4936 { 4937 struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp; 4938 struct mlx5_ib_sq *sq = &raw_packet_qp->sq; 4939 struct mlx5_ib_rq *rq = &raw_packet_qp->rq; 4940 int err; 4941 u8 sq_state = MLX5_SQ_STATE_NA; 4942 u8 rq_state = MLX5_RQ_STATE_NA; 4943 4944 if (qp->sq.wqe_cnt) { 4945 err = query_raw_packet_qp_sq_state(dev, sq, &sq_state); 4946 if (err) 4947 return err; 4948 } 4949 4950 if (qp->rq.wqe_cnt) { 4951 err = query_raw_packet_qp_rq_state(dev, rq, &rq_state); 4952 if (err) 4953 return err; 4954 } 4955 4956 return sqrq_state_to_qp_state(sq_state, rq_state, qp, 4957 raw_packet_qp_state); 4958 } 4959 4960 static int query_qp_attr(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, 4961 struct ib_qp_attr *qp_attr) 4962 { 4963 int outlen = MLX5_ST_SZ_BYTES(query_qp_out); 4964 struct mlx5_qp_context *context; 4965 int mlx5_state; 4966 u32 *outb; 4967 int err = 0; 4968 4969 outb = kzalloc(outlen, GFP_KERNEL); 4970 if (!outb) 4971 return -ENOMEM; 4972 4973 err = mlx5_core_qp_query(dev->mdev, &qp->trans_qp.base.mqp, outb, 4974 outlen); 4975 if (err) 4976 goto out; 4977 4978 /* FIXME: use MLX5_GET rather than mlx5_qp_context manual struct */ 4979 context = (struct mlx5_qp_context *)MLX5_ADDR_OF(query_qp_out, outb, qpc); 4980 4981 mlx5_state = be32_to_cpu(context->flags) >> 28; 4982 4983 qp->state = to_ib_qp_state(mlx5_state); 4984 qp_attr->path_mtu = context->mtu_msgmax >> 5; 4985 qp_attr->path_mig_state = 4986 to_ib_mig_state((be32_to_cpu(context->flags) >> 11) & 0x3); 4987 qp_attr->qkey = be32_to_cpu(context->qkey); 4988 qp_attr->rq_psn = be32_to_cpu(context->rnr_nextrecvpsn) & 0xffffff; 4989 qp_attr->sq_psn = be32_to_cpu(context->next_send_psn) & 0xffffff; 4990 qp_attr->dest_qp_num = be32_to_cpu(context->log_pg_sz_remote_qpn) & 0xffffff; 4991 qp_attr->qp_access_flags = 4992 to_ib_qp_access_flags(be32_to_cpu(context->params2)); 4993 4994 if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) { 4995 to_rdma_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path); 4996 to_rdma_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path); 4997 qp_attr->alt_pkey_index = 4998 be16_to_cpu(context->alt_path.pkey_index); 4999 qp_attr->alt_port_num = 5000 rdma_ah_get_port_num(&qp_attr->alt_ah_attr); 5001 } 5002 5003 qp_attr->pkey_index = be16_to_cpu(context->pri_path.pkey_index); 5004 qp_attr->port_num = context->pri_path.port; 5005 5006 /* qp_attr->en_sqd_async_notify is only applicable in modify qp */ 5007 qp_attr->sq_draining = mlx5_state == MLX5_QP_STATE_SQ_DRAINING; 5008 5009 qp_attr->max_rd_atomic = 1 << ((be32_to_cpu(context->params1) >> 21) & 0x7); 5010 5011 qp_attr->max_dest_rd_atomic = 5012 1 << ((be32_to_cpu(context->params2) >> 21) & 0x7); 5013 qp_attr->min_rnr_timer = 5014 (be32_to_cpu(context->rnr_nextrecvpsn) >> 24) & 0x1f; 5015 qp_attr->timeout = context->pri_path.ackto_lt >> 3; 5016 qp_attr->retry_cnt = (be32_to_cpu(context->params1) >> 16) & 0x7; 5017 qp_attr->rnr_retry = (be32_to_cpu(context->params1) >> 13) & 0x7; 5018 qp_attr->alt_timeout = context->alt_path.ackto_lt >> 3; 5019 5020 out: 5021 kfree(outb); 5022 return err; 5023 } 5024 5025 static int mlx5_ib_dct_query_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *mqp, 5026 struct ib_qp_attr *qp_attr, int qp_attr_mask, 5027 struct ib_qp_init_attr *qp_init_attr) 5028 { 5029 struct mlx5_core_dct *dct = &mqp->dct.mdct; 5030 u32 *out; 5031 u32 access_flags = 0; 5032 int outlen = MLX5_ST_SZ_BYTES(query_dct_out); 5033 void *dctc; 5034 int err; 5035 int supported_mask = IB_QP_STATE | 5036 IB_QP_ACCESS_FLAGS | 5037 IB_QP_PORT | 5038 IB_QP_MIN_RNR_TIMER | 5039 IB_QP_AV | 5040 IB_QP_PATH_MTU | 5041 IB_QP_PKEY_INDEX; 5042 5043 if (qp_attr_mask & ~supported_mask) 5044 return -EINVAL; 5045 if (mqp->state != IB_QPS_RTR) 5046 return -EINVAL; 5047 5048 out = kzalloc(outlen, GFP_KERNEL); 5049 if (!out) 5050 return -ENOMEM; 5051 5052 err = mlx5_core_dct_query(dev->mdev, dct, out, outlen); 5053 if (err) 5054 goto out; 5055 5056 dctc = MLX5_ADDR_OF(query_dct_out, out, dct_context_entry); 5057 5058 if (qp_attr_mask & IB_QP_STATE) 5059 qp_attr->qp_state = IB_QPS_RTR; 5060 5061 if (qp_attr_mask & IB_QP_ACCESS_FLAGS) { 5062 if (MLX5_GET(dctc, dctc, rre)) 5063 access_flags |= IB_ACCESS_REMOTE_READ; 5064 if (MLX5_GET(dctc, dctc, rwe)) 5065 access_flags |= IB_ACCESS_REMOTE_WRITE; 5066 if (MLX5_GET(dctc, dctc, rae)) 5067 access_flags |= IB_ACCESS_REMOTE_ATOMIC; 5068 qp_attr->qp_access_flags = access_flags; 5069 } 5070 5071 if (qp_attr_mask & IB_QP_PORT) 5072 qp_attr->port_num = MLX5_GET(dctc, dctc, port); 5073 if (qp_attr_mask & IB_QP_MIN_RNR_TIMER) 5074 qp_attr->min_rnr_timer = MLX5_GET(dctc, dctc, min_rnr_nak); 5075 if (qp_attr_mask & IB_QP_AV) { 5076 qp_attr->ah_attr.grh.traffic_class = MLX5_GET(dctc, dctc, tclass); 5077 qp_attr->ah_attr.grh.flow_label = MLX5_GET(dctc, dctc, flow_label); 5078 qp_attr->ah_attr.grh.sgid_index = MLX5_GET(dctc, dctc, my_addr_index); 5079 qp_attr->ah_attr.grh.hop_limit = MLX5_GET(dctc, dctc, hop_limit); 5080 } 5081 if (qp_attr_mask & IB_QP_PATH_MTU) 5082 qp_attr->path_mtu = MLX5_GET(dctc, dctc, mtu); 5083 if (qp_attr_mask & IB_QP_PKEY_INDEX) 5084 qp_attr->pkey_index = MLX5_GET(dctc, dctc, pkey_index); 5085 out: 5086 kfree(out); 5087 return err; 5088 } 5089 5090 int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, 5091 int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr) 5092 { 5093 struct mlx5_ib_dev *dev = to_mdev(ibqp->device); 5094 struct mlx5_ib_qp *qp = to_mqp(ibqp); 5095 int err = 0; 5096 u8 raw_packet_qp_state; 5097 5098 if (ibqp->rwq_ind_tbl) 5099 return -ENOSYS; 5100 5101 if (unlikely(ibqp->qp_type == IB_QPT_GSI)) 5102 return mlx5_ib_gsi_query_qp(ibqp, qp_attr, qp_attr_mask, 5103 qp_init_attr); 5104 5105 /* Not all of output fields are applicable, make sure to zero them */ 5106 memset(qp_init_attr, 0, sizeof(*qp_init_attr)); 5107 memset(qp_attr, 0, sizeof(*qp_attr)); 5108 5109 if (unlikely(qp->qp_sub_type == MLX5_IB_QPT_DCT)) 5110 return mlx5_ib_dct_query_qp(dev, qp, qp_attr, 5111 qp_attr_mask, qp_init_attr); 5112 5113 mutex_lock(&qp->mutex); 5114 5115 if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET || 5116 qp->flags & MLX5_IB_QP_UNDERLAY) { 5117 err = query_raw_packet_qp_state(dev, qp, &raw_packet_qp_state); 5118 if (err) 5119 goto out; 5120 qp->state = raw_packet_qp_state; 5121 qp_attr->port_num = 1; 5122 } else { 5123 err = query_qp_attr(dev, qp, qp_attr); 5124 if (err) 5125 goto out; 5126 } 5127 5128 qp_attr->qp_state = qp->state; 5129 qp_attr->cur_qp_state = qp_attr->qp_state; 5130 qp_attr->cap.max_recv_wr = qp->rq.wqe_cnt; 5131 qp_attr->cap.max_recv_sge = qp->rq.max_gs; 5132 5133 if (!ibqp->uobject) { 5134 qp_attr->cap.max_send_wr = qp->sq.max_post; 5135 qp_attr->cap.max_send_sge = qp->sq.max_gs; 5136 qp_init_attr->qp_context = ibqp->qp_context; 5137 } else { 5138 qp_attr->cap.max_send_wr = 0; 5139 qp_attr->cap.max_send_sge = 0; 5140 } 5141 5142 qp_init_attr->qp_type = ibqp->qp_type; 5143 qp_init_attr->recv_cq = ibqp->recv_cq; 5144 qp_init_attr->send_cq = ibqp->send_cq; 5145 qp_init_attr->srq = ibqp->srq; 5146 qp_attr->cap.max_inline_data = qp->max_inline_data; 5147 5148 qp_init_attr->cap = qp_attr->cap; 5149 5150 qp_init_attr->create_flags = 0; 5151 if (qp->flags & MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK) 5152 qp_init_attr->create_flags |= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK; 5153 5154 if (qp->flags & MLX5_IB_QP_CROSS_CHANNEL) 5155 qp_init_attr->create_flags |= IB_QP_CREATE_CROSS_CHANNEL; 5156 if (qp->flags & MLX5_IB_QP_MANAGED_SEND) 5157 qp_init_attr->create_flags |= IB_QP_CREATE_MANAGED_SEND; 5158 if (qp->flags & MLX5_IB_QP_MANAGED_RECV) 5159 qp_init_attr->create_flags |= IB_QP_CREATE_MANAGED_RECV; 5160 if (qp->flags & MLX5_IB_QP_SQPN_QP1) 5161 qp_init_attr->create_flags |= mlx5_ib_create_qp_sqpn_qp1(); 5162 5163 qp_init_attr->sq_sig_type = qp->sq_signal_bits & MLX5_WQE_CTRL_CQ_UPDATE ? 5164 IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR; 5165 5166 out: 5167 mutex_unlock(&qp->mutex); 5168 return err; 5169 } 5170 5171 struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev, 5172 struct ib_ucontext *context, 5173 struct ib_udata *udata) 5174 { 5175 struct mlx5_ib_dev *dev = to_mdev(ibdev); 5176 struct mlx5_ib_xrcd *xrcd; 5177 int err; 5178 5179 if (!MLX5_CAP_GEN(dev->mdev, xrc)) 5180 return ERR_PTR(-ENOSYS); 5181 5182 xrcd = kmalloc(sizeof(*xrcd), GFP_KERNEL); 5183 if (!xrcd) 5184 return ERR_PTR(-ENOMEM); 5185 5186 err = mlx5_core_xrcd_alloc(dev->mdev, &xrcd->xrcdn); 5187 if (err) { 5188 kfree(xrcd); 5189 return ERR_PTR(-ENOMEM); 5190 } 5191 5192 return &xrcd->ibxrcd; 5193 } 5194 5195 int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd) 5196 { 5197 struct mlx5_ib_dev *dev = to_mdev(xrcd->device); 5198 u32 xrcdn = to_mxrcd(xrcd)->xrcdn; 5199 int err; 5200 5201 err = mlx5_core_xrcd_dealloc(dev->mdev, xrcdn); 5202 if (err) 5203 mlx5_ib_warn(dev, "failed to dealloc xrcdn 0x%x\n", xrcdn); 5204 5205 kfree(xrcd); 5206 return 0; 5207 } 5208 5209 static void mlx5_ib_wq_event(struct mlx5_core_qp *core_qp, int type) 5210 { 5211 struct mlx5_ib_rwq *rwq = to_mibrwq(core_qp); 5212 struct mlx5_ib_dev *dev = to_mdev(rwq->ibwq.device); 5213 struct ib_event event; 5214 5215 if (rwq->ibwq.event_handler) { 5216 event.device = rwq->ibwq.device; 5217 event.element.wq = &rwq->ibwq; 5218 switch (type) { 5219 case MLX5_EVENT_TYPE_WQ_CATAS_ERROR: 5220 event.event = IB_EVENT_WQ_FATAL; 5221 break; 5222 default: 5223 mlx5_ib_warn(dev, "Unexpected event type %d on WQ %06x\n", type, core_qp->qpn); 5224 return; 5225 } 5226 5227 rwq->ibwq.event_handler(&event, rwq->ibwq.wq_context); 5228 } 5229 } 5230 5231 static int set_delay_drop(struct mlx5_ib_dev *dev) 5232 { 5233 int err = 0; 5234 5235 mutex_lock(&dev->delay_drop.lock); 5236 if (dev->delay_drop.activate) 5237 goto out; 5238 5239 err = mlx5_core_set_delay_drop(dev->mdev, dev->delay_drop.timeout); 5240 if (err) 5241 goto out; 5242 5243 dev->delay_drop.activate = true; 5244 out: 5245 mutex_unlock(&dev->delay_drop.lock); 5246 5247 if (!err) 5248 atomic_inc(&dev->delay_drop.rqs_cnt); 5249 return err; 5250 } 5251 5252 static int create_rq(struct mlx5_ib_rwq *rwq, struct ib_pd *pd, 5253 struct ib_wq_init_attr *init_attr) 5254 { 5255 struct mlx5_ib_dev *dev; 5256 int has_net_offloads; 5257 __be64 *rq_pas0; 5258 void *in; 5259 void *rqc; 5260 void *wq; 5261 int inlen; 5262 int err; 5263 5264 dev = to_mdev(pd->device); 5265 5266 inlen = MLX5_ST_SZ_BYTES(create_rq_in) + sizeof(u64) * rwq->rq_num_pas; 5267 in = kvzalloc(inlen, GFP_KERNEL); 5268 if (!in) 5269 return -ENOMEM; 5270 5271 rqc = MLX5_ADDR_OF(create_rq_in, in, ctx); 5272 MLX5_SET(rqc, rqc, mem_rq_type, 5273 MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE); 5274 MLX5_SET(rqc, rqc, user_index, rwq->user_index); 5275 MLX5_SET(rqc, rqc, cqn, to_mcq(init_attr->cq)->mcq.cqn); 5276 MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST); 5277 MLX5_SET(rqc, rqc, flush_in_error_en, 1); 5278 wq = MLX5_ADDR_OF(rqc, rqc, wq); 5279 MLX5_SET(wq, wq, wq_type, 5280 rwq->create_flags & MLX5_IB_WQ_FLAGS_STRIDING_RQ ? 5281 MLX5_WQ_TYPE_CYCLIC_STRIDING_RQ : MLX5_WQ_TYPE_CYCLIC); 5282 if (init_attr->create_flags & IB_WQ_FLAGS_PCI_WRITE_END_PADDING) { 5283 if (!MLX5_CAP_GEN(dev->mdev, end_pad)) { 5284 mlx5_ib_dbg(dev, "Scatter end padding is not supported\n"); 5285 err = -EOPNOTSUPP; 5286 goto out; 5287 } else { 5288 MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN); 5289 } 5290 } 5291 MLX5_SET(wq, wq, log_wq_stride, rwq->log_rq_stride); 5292 if (rwq->create_flags & MLX5_IB_WQ_FLAGS_STRIDING_RQ) { 5293 MLX5_SET(wq, wq, two_byte_shift_en, rwq->two_byte_shift_en); 5294 MLX5_SET(wq, wq, log_wqe_stride_size, 5295 rwq->single_stride_log_num_of_bytes - 5296 MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES); 5297 MLX5_SET(wq, wq, log_wqe_num_of_strides, rwq->log_num_strides - 5298 MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES); 5299 } 5300 MLX5_SET(wq, wq, log_wq_sz, rwq->log_rq_size); 5301 MLX5_SET(wq, wq, pd, to_mpd(pd)->pdn); 5302 MLX5_SET(wq, wq, page_offset, rwq->rq_page_offset); 5303 MLX5_SET(wq, wq, log_wq_pg_sz, rwq->log_page_size); 5304 MLX5_SET(wq, wq, wq_signature, rwq->wq_sig); 5305 MLX5_SET64(wq, wq, dbr_addr, rwq->db.dma); 5306 has_net_offloads = MLX5_CAP_GEN(dev->mdev, eth_net_offloads); 5307 if (init_attr->create_flags & IB_WQ_FLAGS_CVLAN_STRIPPING) { 5308 if (!(has_net_offloads && MLX5_CAP_ETH(dev->mdev, vlan_cap))) { 5309 mlx5_ib_dbg(dev, "VLAN offloads are not supported\n"); 5310 err = -EOPNOTSUPP; 5311 goto out; 5312 } 5313 } else { 5314 MLX5_SET(rqc, rqc, vsd, 1); 5315 } 5316 if (init_attr->create_flags & IB_WQ_FLAGS_SCATTER_FCS) { 5317 if (!(has_net_offloads && MLX5_CAP_ETH(dev->mdev, scatter_fcs))) { 5318 mlx5_ib_dbg(dev, "Scatter FCS is not supported\n"); 5319 err = -EOPNOTSUPP; 5320 goto out; 5321 } 5322 MLX5_SET(rqc, rqc, scatter_fcs, 1); 5323 } 5324 if (init_attr->create_flags & IB_WQ_FLAGS_DELAY_DROP) { 5325 if (!(dev->ib_dev.attrs.raw_packet_caps & 5326 IB_RAW_PACKET_CAP_DELAY_DROP)) { 5327 mlx5_ib_dbg(dev, "Delay drop is not supported\n"); 5328 err = -EOPNOTSUPP; 5329 goto out; 5330 } 5331 MLX5_SET(rqc, rqc, delay_drop_en, 1); 5332 } 5333 rq_pas0 = (__be64 *)MLX5_ADDR_OF(wq, wq, pas); 5334 mlx5_ib_populate_pas(dev, rwq->umem, rwq->page_shift, rq_pas0, 0); 5335 err = mlx5_core_create_rq_tracked(dev->mdev, in, inlen, &rwq->core_qp); 5336 if (!err && init_attr->create_flags & IB_WQ_FLAGS_DELAY_DROP) { 5337 err = set_delay_drop(dev); 5338 if (err) { 5339 mlx5_ib_warn(dev, "Failed to enable delay drop err=%d\n", 5340 err); 5341 mlx5_core_destroy_rq_tracked(dev->mdev, &rwq->core_qp); 5342 } else { 5343 rwq->create_flags |= MLX5_IB_WQ_FLAGS_DELAY_DROP; 5344 } 5345 } 5346 out: 5347 kvfree(in); 5348 return err; 5349 } 5350 5351 static int set_user_rq_size(struct mlx5_ib_dev *dev, 5352 struct ib_wq_init_attr *wq_init_attr, 5353 struct mlx5_ib_create_wq *ucmd, 5354 struct mlx5_ib_rwq *rwq) 5355 { 5356 /* Sanity check RQ size before proceeding */ 5357 if (wq_init_attr->max_wr > (1 << MLX5_CAP_GEN(dev->mdev, log_max_wq_sz))) 5358 return -EINVAL; 5359 5360 if (!ucmd->rq_wqe_count) 5361 return -EINVAL; 5362 5363 rwq->wqe_count = ucmd->rq_wqe_count; 5364 rwq->wqe_shift = ucmd->rq_wqe_shift; 5365 if (check_shl_overflow(rwq->wqe_count, rwq->wqe_shift, &rwq->buf_size)) 5366 return -EINVAL; 5367 5368 rwq->log_rq_stride = rwq->wqe_shift; 5369 rwq->log_rq_size = ilog2(rwq->wqe_count); 5370 return 0; 5371 } 5372 5373 static int prepare_user_rq(struct ib_pd *pd, 5374 struct ib_wq_init_attr *init_attr, 5375 struct ib_udata *udata, 5376 struct mlx5_ib_rwq *rwq) 5377 { 5378 struct mlx5_ib_dev *dev = to_mdev(pd->device); 5379 struct mlx5_ib_create_wq ucmd = {}; 5380 int err; 5381 size_t required_cmd_sz; 5382 5383 required_cmd_sz = offsetof(typeof(ucmd), single_stride_log_num_of_bytes) 5384 + sizeof(ucmd.single_stride_log_num_of_bytes); 5385 if (udata->inlen < required_cmd_sz) { 5386 mlx5_ib_dbg(dev, "invalid inlen\n"); 5387 return -EINVAL; 5388 } 5389 5390 if (udata->inlen > sizeof(ucmd) && 5391 !ib_is_udata_cleared(udata, sizeof(ucmd), 5392 udata->inlen - sizeof(ucmd))) { 5393 mlx5_ib_dbg(dev, "inlen is not supported\n"); 5394 return -EOPNOTSUPP; 5395 } 5396 5397 if (ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen))) { 5398 mlx5_ib_dbg(dev, "copy failed\n"); 5399 return -EFAULT; 5400 } 5401 5402 if (ucmd.comp_mask & (~MLX5_IB_CREATE_WQ_STRIDING_RQ)) { 5403 mlx5_ib_dbg(dev, "invalid comp mask\n"); 5404 return -EOPNOTSUPP; 5405 } else if (ucmd.comp_mask & MLX5_IB_CREATE_WQ_STRIDING_RQ) { 5406 if (!MLX5_CAP_GEN(dev->mdev, striding_rq)) { 5407 mlx5_ib_dbg(dev, "Striding RQ is not supported\n"); 5408 return -EOPNOTSUPP; 5409 } 5410 if ((ucmd.single_stride_log_num_of_bytes < 5411 MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES) || 5412 (ucmd.single_stride_log_num_of_bytes > 5413 MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES)) { 5414 mlx5_ib_dbg(dev, "Invalid log stride size (%u. Range is %u - %u)\n", 5415 ucmd.single_stride_log_num_of_bytes, 5416 MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES, 5417 MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES); 5418 return -EINVAL; 5419 } 5420 if ((ucmd.single_wqe_log_num_of_strides > 5421 MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES) || 5422 (ucmd.single_wqe_log_num_of_strides < 5423 MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES)) { 5424 mlx5_ib_dbg(dev, "Invalid log num strides (%u. Range is %u - %u)\n", 5425 ucmd.single_wqe_log_num_of_strides, 5426 MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES, 5427 MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES); 5428 return -EINVAL; 5429 } 5430 rwq->single_stride_log_num_of_bytes = 5431 ucmd.single_stride_log_num_of_bytes; 5432 rwq->log_num_strides = ucmd.single_wqe_log_num_of_strides; 5433 rwq->two_byte_shift_en = !!ucmd.two_byte_shift_en; 5434 rwq->create_flags |= MLX5_IB_WQ_FLAGS_STRIDING_RQ; 5435 } 5436 5437 err = set_user_rq_size(dev, init_attr, &ucmd, rwq); 5438 if (err) { 5439 mlx5_ib_dbg(dev, "err %d\n", err); 5440 return err; 5441 } 5442 5443 err = create_user_rq(dev, pd, rwq, &ucmd); 5444 if (err) { 5445 mlx5_ib_dbg(dev, "err %d\n", err); 5446 if (err) 5447 return err; 5448 } 5449 5450 rwq->user_index = ucmd.user_index; 5451 return 0; 5452 } 5453 5454 struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd, 5455 struct ib_wq_init_attr *init_attr, 5456 struct ib_udata *udata) 5457 { 5458 struct mlx5_ib_dev *dev; 5459 struct mlx5_ib_rwq *rwq; 5460 struct mlx5_ib_create_wq_resp resp = {}; 5461 size_t min_resp_len; 5462 int err; 5463 5464 if (!udata) 5465 return ERR_PTR(-ENOSYS); 5466 5467 min_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved); 5468 if (udata->outlen && udata->outlen < min_resp_len) 5469 return ERR_PTR(-EINVAL); 5470 5471 dev = to_mdev(pd->device); 5472 switch (init_attr->wq_type) { 5473 case IB_WQT_RQ: 5474 rwq = kzalloc(sizeof(*rwq), GFP_KERNEL); 5475 if (!rwq) 5476 return ERR_PTR(-ENOMEM); 5477 err = prepare_user_rq(pd, init_attr, udata, rwq); 5478 if (err) 5479 goto err; 5480 err = create_rq(rwq, pd, init_attr); 5481 if (err) 5482 goto err_user_rq; 5483 break; 5484 default: 5485 mlx5_ib_dbg(dev, "unsupported wq type %d\n", 5486 init_attr->wq_type); 5487 return ERR_PTR(-EINVAL); 5488 } 5489 5490 rwq->ibwq.wq_num = rwq->core_qp.qpn; 5491 rwq->ibwq.state = IB_WQS_RESET; 5492 if (udata->outlen) { 5493 resp.response_length = offsetof(typeof(resp), response_length) + 5494 sizeof(resp.response_length); 5495 err = ib_copy_to_udata(udata, &resp, resp.response_length); 5496 if (err) 5497 goto err_copy; 5498 } 5499 5500 rwq->core_qp.event = mlx5_ib_wq_event; 5501 rwq->ibwq.event_handler = init_attr->event_handler; 5502 return &rwq->ibwq; 5503 5504 err_copy: 5505 mlx5_core_destroy_rq_tracked(dev->mdev, &rwq->core_qp); 5506 err_user_rq: 5507 destroy_user_rq(dev, pd, rwq); 5508 err: 5509 kfree(rwq); 5510 return ERR_PTR(err); 5511 } 5512 5513 int mlx5_ib_destroy_wq(struct ib_wq *wq) 5514 { 5515 struct mlx5_ib_dev *dev = to_mdev(wq->device); 5516 struct mlx5_ib_rwq *rwq = to_mrwq(wq); 5517 5518 mlx5_core_destroy_rq_tracked(dev->mdev, &rwq->core_qp); 5519 destroy_user_rq(dev, wq->pd, rwq); 5520 kfree(rwq); 5521 5522 return 0; 5523 } 5524 5525 struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device, 5526 struct ib_rwq_ind_table_init_attr *init_attr, 5527 struct ib_udata *udata) 5528 { 5529 struct mlx5_ib_dev *dev = to_mdev(device); 5530 struct mlx5_ib_rwq_ind_table *rwq_ind_tbl; 5531 int sz = 1 << init_attr->log_ind_tbl_size; 5532 struct mlx5_ib_create_rwq_ind_tbl_resp resp = {}; 5533 size_t min_resp_len; 5534 int inlen; 5535 int err; 5536 int i; 5537 u32 *in; 5538 void *rqtc; 5539 5540 if (udata->inlen > 0 && 5541 !ib_is_udata_cleared(udata, 0, 5542 udata->inlen)) 5543 return ERR_PTR(-EOPNOTSUPP); 5544 5545 if (init_attr->log_ind_tbl_size > 5546 MLX5_CAP_GEN(dev->mdev, log_max_rqt_size)) { 5547 mlx5_ib_dbg(dev, "log_ind_tbl_size = %d is bigger than supported = %d\n", 5548 init_attr->log_ind_tbl_size, 5549 MLX5_CAP_GEN(dev->mdev, log_max_rqt_size)); 5550 return ERR_PTR(-EINVAL); 5551 } 5552 5553 min_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved); 5554 if (udata->outlen && udata->outlen < min_resp_len) 5555 return ERR_PTR(-EINVAL); 5556 5557 rwq_ind_tbl = kzalloc(sizeof(*rwq_ind_tbl), GFP_KERNEL); 5558 if (!rwq_ind_tbl) 5559 return ERR_PTR(-ENOMEM); 5560 5561 inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz; 5562 in = kvzalloc(inlen, GFP_KERNEL); 5563 if (!in) { 5564 err = -ENOMEM; 5565 goto err; 5566 } 5567 5568 rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context); 5569 5570 MLX5_SET(rqtc, rqtc, rqt_actual_size, sz); 5571 MLX5_SET(rqtc, rqtc, rqt_max_size, sz); 5572 5573 for (i = 0; i < sz; i++) 5574 MLX5_SET(rqtc, rqtc, rq_num[i], init_attr->ind_tbl[i]->wq_num); 5575 5576 err = mlx5_core_create_rqt(dev->mdev, in, inlen, &rwq_ind_tbl->rqtn); 5577 kvfree(in); 5578 5579 if (err) 5580 goto err; 5581 5582 rwq_ind_tbl->ib_rwq_ind_tbl.ind_tbl_num = rwq_ind_tbl->rqtn; 5583 if (udata->outlen) { 5584 resp.response_length = offsetof(typeof(resp), response_length) + 5585 sizeof(resp.response_length); 5586 err = ib_copy_to_udata(udata, &resp, resp.response_length); 5587 if (err) 5588 goto err_copy; 5589 } 5590 5591 return &rwq_ind_tbl->ib_rwq_ind_tbl; 5592 5593 err_copy: 5594 mlx5_core_destroy_rqt(dev->mdev, rwq_ind_tbl->rqtn); 5595 err: 5596 kfree(rwq_ind_tbl); 5597 return ERR_PTR(err); 5598 } 5599 5600 int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl) 5601 { 5602 struct mlx5_ib_rwq_ind_table *rwq_ind_tbl = to_mrwq_ind_table(ib_rwq_ind_tbl); 5603 struct mlx5_ib_dev *dev = to_mdev(ib_rwq_ind_tbl->device); 5604 5605 mlx5_core_destroy_rqt(dev->mdev, rwq_ind_tbl->rqtn); 5606 5607 kfree(rwq_ind_tbl); 5608 return 0; 5609 } 5610 5611 int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr, 5612 u32 wq_attr_mask, struct ib_udata *udata) 5613 { 5614 struct mlx5_ib_dev *dev = to_mdev(wq->device); 5615 struct mlx5_ib_rwq *rwq = to_mrwq(wq); 5616 struct mlx5_ib_modify_wq ucmd = {}; 5617 size_t required_cmd_sz; 5618 int curr_wq_state; 5619 int wq_state; 5620 int inlen; 5621 int err; 5622 void *rqc; 5623 void *in; 5624 5625 required_cmd_sz = offsetof(typeof(ucmd), reserved) + sizeof(ucmd.reserved); 5626 if (udata->inlen < required_cmd_sz) 5627 return -EINVAL; 5628 5629 if (udata->inlen > sizeof(ucmd) && 5630 !ib_is_udata_cleared(udata, sizeof(ucmd), 5631 udata->inlen - sizeof(ucmd))) 5632 return -EOPNOTSUPP; 5633 5634 if (ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen))) 5635 return -EFAULT; 5636 5637 if (ucmd.comp_mask || ucmd.reserved) 5638 return -EOPNOTSUPP; 5639 5640 inlen = MLX5_ST_SZ_BYTES(modify_rq_in); 5641 in = kvzalloc(inlen, GFP_KERNEL); 5642 if (!in) 5643 return -ENOMEM; 5644 5645 rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx); 5646 5647 curr_wq_state = (wq_attr_mask & IB_WQ_CUR_STATE) ? 5648 wq_attr->curr_wq_state : wq->state; 5649 wq_state = (wq_attr_mask & IB_WQ_STATE) ? 5650 wq_attr->wq_state : curr_wq_state; 5651 if (curr_wq_state == IB_WQS_ERR) 5652 curr_wq_state = MLX5_RQC_STATE_ERR; 5653 if (wq_state == IB_WQS_ERR) 5654 wq_state = MLX5_RQC_STATE_ERR; 5655 MLX5_SET(modify_rq_in, in, rq_state, curr_wq_state); 5656 MLX5_SET(rqc, rqc, state, wq_state); 5657 5658 if (wq_attr_mask & IB_WQ_FLAGS) { 5659 if (wq_attr->flags_mask & IB_WQ_FLAGS_CVLAN_STRIPPING) { 5660 if (!(MLX5_CAP_GEN(dev->mdev, eth_net_offloads) && 5661 MLX5_CAP_ETH(dev->mdev, vlan_cap))) { 5662 mlx5_ib_dbg(dev, "VLAN offloads are not " 5663 "supported\n"); 5664 err = -EOPNOTSUPP; 5665 goto out; 5666 } 5667 MLX5_SET64(modify_rq_in, in, modify_bitmask, 5668 MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD); 5669 MLX5_SET(rqc, rqc, vsd, 5670 (wq_attr->flags & IB_WQ_FLAGS_CVLAN_STRIPPING) ? 0 : 1); 5671 } 5672 5673 if (wq_attr->flags_mask & IB_WQ_FLAGS_PCI_WRITE_END_PADDING) { 5674 mlx5_ib_dbg(dev, "Modifying scatter end padding is not supported\n"); 5675 err = -EOPNOTSUPP; 5676 goto out; 5677 } 5678 } 5679 5680 if (curr_wq_state == IB_WQS_RESET && wq_state == IB_WQS_RDY) { 5681 if (MLX5_CAP_GEN(dev->mdev, modify_rq_counter_set_id)) { 5682 MLX5_SET64(modify_rq_in, in, modify_bitmask, 5683 MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_RQ_COUNTER_SET_ID); 5684 MLX5_SET(rqc, rqc, counter_set_id, 5685 dev->port->cnts.set_id); 5686 } else 5687 pr_info_once("%s: Receive WQ counters are not supported on current FW\n", 5688 dev->ib_dev.name); 5689 } 5690 5691 err = mlx5_core_modify_rq(dev->mdev, rwq->core_qp.qpn, in, inlen); 5692 if (!err) 5693 rwq->ibwq.state = (wq_state == MLX5_RQC_STATE_ERR) ? IB_WQS_ERR : wq_state; 5694 5695 out: 5696 kvfree(in); 5697 return err; 5698 } 5699 5700 struct mlx5_ib_drain_cqe { 5701 struct ib_cqe cqe; 5702 struct completion done; 5703 }; 5704 5705 static void mlx5_ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc) 5706 { 5707 struct mlx5_ib_drain_cqe *cqe = container_of(wc->wr_cqe, 5708 struct mlx5_ib_drain_cqe, 5709 cqe); 5710 5711 complete(&cqe->done); 5712 } 5713 5714 /* This function returns only once the drained WR was completed */ 5715 static void handle_drain_completion(struct ib_cq *cq, 5716 struct mlx5_ib_drain_cqe *sdrain, 5717 struct mlx5_ib_dev *dev) 5718 { 5719 struct mlx5_core_dev *mdev = dev->mdev; 5720 5721 if (cq->poll_ctx == IB_POLL_DIRECT) { 5722 while (wait_for_completion_timeout(&sdrain->done, HZ / 10) <= 0) 5723 ib_process_cq_direct(cq, -1); 5724 return; 5725 } 5726 5727 if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) { 5728 struct mlx5_ib_cq *mcq = to_mcq(cq); 5729 bool triggered = false; 5730 unsigned long flags; 5731 5732 spin_lock_irqsave(&dev->reset_flow_resource_lock, flags); 5733 /* Make sure that the CQ handler won't run if wasn't run yet */ 5734 if (!mcq->mcq.reset_notify_added) 5735 mcq->mcq.reset_notify_added = 1; 5736 else 5737 triggered = true; 5738 spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags); 5739 5740 if (triggered) { 5741 /* Wait for any scheduled/running task to be ended */ 5742 switch (cq->poll_ctx) { 5743 case IB_POLL_SOFTIRQ: 5744 irq_poll_disable(&cq->iop); 5745 irq_poll_enable(&cq->iop); 5746 break; 5747 case IB_POLL_WORKQUEUE: 5748 cancel_work_sync(&cq->work); 5749 break; 5750 default: 5751 WARN_ON_ONCE(1); 5752 } 5753 } 5754 5755 /* Run the CQ handler - this makes sure that the drain WR will 5756 * be processed if wasn't processed yet. 5757 */ 5758 mcq->mcq.comp(&mcq->mcq); 5759 } 5760 5761 wait_for_completion(&sdrain->done); 5762 } 5763 5764 void mlx5_ib_drain_sq(struct ib_qp *qp) 5765 { 5766 struct ib_cq *cq = qp->send_cq; 5767 struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR }; 5768 struct mlx5_ib_drain_cqe sdrain; 5769 const struct ib_send_wr *bad_swr; 5770 struct ib_rdma_wr swr = { 5771 .wr = { 5772 .next = NULL, 5773 { .wr_cqe = &sdrain.cqe, }, 5774 .opcode = IB_WR_RDMA_WRITE, 5775 }, 5776 }; 5777 int ret; 5778 struct mlx5_ib_dev *dev = to_mdev(qp->device); 5779 struct mlx5_core_dev *mdev = dev->mdev; 5780 5781 ret = ib_modify_qp(qp, &attr, IB_QP_STATE); 5782 if (ret && mdev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR) { 5783 WARN_ONCE(ret, "failed to drain send queue: %d\n", ret); 5784 return; 5785 } 5786 5787 sdrain.cqe.done = mlx5_ib_drain_qp_done; 5788 init_completion(&sdrain.done); 5789 5790 ret = _mlx5_ib_post_send(qp, &swr.wr, &bad_swr, true); 5791 if (ret) { 5792 WARN_ONCE(ret, "failed to drain send queue: %d\n", ret); 5793 return; 5794 } 5795 5796 handle_drain_completion(cq, &sdrain, dev); 5797 } 5798 5799 void mlx5_ib_drain_rq(struct ib_qp *qp) 5800 { 5801 struct ib_cq *cq = qp->recv_cq; 5802 struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR }; 5803 struct mlx5_ib_drain_cqe rdrain; 5804 struct ib_recv_wr rwr = {}; 5805 const struct ib_recv_wr *bad_rwr; 5806 int ret; 5807 struct mlx5_ib_dev *dev = to_mdev(qp->device); 5808 struct mlx5_core_dev *mdev = dev->mdev; 5809 5810 ret = ib_modify_qp(qp, &attr, IB_QP_STATE); 5811 if (ret && mdev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR) { 5812 WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret); 5813 return; 5814 } 5815 5816 rwr.wr_cqe = &rdrain.cqe; 5817 rdrain.cqe.done = mlx5_ib_drain_qp_done; 5818 init_completion(&rdrain.done); 5819 5820 ret = _mlx5_ib_post_recv(qp, &rwr, &bad_rwr, true); 5821 if (ret) { 5822 WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret); 5823 return; 5824 } 5825 5826 handle_drain_completion(cq, &rdrain, dev); 5827 } 5828