qp.c (39a1142dbba04d2e08259bd10a369465c932126b) qp.c (f031396531fe2b1a6ffb4fa5eceb9c1fa276869a)
1/*
2 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:

--- 44 unchanged lines hidden (view full) ---

53
54enum {
55 MLX5_IB_SQ_STRIDE = 6,
56 MLX5_IB_CACHE_LINE_SIZE = 64,
57};
58
59static const u32 mlx5_ib_opcode[] = {
60 [IB_WR_SEND] = MLX5_OPCODE_SEND,
1/*
2 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:

--- 44 unchanged lines hidden (view full) ---

53
54enum {
55 MLX5_IB_SQ_STRIDE = 6,
56 MLX5_IB_CACHE_LINE_SIZE = 64,
57};
58
59static const u32 mlx5_ib_opcode[] = {
60 [IB_WR_SEND] = MLX5_OPCODE_SEND,
61 [IB_WR_LSO] = MLX5_OPCODE_LSO,
61 [IB_WR_SEND_WITH_IMM] = MLX5_OPCODE_SEND_IMM,
62 [IB_WR_RDMA_WRITE] = MLX5_OPCODE_RDMA_WRITE,
63 [IB_WR_RDMA_WRITE_WITH_IMM] = MLX5_OPCODE_RDMA_WRITE_IMM,
64 [IB_WR_RDMA_READ] = MLX5_OPCODE_RDMA_READ,
65 [IB_WR_ATOMIC_CMP_AND_SWP] = MLX5_OPCODE_ATOMIC_CS,
66 [IB_WR_ATOMIC_FETCH_AND_ADD] = MLX5_OPCODE_ATOMIC_FA,
67 [IB_WR_SEND_WITH_INV] = MLX5_OPCODE_SEND_INVAL,
68 [IB_WR_LOCAL_INV] = MLX5_OPCODE_UMR,
69 [IB_WR_REG_MR] = MLX5_OPCODE_UMR,
70 [IB_WR_MASKED_ATOMIC_CMP_AND_SWP] = MLX5_OPCODE_ATOMIC_MASKED_CS,
71 [IB_WR_MASKED_ATOMIC_FETCH_AND_ADD] = MLX5_OPCODE_ATOMIC_MASKED_FA,
72 [MLX5_IB_WR_UMR] = MLX5_OPCODE_UMR,
73};
74
62 [IB_WR_SEND_WITH_IMM] = MLX5_OPCODE_SEND_IMM,
63 [IB_WR_RDMA_WRITE] = MLX5_OPCODE_RDMA_WRITE,
64 [IB_WR_RDMA_WRITE_WITH_IMM] = MLX5_OPCODE_RDMA_WRITE_IMM,
65 [IB_WR_RDMA_READ] = MLX5_OPCODE_RDMA_READ,
66 [IB_WR_ATOMIC_CMP_AND_SWP] = MLX5_OPCODE_ATOMIC_CS,
67 [IB_WR_ATOMIC_FETCH_AND_ADD] = MLX5_OPCODE_ATOMIC_FA,
68 [IB_WR_SEND_WITH_INV] = MLX5_OPCODE_SEND_INVAL,
69 [IB_WR_LOCAL_INV] = MLX5_OPCODE_UMR,
70 [IB_WR_REG_MR] = MLX5_OPCODE_UMR,
71 [IB_WR_MASKED_ATOMIC_CMP_AND_SWP] = MLX5_OPCODE_ATOMIC_MASKED_CS,
72 [IB_WR_MASKED_ATOMIC_FETCH_AND_ADD] = MLX5_OPCODE_ATOMIC_MASKED_FA,
73 [MLX5_IB_WR_UMR] = MLX5_OPCODE_UMR,
74};
75
/*
 * Explicit 16-byte pad placed before the eth segment of an IPoIB UD LSO
 * send WQE; it is accounted for in sq_overhead() and zeroed in the send
 * path when IB_QP_CREATE_IPOIB_UD_LSO is set on the QP.
 * NOTE(review): layout is hardware-visible — do not change the size.
 */
76struct mlx5_wqe_eth_pad {
77 u8 rsvd0[16]; /* reserved padding; never interpreted by software */
78};
75
76static int is_qp0(enum ib_qp_type qp_type)
77{
78 return qp_type == IB_QPT_SMI;
79}
80
81static int is_sqp(enum ib_qp_type qp_type)
82{

--- 172 unchanged lines hidden (view full) ---

255 qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig;
256 qp->rq.max_post = qp->rq.wqe_cnt;
257 }
258 }
259
260 return 0;
261}
262
79
80static int is_qp0(enum ib_qp_type qp_type)
81{
82 return qp_type == IB_QPT_SMI;
83}
84
85static int is_sqp(enum ib_qp_type qp_type)
86{

--- 172 unchanged lines hidden (view full) ---

259 qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig;
260 qp->rq.max_post = qp->rq.wqe_cnt;
261 }
262 }
263
264 return 0;
265}
266
263static int sq_overhead(enum ib_qp_type qp_type)
267static int sq_overhead(struct ib_qp_init_attr *attr)
264{
265 int size = 0;
266
268{
269 int size = 0;
270
267 switch (qp_type) {
271 switch (attr->qp_type) {
268 case IB_QPT_XRC_INI:
269 size += sizeof(struct mlx5_wqe_xrc_seg);
270 /* fall through */
271 case IB_QPT_RC:
272 size += sizeof(struct mlx5_wqe_ctrl_seg) +
273 max(sizeof(struct mlx5_wqe_atomic_seg) +
274 sizeof(struct mlx5_wqe_raddr_seg),
275 sizeof(struct mlx5_wqe_umr_ctrl_seg) +

--- 6 unchanged lines hidden (view full) ---

282 case IB_QPT_UC:
283 size += sizeof(struct mlx5_wqe_ctrl_seg) +
284 max(sizeof(struct mlx5_wqe_raddr_seg),
285 sizeof(struct mlx5_wqe_umr_ctrl_seg) +
286 sizeof(struct mlx5_mkey_seg));
287 break;
288
289 case IB_QPT_UD:
272 case IB_QPT_XRC_INI:
273 size += sizeof(struct mlx5_wqe_xrc_seg);
274 /* fall through */
275 case IB_QPT_RC:
276 size += sizeof(struct mlx5_wqe_ctrl_seg) +
277 max(sizeof(struct mlx5_wqe_atomic_seg) +
278 sizeof(struct mlx5_wqe_raddr_seg),
279 sizeof(struct mlx5_wqe_umr_ctrl_seg) +

--- 6 unchanged lines hidden (view full) ---

286 case IB_QPT_UC:
287 size += sizeof(struct mlx5_wqe_ctrl_seg) +
288 max(sizeof(struct mlx5_wqe_raddr_seg),
289 sizeof(struct mlx5_wqe_umr_ctrl_seg) +
290 sizeof(struct mlx5_mkey_seg));
291 break;
292
293 case IB_QPT_UD:
294 if (attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO)
295 size += sizeof(struct mlx5_wqe_eth_pad) +
296 sizeof(struct mlx5_wqe_eth_seg);
297 /* fall through */
290 case IB_QPT_SMI:
291 case IB_QPT_GSI:
292 size += sizeof(struct mlx5_wqe_ctrl_seg) +
293 sizeof(struct mlx5_wqe_datagram_seg);
294 break;
295
296 case MLX5_IB_QPT_REG_UMR:
297 size += sizeof(struct mlx5_wqe_ctrl_seg) +

--- 8 unchanged lines hidden (view full) ---

306 return size;
307}
308
309static int calc_send_wqe(struct ib_qp_init_attr *attr)
310{
311 int inl_size = 0;
312 int size;
313
298 case IB_QPT_SMI:
299 case IB_QPT_GSI:
300 size += sizeof(struct mlx5_wqe_ctrl_seg) +
301 sizeof(struct mlx5_wqe_datagram_seg);
302 break;
303
304 case MLX5_IB_QPT_REG_UMR:
305 size += sizeof(struct mlx5_wqe_ctrl_seg) +

--- 8 unchanged lines hidden (view full) ---

314 return size;
315}
316
317static int calc_send_wqe(struct ib_qp_init_attr *attr)
318{
319 int inl_size = 0;
320 int size;
321
314 size = sq_overhead(attr->qp_type);
322 size = sq_overhead(attr);
315 if (size < 0)
316 return size;
317
318 if (attr->cap.max_inline_data) {
319 inl_size = size + sizeof(struct mlx5_wqe_inline_seg) +
320 attr->cap.max_inline_data;
321 }
322

--- 20 unchanged lines hidden (view full) ---

343 return wqe_size;
344
345 if (wqe_size > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)) {
346 mlx5_ib_dbg(dev, "wqe_size(%d) > max_sq_desc_sz(%d)\n",
347 wqe_size, MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq));
348 return -EINVAL;
349 }
350
323 if (size < 0)
324 return size;
325
326 if (attr->cap.max_inline_data) {
327 inl_size = size + sizeof(struct mlx5_wqe_inline_seg) +
328 attr->cap.max_inline_data;
329 }
330

--- 20 unchanged lines hidden (view full) ---

351 return wqe_size;
352
353 if (wqe_size > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)) {
354 mlx5_ib_dbg(dev, "wqe_size(%d) > max_sq_desc_sz(%d)\n",
355 wqe_size, MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq));
356 return -EINVAL;
357 }
358
351 qp->max_inline_data = wqe_size - sq_overhead(attr->qp_type) -
352 sizeof(struct mlx5_wqe_inline_seg);
359 qp->max_inline_data = wqe_size - sq_overhead(attr) -
360 sizeof(struct mlx5_wqe_inline_seg);
353 attr->cap.max_inline_data = qp->max_inline_data;
354
355 if (attr->create_flags & IB_QP_CREATE_SIGNATURE_EN)
356 qp->signature_en = true;
357
358 wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size);
359 qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB;
360 if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) {

--- 417 unchanged lines hidden (view full) ---

778{
779 enum mlx5_ib_latency_class lc = MLX5_IB_LATENCY_CLASS_LOW;
780 struct mlx5_uuar_info *uuari;
781 int uar_index;
782 int uuarn;
783 int err;
784
785 uuari = &dev->mdev->priv.uuari;
361 attr->cap.max_inline_data = qp->max_inline_data;
362
363 if (attr->create_flags & IB_QP_CREATE_SIGNATURE_EN)
364 qp->signature_en = true;
365
366 wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size);
367 qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB;
368 if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) {

--- 417 unchanged lines hidden (view full) ---

786{
787 enum mlx5_ib_latency_class lc = MLX5_IB_LATENCY_CLASS_LOW;
788 struct mlx5_uuar_info *uuari;
789 int uar_index;
790 int uuarn;
791 int err;
792
793 uuari = &dev->mdev->priv.uuari;
786 if (init_attr->create_flags & ~(IB_QP_CREATE_SIGNATURE_EN | IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK))
794 if (init_attr->create_flags & ~(IB_QP_CREATE_SIGNATURE_EN |
795 IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK |
796 IB_QP_CREATE_IPOIB_UD_LSO))
787 return -EINVAL;
788
789 if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR)
790 lc = MLX5_IB_LATENCY_CLASS_FAST_PATH;
791
792 uuarn = alloc_uuar(uuari, lc);
793 if (uuarn < 0) {
794 mlx5_ib_dbg(dev, "\n");

--- 428 unchanged lines hidden (view full) ---

1223 }
1224 if (init_attr->create_flags & IB_QP_CREATE_CROSS_CHANNEL)
1225 qp->flags |= MLX5_IB_QP_CROSS_CHANNEL;
1226 if (init_attr->create_flags & IB_QP_CREATE_MANAGED_SEND)
1227 qp->flags |= MLX5_IB_QP_MANAGED_SEND;
1228 if (init_attr->create_flags & IB_QP_CREATE_MANAGED_RECV)
1229 qp->flags |= MLX5_IB_QP_MANAGED_RECV;
1230 }
797 return -EINVAL;
798
799 if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR)
800 lc = MLX5_IB_LATENCY_CLASS_FAST_PATH;
801
802 uuarn = alloc_uuar(uuari, lc);
803 if (uuarn < 0) {
804 mlx5_ib_dbg(dev, "\n");

--- 428 unchanged lines hidden (view full) ---

1233 }
1234 if (init_attr->create_flags & IB_QP_CREATE_CROSS_CHANNEL)
1235 qp->flags |= MLX5_IB_QP_CROSS_CHANNEL;
1236 if (init_attr->create_flags & IB_QP_CREATE_MANAGED_SEND)
1237 qp->flags |= MLX5_IB_QP_MANAGED_SEND;
1238 if (init_attr->create_flags & IB_QP_CREATE_MANAGED_RECV)
1239 qp->flags |= MLX5_IB_QP_MANAGED_RECV;
1240 }
1241
1242 if (init_attr->qp_type == IB_QPT_UD &&
1243 (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO))
1244 if (!MLX5_CAP_GEN(mdev, ipoib_basic_offloads)) {
1245 mlx5_ib_dbg(dev, "ipoib UD lso qp isn't supported\n");
1246 return -EOPNOTSUPP;
1247 }
1248
1231 if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
1232 qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE;
1233
1234 if (pd && pd->uobject) {
1235 if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
1236 mlx5_ib_dbg(dev, "copy failed\n");
1237 return -EFAULT;
1238 }

--- 141 unchanged lines hidden (view full) ---

1380
1381 in->ctx.db_rec_addr = cpu_to_be64(qp->db.dma);
1382
1383 if (MLX5_CAP_GEN(mdev, cqe_version) == MLX5_CQE_VERSION_V1) {
1384 qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
1385 /* 0xffffff means we ask to work with cqe version 0 */
1386 MLX5_SET(qpc, qpc, user_index, uidx);
1387 }
1249 if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
1250 qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE;
1251
1252 if (pd && pd->uobject) {
1253 if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
1254 mlx5_ib_dbg(dev, "copy failed\n");
1255 return -EFAULT;
1256 }

--- 141 unchanged lines hidden (view full) ---

1398
1399 in->ctx.db_rec_addr = cpu_to_be64(qp->db.dma);
1400
1401 if (MLX5_CAP_GEN(mdev, cqe_version) == MLX5_CQE_VERSION_V1) {
1402 qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
1403 /* 0xffffff means we ask to work with cqe version 0 */
1404 MLX5_SET(qpc, qpc, user_index, uidx);
1405 }
1406 /* we use IB_QP_CREATE_IPOIB_UD_LSO to indicates ipoib qp */
1407 if (init_attr->qp_type == IB_QPT_UD &&
1408 (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO)) {
1409 qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
1410 MLX5_SET(qpc, qpc, ulp_stateless_offload_mode, 1);
1411 qp->flags |= MLX5_IB_QP_LSO;
1412 }
1388
1389 if (init_attr->qp_type == IB_QPT_RAW_PACKET) {
1390 qp->raw_packet_qp.sq.ubuffer.buf_addr = ucmd.sq_buf_addr;
1391 raw_packet_qp_copy_info(qp, &qp->raw_packet_qp);
1392 err = create_raw_packet_qp(dev, qp, in, pd);
1393 } else {
1394 err = mlx5_core_create_qp(dev->mdev, &base->mqp, in, inlen);
1395 }

--- 1041 unchanged lines hidden (view full) ---

2437static __always_inline void set_raddr_seg(struct mlx5_wqe_raddr_seg *rseg,
2438 u64 remote_addr, u32 rkey)
2439{
2440 rseg->raddr = cpu_to_be64(remote_addr);
2441 rseg->rkey = cpu_to_be32(rkey);
2442 rseg->reserved = 0;
2443}
2444
1413
1414 if (init_attr->qp_type == IB_QPT_RAW_PACKET) {
1415 qp->raw_packet_qp.sq.ubuffer.buf_addr = ucmd.sq_buf_addr;
1416 raw_packet_qp_copy_info(qp, &qp->raw_packet_qp);
1417 err = create_raw_packet_qp(dev, qp, in, pd);
1418 } else {
1419 err = mlx5_core_create_qp(dev->mdev, &base->mqp, in, inlen);
1420 }

--- 1041 unchanged lines hidden (view full) ---

2462static __always_inline void set_raddr_seg(struct mlx5_wqe_raddr_seg *rseg,
2463 u64 remote_addr, u32 rkey)
2464{
2465 rseg->raddr = cpu_to_be64(remote_addr);
2466 rseg->rkey = cpu_to_be32(rkey);
2467 rseg->reserved = 0;
2468}
2469
/*
 * Build the eth segment of a send WQE and, for IB_WR_LSO, copy the
 * packet headers inline directly after it, wrapping to the start of the
 * send queue if the copy would run past @qend.
 *
 * Advances past everything it wrote and returns the new segment pointer;
 * the consumed size (in 16-byte units) is accumulated into *size.
 * NOTE(review): the returned pointer may equal @qend when the headers
 * end exactly at the queue boundary — confirm the caller wrap-checks it.
 */
2470static void *set_eth_seg(struct mlx5_wqe_eth_seg *eseg,
2471 struct ib_send_wr *wr, void *qend,
2472 struct mlx5_ib_qp *qp, int *size)
2473{
2474 void *seg = eseg;
2475
2476 memset(eseg, 0, sizeof(struct mlx5_wqe_eth_seg));
2477
2478 if (wr->send_flags & IB_SEND_IP_CSUM)
2479 eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM |
2480 MLX5_ETH_WQE_L4_CSUM;
2481
2482 seg += sizeof(struct mlx5_wqe_eth_seg);
2483 *size += sizeof(struct mlx5_wqe_eth_seg) / 16;
2484
2485 if (wr->opcode == IB_WR_LSO) {
2486 struct ib_ud_wr *ud_wr = container_of(wr, struct ib_ud_wr, wr);
2487 int size_of_inl_hdr_start = sizeof(eseg->inline_hdr_start);
2488 u64 left, leftlen, copysz;
2489 void *pdata = ud_wr->header; /* headers to inline, ud_wr->hlen bytes */
2490
2491 left = ud_wr->hlen;
2492 eseg->mss = cpu_to_be16(ud_wr->mss);
2493 eseg->inline_hdr_sz = cpu_to_be16(left);
2494
2495 /*
2496 * Check if there is space until the end of the queue; if so,
2497 * copy everything in one shot, otherwise copy up to the end of
2498 * the queue, wrap around, and then copy the remainder.
2499 */
2500 leftlen = qend - (void *)eseg->inline_hdr_start; /* room before wrap */
2501 copysz = min_t(u64, leftlen, left);
2502
2503 memcpy(seg - size_of_inl_hdr_start, pdata, copysz);
2504
2505 if (likely(copysz > size_of_inl_hdr_start)) {
2506 seg += ALIGN(copysz - size_of_inl_hdr_start, 16);
2507 *size += ALIGN(copysz - size_of_inl_hdr_start, 16) / 16;
2508 }
2509
2510 if (unlikely(copysz < left)) { /* the last wqe in the queue */
2511 seg = mlx5_get_send_wqe(qp, 0);
2512 left -= copysz;
2513 pdata += copysz;
2514 memcpy(seg, pdata, left);
2515 seg += ALIGN(left, 16);
2516 *size += ALIGN(left, 16) / 16;
2517 }
2518 }
2519
2520 return seg;
2521}
2522
2445static void set_datagram_seg(struct mlx5_wqe_datagram_seg *dseg,
2446 struct ib_send_wr *wr)
2447{
2448 memcpy(&dseg->av, &to_mah(ud_wr(wr)->ah)->av, sizeof(struct mlx5_av));
2449 dseg->av.dqp_dct = cpu_to_be32(ud_wr(wr)->remote_qpn | MLX5_EXTENDED_UD_AV);
2450 dseg->av.key.qkey.qkey = cpu_to_be32(ud_wr(wr)->remote_qkey);
2451}
2452

--- 915 unchanged lines hidden (view full) ---

3368 size += sizeof(struct mlx5_wqe_raddr_seg) / 16;
3369 break;
3370
3371 default:
3372 break;
3373 }
3374 break;
3375
2523static void set_datagram_seg(struct mlx5_wqe_datagram_seg *dseg,
2524 struct ib_send_wr *wr)
2525{
2526 memcpy(&dseg->av, &to_mah(ud_wr(wr)->ah)->av, sizeof(struct mlx5_av));
2527 dseg->av.dqp_dct = cpu_to_be32(ud_wr(wr)->remote_qpn | MLX5_EXTENDED_UD_AV);
2528 dseg->av.key.qkey.qkey = cpu_to_be32(ud_wr(wr)->remote_qkey);
2529}
2530

--- 915 unchanged lines hidden (view full) ---

3446 size += sizeof(struct mlx5_wqe_raddr_seg) / 16;
3447 break;
3448
3449 default:
3450 break;
3451 }
3452 break;
3453
3376 case IB_QPT_UD:
3377 case IB_QPT_SMI:
3378 case IB_QPT_GSI:
3379 set_datagram_seg(seg, wr);
3380 seg += sizeof(struct mlx5_wqe_datagram_seg);
3381 size += sizeof(struct mlx5_wqe_datagram_seg) / 16;
3382 if (unlikely((seg == qend)))
3383 seg = mlx5_get_send_wqe(qp, 0);
3384 break;
3454 case IB_QPT_SMI:
3455 case IB_QPT_GSI:
3456 set_datagram_seg(seg, wr);
3457 seg += sizeof(struct mlx5_wqe_datagram_seg);
3458 size += sizeof(struct mlx5_wqe_datagram_seg) / 16;
3459 if (unlikely((seg == qend)))
3460 seg = mlx5_get_send_wqe(qp, 0);
3461 break;
3462 case IB_QPT_UD:
3463 set_datagram_seg(seg, wr);
3464 seg += sizeof(struct mlx5_wqe_datagram_seg);
3465 size += sizeof(struct mlx5_wqe_datagram_seg) / 16;
3385
3466
3467 if (unlikely((seg == qend)))
3468 seg = mlx5_get_send_wqe(qp, 0);
3469
3470 /* handle qp that supports ud offload */
3471 if (qp->flags & IB_QP_CREATE_IPOIB_UD_LSO) {
3472 struct mlx5_wqe_eth_pad *pad;
3473
3474 pad = seg;
3475 memset(pad, 0, sizeof(struct mlx5_wqe_eth_pad));
3476 seg += sizeof(struct mlx5_wqe_eth_pad);
3477 size += sizeof(struct mlx5_wqe_eth_pad) / 16;
3478
3479 seg = set_eth_seg(seg, wr, qend, qp, &size);
3480
3481 if (unlikely((seg == qend)))
3482 seg = mlx5_get_send_wqe(qp, 0);
3483 }
3484 break;
3386 case MLX5_IB_QPT_REG_UMR:
3387 if (wr->opcode != MLX5_IB_WR_UMR) {
3388 err = -EINVAL;
3389 mlx5_ib_warn(dev, "bad opcode\n");
3390 goto out;
3391 }
3392 qp->sq.wr_data[idx] = MLX5_IB_WR_UMR;
3393 ctrl->imm = cpu_to_be32(umr_wr(wr)->mkey);

--- 532 unchanged lines hidden ---
3485 case MLX5_IB_QPT_REG_UMR:
3486 if (wr->opcode != MLX5_IB_WR_UMR) {
3487 err = -EINVAL;
3488 mlx5_ib_warn(dev, "bad opcode\n");
3489 goto out;
3490 }
3491 qp->sq.wr_data[idx] = MLX5_IB_WR_UMR;
3492 ctrl->imm = cpu_to_be32(umr_wr(wr)->mkey);

--- 532 unchanged lines hidden ---