/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mlx4/qp.h>

#include "mlx4_en.h"

/* Fill a QP context for an ETH TX queue, RX queue or RSS parent QP */
void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
			     int is_tx, int rss, int qpn, int cqn,
			     int user_prio, struct mlx4_qp_context *context)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct net_device *dev = priv->dev;

	memset(context, 0, sizeof(*context));
	context->flags = cpu_to_be32(7 << 16 | rss << MLX4_RSS_QPC_FLAG_OFFSET);
	context->pd = cpu_to_be32(mdev->priv_pdn);
	context->mtu_msgmax = 0xff;
	if (!is_tx && !rss)
		context->rq_size_stride = ilog2(size) << 3 | (ilog2(stride) - 4);
	if (is_tx) {
		context->sq_size_stride = ilog2(size) << 3 | (ilog2(stride) - 4);
		/* params2 is big-endian, so the FPP bit needs conversion */
		if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PORT_REMAP)
			context->params2 |= cpu_to_be32(MLX4_QP_BIT_FPP);
	} else {
		context->sq_size_stride = ilog2(TXBB_SIZE) - 4;
	}
	context->usr_page = cpu_to_be32(mdev->priv_uar.index);
	context->local_qpn = cpu_to_be32(qpn);
	context->pri_path.ackto = 1 & 0x07;
	context->pri_path.sched_queue = 0x83 | (priv->port - 1) << 6;
	if (user_prio >= 0) {
		context->pri_path.sched_queue |= user_prio << 3;
		context->pri_path.feup = MLX4_FEUP_FORCE_ETH_UP;
	}
	context->pri_path.counter_index = priv->counter_index;
	context->cqn_send = cpu_to_be32(cqn);
	context->cqn_recv = cpu_to_be32(cqn);
	if (!rss &&
	    (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_LB_SRC_CHK) &&
	    context->pri_path.counter_index !=
			    MLX4_SINK_COUNTER_INDEX(mdev->dev)) {
		/* disable multicast loopback to qp with same counter */
		if (!(dev->features & NETIF_F_LOOPBACK))
			context->pri_path.fl |= MLX4_FL_ETH_SRC_CHECK_MC_LB;
		context->pri_path.control |= MLX4_CTRL_ETH_SRC_CHECK_IF_COUNTER;
	}
	context->db_rec_addr = cpu_to_be64(priv->res.db.dma << 2);
	if (!(dev->features & NETIF_F_HW_VLAN_CTAG_RX))
		context->param3 |= cpu_to_be32(1 << 30);

	if (!is_tx && !rss &&
	    (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)) {
		en_dbg(HW, priv,
		       "Setting RX qp %x tunnel mode to RX tunneled & non-tunneled\n",
		       qpn);
		context->srqn = cpu_to_be32(7 << 28); /* this fills bits 30:28 */
	}
}

int mlx4_en_change_mcast_lb(struct mlx4_en_priv *priv, struct mlx4_qp *qp,
			    int loopback)
{
	int ret;
	struct mlx4_update_qp_params qp_params;

	memset(&qp_params, 0, sizeof(qp_params));
	if (!loopback)
		qp_params.flags = MLX4_UPDATE_QP_PARAMS_FLAGS_ETH_CHECK_MC_LB;

	ret = mlx4_update_qp(priv->mdev->dev, qp->qpn,
			     MLX4_UPDATE_QP_ETH_SRC_CHECK_MC_LB,
			     &qp_params);

	return ret;
}

int mlx4_en_map_buffer(struct mlx4_buf *buf)
{
	struct page **pages;
	int i;

	/* Nothing to do: the buffer is already virtually contiguous */
	if (BITS_PER_LONG == 64 || buf->nbufs == 1)
		return 0;

	pages = kmalloc_array(buf->nbufs, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	for (i = 0; i < buf->nbufs; ++i)
		pages[i] = virt_to_page(buf->page_list[i].buf);

	/* Stitch the per-page chunks into one virtually contiguous mapping */
	buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL);
	kfree(pages);
	if (!buf->direct.buf)
		return -ENOMEM;

	return 0;
}

void mlx4_en_unmap_buffer(struct mlx4_buf *buf)
{
	if (BITS_PER_LONG == 64 || buf->nbufs == 1)
		return;

	vunmap(buf->direct.buf);
}

void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event)
{
	return;
}
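
#if 0	/* Illustrative sketch only -- not part of the original file */
/*
 * A minimal sketch of how the helpers above are typically combined when
 * bringing up a non-RSS RX QP: map the (possibly multi-chunk) ring buffer,
 * then fill a context with is_tx = 0, rss = 0 and no user priority (-1).
 * The function name and the mlx4_en_rx_ring fields used here (size,
 * stride, wqres) are assumptions for illustration, not a driver API
 * guarantee.
 */
static int example_setup_rx_qp_context(struct mlx4_en_priv *priv,
					struct mlx4_en_rx_ring *ring,
					int qpn, int cqn,
					struct mlx4_qp_context *context)
{
	int err;

	/* No-op on 64-bit or single-chunk buffers, see mlx4_en_map_buffer() */
	err = mlx4_en_map_buffer(&ring->wqres.buf);	/* hypothetical field use */
	if (err)
		return err;

	mlx4_en_fill_qp_context(priv, ring->size, ring->stride,
				0 /* is_tx */, 0 /* rss */,
				qpn, cqn, -1 /* no user_prio */, context);
	return 0;
}
#endif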