/*
 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
 * All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/export.h>
#include "fw_qos.h"
#include "fw.h"

enum {
	/* allocate vpp opcode modifiers */
	MLX4_ALLOCATE_VPP_ALLOCATE = 0x0,
	MLX4_ALLOCATE_VPP_QUERY = 0x1
};

enum {
	/* set vport qos opcode modifiers */
	MLX4_SET_VPORT_QOS_SET = 0x0,
	MLX4_SET_VPORT_QOS_QUERY = 0x1
};

struct mlx4_set_port_prio2tc_context {
	u8 prio2tc[4];
};

struct mlx4_port_scheduler_tc_cfg_be {
	__be16 pg;
	__be16 bw_percentage;
	__be16 max_bw_units; /* 3-100Mbps, 4-1Gbps, other values - reserved */
	__be16 max_bw_value;
};

struct mlx4_set_port_scheduler_context {
	struct mlx4_port_scheduler_tc_cfg_be tc[MLX4_NUM_TC];
};

/* Granular QoS (per VF) section */
struct mlx4_alloc_vpp_param {
	__be32 available_vpp;
	__be32 vpp_p_up[MLX4_NUM_UP];
};

struct mlx4_prio_qos_param {
	__be32 bw_share;
	__be32 max_avg_bw;
	__be32 reserved;
	__be32 enable;
	__be32 reserved1[4];
};

struct mlx4_set_vport_context {
	__be32 reserved[8];
	struct mlx4_prio_qos_param qos_p_up[MLX4_NUM_UP];
};

int mlx4_SET_PORT_PRIO2TC(struct mlx4_dev *dev, u8 port, u8 *prio2tc)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_set_port_prio2tc_context *context;
	int err;
	u32 in_mod;
	int i;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	context = mailbox->buf;

	/* Pack two 4-bit prio-to-TC entries per byte: even priority in the
	 * high nibble, odd priority in the low nibble.
	 */
	for (i = 0; i < MLX4_NUM_UP; i += 2)
		context->prio2tc[i >> 1] = prio2tc[i] << 4 | prio2tc[i + 1];

	in_mod = MLX4_SET_PORT_PRIO2TC << 8 | port;
	err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL(mlx4_SET_PORT_PRIO2TC);
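
/*
 * Illustrative sketch only (not part of the original driver): one plausible
 * way a caller could use mlx4_SET_PORT_PRIO2TC().  The helper name and the
 * 1:1 priority-to-TC mapping below are assumed example policy, not something
 * mandated by the firmware interface.
 */
static int __maybe_unused example_map_prio_to_tc(struct mlx4_dev *dev, u8 port)
{
	u8 prio2tc[MLX4_NUM_UP];
	int i;

	/* Assumed policy: user priority i maps to traffic class i. */
	for (i = 0; i < MLX4_NUM_UP; i++)
		prio2tc[i] = i;

	return mlx4_SET_PORT_PRIO2TC(dev, port, prio2tc);
}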

int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw,
			    u8 *pg, u16 *ratelimit)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_set_port_scheduler_context *context;
	int err;
	u32 in_mod;
	int i;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	context = mailbox->buf;

	for (i = 0; i < MLX4_NUM_TC; i++) {
		struct mlx4_port_scheduler_tc_cfg_be *tc = &context->tc[i];
		u16 r;

		if (ratelimit && ratelimit[i]) {
			if (ratelimit[i] <= MLX4_MAX_100M_UNITS_VAL) {
				r = ratelimit[i];
				tc->max_bw_units =
					htons(MLX4_RATELIMIT_100M_UNITS);
			} else {
				r = ratelimit[i] / 10;
				tc->max_bw_units =
					htons(MLX4_RATELIMIT_1G_UNITS);
			}
			tc->max_bw_value = htons(r);
		} else {
			tc->max_bw_value = htons(MLX4_RATELIMIT_DEFAULT);
			tc->max_bw_units = htons(MLX4_RATELIMIT_1G_UNITS);
		}

		tc->pg = htons(pg[i]);
		tc->bw_percentage = htons(tc_tx_bw[i]);
	}

	in_mod = MLX4_SET_PORT_SCHEDULER << 8 | port;
	err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL(mlx4_SET_PORT_SCHEDULER);

int mlx4_ALLOCATE_VPP_get(struct mlx4_dev *dev, u8 port,
			  u16 *available_vpp, u8 *vpp_p_up)
{
	int i;
	int err;
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_alloc_vpp_param *out_param;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	out_param = mailbox->buf;

	err = mlx4_cmd_box(dev, 0, mailbox->dma, port,
			   MLX4_ALLOCATE_VPP_QUERY,
			   MLX4_CMD_ALLOCATE_VPP,
			   MLX4_CMD_TIME_CLASS_A,
			   MLX4_CMD_NATIVE);
	if (err)
		goto out;

	/* Total number of supported VPPs */
	*available_vpp = (u16)be32_to_cpu(out_param->available_vpp);

	for (i = 0; i < MLX4_NUM_UP; i++)
		vpp_p_up[i] = (u8)be32_to_cpu(out_param->vpp_p_up[i]);

out:
	mlx4_free_cmd_mailbox(dev, mailbox);

	return err;
}
EXPORT_SYMBOL(mlx4_ALLOCATE_VPP_get);

int mlx4_ALLOCATE_VPP_set(struct mlx4_dev *dev, u8 port, u8 *vpp_p_up)
{
	int i;
	int err;
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_alloc_vpp_param *in_param;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	in_param = mailbox->buf;

	for (i = 0; i < MLX4_NUM_UP; i++)
		in_param->vpp_p_up[i] = cpu_to_be32(vpp_p_up[i]);

	err = mlx4_cmd(dev, mailbox->dma, port,
		       MLX4_ALLOCATE_VPP_ALLOCATE,
		       MLX4_CMD_ALLOCATE_VPP,
		       MLX4_CMD_TIME_CLASS_A,
		       MLX4_CMD_NATIVE);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL(mlx4_ALLOCATE_VPP_set);
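
/*
 * Illustrative sketch only (not part of the original driver): query how many
 * VPPs the port exposes with mlx4_ALLOCATE_VPP_get() and spread them evenly
 * across the user priorities before committing the allocation with
 * mlx4_ALLOCATE_VPP_set().  The helper name and the even split are assumed
 * example policy.
 */
static int __maybe_unused example_spread_vpps(struct mlx4_dev *dev, u8 port)
{
	u8 vpp_p_up[MLX4_NUM_UP];
	u16 available_vpp;
	int i, err;

	err = mlx4_ALLOCATE_VPP_get(dev, port, &available_vpp, vpp_p_up);
	if (err)
		return err;

	/* Assumed policy: give each user priority an equal share of VPPs. */
	for (i = 0; i < MLX4_NUM_UP; i++)
		vpp_p_up[i] = available_vpp / MLX4_NUM_UP;

	return mlx4_ALLOCATE_VPP_set(dev, port, vpp_p_up);
}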

int mlx4_SET_VPORT_QOS_get(struct mlx4_dev *dev, u8 port, u8 vport,
			   struct mlx4_vport_qos_param *out_param)
{
	int i;
	int err;
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_set_vport_context *ctx;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	ctx = mailbox->buf;

	err = mlx4_cmd_box(dev, 0, mailbox->dma, (vport << 8) | port,
			   MLX4_SET_VPORT_QOS_QUERY,
			   MLX4_CMD_SET_VPORT_QOS,
			   MLX4_CMD_TIME_CLASS_A,
			   MLX4_CMD_NATIVE);
	if (err)
		goto out;

	for (i = 0; i < MLX4_NUM_UP; i++) {
		out_param[i].bw_share = be32_to_cpu(ctx->qos_p_up[i].bw_share);
		out_param[i].max_avg_bw =
			be32_to_cpu(ctx->qos_p_up[i].max_avg_bw);
		/* The enable flag lives in bit 31, matching the SET path. */
		out_param[i].enable =
			!!(be32_to_cpu(ctx->qos_p_up[i].enable) & (1U << 31));
	}

out:
	mlx4_free_cmd_mailbox(dev, mailbox);

	return err;
}
EXPORT_SYMBOL(mlx4_SET_VPORT_QOS_get);

int mlx4_SET_VPORT_QOS_set(struct mlx4_dev *dev, u8 port, u8 vport,
			   struct mlx4_vport_qos_param *in_param)
{
	int i;
	int err;
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_set_vport_context *ctx;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	ctx = mailbox->buf;

	for (i = 0; i < MLX4_NUM_UP; i++) {
		ctx->qos_p_up[i].bw_share = cpu_to_be32(in_param[i].bw_share);
		ctx->qos_p_up[i].max_avg_bw =
			cpu_to_be32(in_param[i].max_avg_bw);
		ctx->qos_p_up[i].enable =
			cpu_to_be32(in_param[i].enable << 31);
	}

	err = mlx4_cmd(dev, mailbox->dma, (vport << 8) | port,
		       MLX4_SET_VPORT_QOS_SET,
		       MLX4_CMD_SET_VPORT_QOS,
		       MLX4_CMD_TIME_CLASS_A,
		       MLX4_CMD_NATIVE);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL(mlx4_SET_VPORT_QOS_set);
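
/*
 * Illustrative sketch only (not part of the original driver): read the
 * current per-priority QoS settings of a vport, cap the average bandwidth of
 * every priority, and write the result back.  The helper name is an assumed
 * example, and the meaning/units of 'max_avg_bw' are whatever the firmware
 * interface defines for that field.
 */
static int __maybe_unused example_cap_vport_bw(struct mlx4_dev *dev, u8 port,
					       u8 vport, u32 max_avg_bw)
{
	struct mlx4_vport_qos_param qos[MLX4_NUM_UP];
	int i, err;

	err = mlx4_SET_VPORT_QOS_get(dev, port, vport, qos);
	if (err)
		return err;

	for (i = 0; i < MLX4_NUM_UP; i++) {
		qos[i].max_avg_bw = max_avg_bw;
		qos[i].enable = 1;
	}

	return mlx4_SET_VPORT_QOS_set(dev, port, vport, qos);
}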