/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/errno.h>

#include <linux/mlx4/cmd.h>

#include <asm/io.h>

#include "mlx4.h"

#define CMD_POLL_TOKEN 0xffff

enum {
	/* command completed successfully: */
	CMD_STAT_OK		= 0x00,
	/* Internal error (such as a bus error) occurred while processing command: */
	CMD_STAT_INTERNAL_ERR	= 0x01,
	/* Operation/command not supported or opcode modifier not supported: */
	CMD_STAT_BAD_OP		= 0x02,
	/* Parameter not supported or parameter out of range: */
	CMD_STAT_BAD_PARAM	= 0x03,
	/* System not enabled or bad system state: */
	CMD_STAT_BAD_SYS_STATE	= 0x04,
	/* Attempt to access reserved or unallocated resource: */
	CMD_STAT_BAD_RESOURCE	= 0x05,
	/* Requested resource is currently executing a command, or is otherwise busy: */
	CMD_STAT_RESOURCE_BUSY	= 0x06,
	/* Required capability exceeds device limits: */
	CMD_STAT_EXCEED_LIM	= 0x08,
	/* Resource is not in the appropriate state or ownership: */
	CMD_STAT_BAD_RES_STATE	= 0x09,
	/* Index out of range: */
	CMD_STAT_BAD_INDEX	= 0x0a,
	/* FW image corrupted: */
	CMD_STAT_BAD_NVMEM	= 0x0b,
	/* Error in ICM mapping (e.g. not enough auxiliary ICM pages to execute command): */
	CMD_STAT_ICM_ERROR	= 0x0c,
	/* Attempt to modify a QP/EE which is not in the presumed state: */
	CMD_STAT_BAD_QP_STATE	= 0x10,
	/* Bad segment parameters (Address/Size): */
	CMD_STAT_BAD_SEG_PARAM	= 0x20,
	/* Memory Region has Memory Windows bound to it: */
	CMD_STAT_REG_BOUND	= 0x21,
	/* HCA local attached memory not present: */
	CMD_STAT_LAM_NOT_PRE	= 0x22,
	/* Bad management packet (silently discarded): */
	CMD_STAT_BAD_PKT	= 0x30,
	/* More outstanding CQEs in CQ than new CQ size: */
	CMD_STAT_BAD_SIZE	= 0x40,
	/* Multi Function device support required: */
	CMD_STAT_MULTI_FUNC_REQ	= 0x50,
};

enum {
	HCR_IN_PARAM_OFFSET	= 0x00,
	HCR_IN_MODIFIER_OFFSET	= 0x08,
	HCR_OUT_PARAM_OFFSET	= 0x0c,
	HCR_TOKEN_OFFSET	= 0x14,
	HCR_STATUS_OFFSET	= 0x18,

	HCR_OPMOD_SHIFT		= 12,
	HCR_T_BIT		= 21,
	HCR_E_BIT		= 22,
	HCR_GO_BIT		= 23
};

enum {
	GO_BIT_TIMEOUT_MSECS	= 10000
};

struct mlx4_cmd_context {
	struct completion	done;
	int			result;
	int			next;
	u64			out_param;
	u16			token;
};
static int mlx4_status_to_errno(u8 status)
{
	static const int trans_table[] = {
		[CMD_STAT_INTERNAL_ERR]	  = -EIO,
		[CMD_STAT_BAD_OP]	  = -EPERM,
		[CMD_STAT_BAD_PARAM]	  = -EINVAL,
		[CMD_STAT_BAD_SYS_STATE]  = -ENXIO,
		[CMD_STAT_BAD_RESOURCE]	  = -EBADF,
		[CMD_STAT_RESOURCE_BUSY]  = -EBUSY,
		[CMD_STAT_EXCEED_LIM]	  = -ENOMEM,
		[CMD_STAT_BAD_RES_STATE]  = -EBADF,
		[CMD_STAT_BAD_INDEX]	  = -EBADF,
		[CMD_STAT_BAD_NVMEM]	  = -EFAULT,
		[CMD_STAT_ICM_ERROR]	  = -ENFILE,
		[CMD_STAT_BAD_QP_STATE]	  = -EINVAL,
		[CMD_STAT_BAD_SEG_PARAM]  = -EFAULT,
		[CMD_STAT_REG_BOUND]	  = -EBUSY,
		[CMD_STAT_LAM_NOT_PRE]	  = -EAGAIN,
		[CMD_STAT_BAD_PKT]	  = -EINVAL,
		[CMD_STAT_BAD_SIZE]	  = -ENOMEM,
		[CMD_STAT_MULTI_FUNC_REQ] = -EACCES,
	};

	if (status >= ARRAY_SIZE(trans_table) ||
	    (status != CMD_STAT_OK && trans_table[status] == 0))
		return -EIO;

	return trans_table[status];
}

static int cmd_pending(struct mlx4_dev *dev)
{
	u32 status = readl(mlx4_priv(dev)->cmd.hcr + HCR_STATUS_OFFSET);

	return (status & swab32(1 << HCR_GO_BIT)) ||
		(mlx4_priv(dev)->cmd.toggle ==
		 !!(status & swab32(1 << HCR_T_BIT)));
}

static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param,
			 u32 in_modifier, u8 op_modifier, u16 op, u16 token,
			 int event)
{
	struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
	u32 __iomem *hcr = cmd->hcr;
	int ret = -EAGAIN;
	unsigned long end;

	mutex_lock(&cmd->hcr_mutex);

	end = jiffies;
	if (event)
		end += msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS);

	while (cmd_pending(dev)) {
		if (time_after_eq(jiffies, end))
			goto out;
		cond_resched();
	}

	/*
	 * We use writel (instead of something like memcpy_toio)
	 * because writes of less than 32 bits to the HCR don't work
	 * (and some architectures such as ia64 implement memcpy_toio
	 * in terms of writeb).
	 */
	__raw_writel((__force u32) cpu_to_be32(in_param >> 32),		  hcr + 0);
	__raw_writel((__force u32) cpu_to_be32(in_param & 0xfffffffful),  hcr + 1);
	__raw_writel((__force u32) cpu_to_be32(in_modifier),		  hcr + 2);
	__raw_writel((__force u32) cpu_to_be32(out_param >> 32),	  hcr + 3);
	__raw_writel((__force u32) cpu_to_be32(out_param & 0xfffffffful), hcr + 4);
	__raw_writel((__force u32) cpu_to_be32(token << 16),		  hcr + 5);

	/* __raw_writel may not order writes. */
	wmb();

	__raw_writel((__force u32) cpu_to_be32((1 << HCR_GO_BIT)		|
					       (cmd->toggle << HCR_T_BIT)	|
					       (event ? (1 << HCR_E_BIT) : 0)	|
					       (op_modifier << HCR_OPMOD_SHIFT) |
					       op),			  hcr + 6);

	/*
	 * Make sure that our HCR writes don't get mixed in with
	 * writes from another CPU starting a FW command.
	 */
	mmiowb();

	cmd->toggle = cmd->toggle ^ 1;

	ret = 0;

out:
	mutex_unlock(&cmd->hcr_mutex);
	return ret;
}
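/*
 * For reference, the HCR register layout implied by the writes in
 * mlx4_cmd_post() and the HCR_*_OFFSET enum above (each word is a
 * big-endian 32-bit value):
 *
 *	hcr + 0 (0x00): in_param[63:32]
 *	hcr + 1 (0x04): in_param[31:0]
 *	hcr + 2 (0x08): in_modifier
 *	hcr + 3 (0x0c): out_param[63:32]
 *	hcr + 4 (0x10): out_param[31:0]
 *	hcr + 5 (0x14): token << 16
 *	hcr + 6 (0x18): go bit, event bit, toggle bit, opcode modifier, opcode
 */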
static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
			 int out_is_imm, u32 in_modifier, u8 op_modifier,
			 u16 op, unsigned long timeout)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	void __iomem *hcr = priv->cmd.hcr;
	int err = 0;
	unsigned long end;

	down(&priv->cmd.poll_sem);

	err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
			    in_modifier, op_modifier, op, CMD_POLL_TOKEN, 0);
	if (err)
		goto out;

	end = msecs_to_jiffies(timeout) + jiffies;
	while (cmd_pending(dev) && time_before(jiffies, end))
		cond_resched();

	if (cmd_pending(dev)) {
		err = -ETIMEDOUT;
		goto out;
	}

	if (out_is_imm)
		*out_param =
			(u64) be32_to_cpu((__force __be32)
					  __raw_readl(hcr + HCR_OUT_PARAM_OFFSET)) << 32 |
			(u64) be32_to_cpu((__force __be32)
					  __raw_readl(hcr + HCR_OUT_PARAM_OFFSET + 4));

	err = mlx4_status_to_errno(be32_to_cpu((__force __be32)
					       __raw_readl(hcr + HCR_STATUS_OFFSET)) >> 24);

out:
	up(&priv->cmd.poll_sem);
	return err;
}

void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_context *context =
		&priv->cmd.context[token & priv->cmd.token_mask];

	/* previously timed out command completing at long last */
	if (token != context->token)
		return;

	context->result    = mlx4_status_to_errno(status);
	context->out_param = out_param;

	complete(&context->done);
}

static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
			 int out_is_imm, u32 in_modifier, u8 op_modifier,
			 u16 op, unsigned long timeout)
{
	struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
	struct mlx4_cmd_context *context;
	int err = 0;

	down(&cmd->event_sem);

	spin_lock(&cmd->context_lock);
	BUG_ON(cmd->free_head < 0);
	context = &cmd->context[cmd->free_head];
	context->token += cmd->token_mask + 1;
	cmd->free_head = context->next;
	spin_unlock(&cmd->context_lock);

	init_completion(&context->done);

	mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
		      in_modifier, op_modifier, op, context->token, 1);

	if (!wait_for_completion_timeout(&context->done, msecs_to_jiffies(timeout))) {
		err = -EBUSY;
		goto out;
	}

	err = context->result;
	if (err)
		goto out;

	if (out_is_imm)
		*out_param = context->out_param;

out:
	spin_lock(&cmd->context_lock);
	context->next = cmd->free_head;
	cmd->free_head = context - cmd->context;
	spin_unlock(&cmd->context_lock);

	up(&cmd->event_sem);
	return err;
}

int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
	       int out_is_imm, u32 in_modifier, u8 op_modifier,
	       u16 op, unsigned long timeout)
{
	if (mlx4_priv(dev)->cmd.use_events)
		return mlx4_cmd_wait(dev, in_param, out_param, out_is_imm,
				     in_modifier, op_modifier, op, timeout);
	else
		return mlx4_cmd_poll(dev, in_param, out_param, out_is_imm,
				     in_modifier, op_modifier, op, timeout);
}
EXPORT_SYMBOL_GPL(__mlx4_cmd);
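/*
 * Callers normally reach __mlx4_cmd() through the static inline wrappers
 * declared in <linux/mlx4/cmd.h>; roughly (a sketch only, see the header
 * for the authoritative definitions):
 *
 *	mlx4_cmd(dev, in_param, in_modifier, op_modifier, op, timeout)
 *		issues a command with no output (__mlx4_cmd(..., NULL, 0, ...)).
 *	mlx4_cmd_box(dev, in_param, out_param, ...)
 *		passes the DMA address of an output mailbox in out_param.
 *	mlx4_cmd_imm(dev, in_param, &out_param, ...)
 *		requests an immediate output parameter (out_is_imm = 1).
 */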
int mlx4_cmd_init(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mutex_init(&priv->cmd.hcr_mutex);
	sema_init(&priv->cmd.poll_sem, 1);
	priv->cmd.use_events = 0;
	priv->cmd.toggle     = 1;

	priv->cmd.hcr = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_HCR_BASE,
				MLX4_HCR_SIZE);
	if (!priv->cmd.hcr) {
		mlx4_err(dev, "Couldn't map command register.");
		return -ENOMEM;
	}

	priv->cmd.pool = pci_pool_create("mlx4_cmd", dev->pdev,
					 MLX4_MAILBOX_SIZE,
					 MLX4_MAILBOX_SIZE, 0);
	if (!priv->cmd.pool) {
		iounmap(priv->cmd.hcr);
		return -ENOMEM;
	}

	return 0;
}

void mlx4_cmd_cleanup(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	pci_pool_destroy(priv->cmd.pool);
	iounmap(priv->cmd.hcr);
}

/*
 * Switch to using events to issue FW commands (can only be called
 * after event queue for command events has been initialized).
 */
int mlx4_cmd_use_events(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;

	priv->cmd.context = kmalloc(priv->cmd.max_cmds *
				    sizeof (struct mlx4_cmd_context),
				    GFP_KERNEL);
	if (!priv->cmd.context)
		return -ENOMEM;

	for (i = 0; i < priv->cmd.max_cmds; ++i) {
		priv->cmd.context[i].token = i;
		priv->cmd.context[i].next  = i + 1;
	}

	priv->cmd.context[priv->cmd.max_cmds - 1].next = -1;
	priv->cmd.free_head = 0;

	sema_init(&priv->cmd.event_sem, priv->cmd.max_cmds);
	spin_lock_init(&priv->cmd.context_lock);

	for (priv->cmd.token_mask = 1;
	     priv->cmd.token_mask < priv->cmd.max_cmds;
	     priv->cmd.token_mask <<= 1)
		; /* nothing */
	--priv->cmd.token_mask;

	priv->cmd.use_events = 1;

	down(&priv->cmd.poll_sem);

	return 0;
}

/*
 * Switch back to polling (used when shutting down the device)
 */
void mlx4_cmd_use_polling(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;

	priv->cmd.use_events = 0;

	for (i = 0; i < priv->cmd.max_cmds; ++i)
		down(&priv->cmd.event_sem);

	kfree(priv->cmd.context);

	up(&priv->cmd.poll_sem);
}
struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev)
{
	struct mlx4_cmd_mailbox *mailbox;

	mailbox = kmalloc(sizeof *mailbox, GFP_KERNEL);
	if (!mailbox)
		return ERR_PTR(-ENOMEM);

	mailbox->buf = pci_pool_alloc(mlx4_priv(dev)->cmd.pool, GFP_KERNEL,
				      &mailbox->dma);
	if (!mailbox->buf) {
		kfree(mailbox);
		return ERR_PTR(-ENOMEM);
	}

	return mailbox;
}
EXPORT_SYMBOL_GPL(mlx4_alloc_cmd_mailbox);

void mlx4_free_cmd_mailbox(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox)
{
	if (!mailbox)
		return;

	pci_pool_free(mlx4_priv(dev)->cmd.pool, mailbox->buf, mailbox->dma);
	kfree(mailbox);
}
EXPORT_SYMBOL_GPL(mlx4_free_cmd_mailbox);
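/*
 * Typical usage from the rest of the driver (illustrative sketch only;
 * opcode and time-class constants such as MLX4_CMD_QUERY_FW and
 * MLX4_CMD_TIME_CLASS_A come from <linux/mlx4/cmd.h>):
 *
 *	struct mlx4_cmd_mailbox *mailbox;
 *	int err;
 *
 *	mailbox = mlx4_alloc_cmd_mailbox(dev);
 *	if (IS_ERR(mailbox))
 *		return PTR_ERR(mailbox);
 *
 *	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_FW,
 *			   MLX4_CMD_TIME_CLASS_A);
 *	if (!err)
 *		;	// parse the response in mailbox->buf
 *
 *	mlx4_free_cmd_mailbox(dev, mailbox);
 *	return err;
 */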