1 /* 2 * Copyright (c) 2016 Hisilicon Limited. 3 * 4 * This software is available to you under a choice of one of two 5 * licenses. You may choose to be licensed under the terms of the GNU 6 * General Public License (GPL) Version 2, available from the file 7 * COPYING in the main directory of this source tree, or the 8 * OpenIB.org BSD license below: 9 * 10 * Redistribution and use in source and binary forms, with or 11 * without modification, are permitted provided that the following 12 * conditions are met: 13 * 14 * - Redistributions of source code must retain the above 15 * copyright notice, this list of conditions and the following 16 * disclaimer. 17 * 18 * - Redistributions in binary form must reproduce the above 19 * copyright notice, this list of conditions and the following 20 * disclaimer in the documentation and/or other materials 21 * provided with the distribution. 22 * 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 * SOFTWARE. 
 */

#include <linux/dmapool.h>
#include <linux/platform_device.h>
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"

/* Fixed token used for polled mailbox commands (no completion event). */
#define CMD_POLL_TOKEN 0xffff
/* Number of event-mode command contexts (size of cmd.context[]). */
#define CMD_MAX_NUM 32
/* Added (+1) to a context's token on reuse so stale events are detectable. */
#define CMD_TOKEN_MASK 0x1f

/*
 * Post a single mailbox command to hardware through the hw-specific
 * post_mbox() hook. hcr_mutex serializes concurrent posters so only one
 * command is written to the command register at a time.
 *
 * Returns 0 on success or the error code from post_mbox().
 */
static int hns_roce_cmd_mbox_post_hw(struct hns_roce_dev *hr_dev, u64 in_param,
				     u64 out_param, u32 in_modifier,
				     u8 op_modifier, u16 op, u16 token,
				     int event)
{
	struct hns_roce_cmdq *cmd = &hr_dev->cmd;
	int ret;

	mutex_lock(&cmd->hcr_mutex);
	ret = hr_dev->hw->post_mbox(hr_dev, in_param, out_param, in_modifier,
				    op_modifier, op, token, event);
	mutex_unlock(&cmd->hcr_mutex);

	return ret;
}

/*
 * Post a mailbox command and busy-wait for its completion in the hw layer.
 *
 * This should be called with "poll_sem" held (see hns_roce_cmd_mbox_poll).
 */
static int __hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev, u64 in_param,
				    u64 out_param, unsigned long in_modifier,
				    u8 op_modifier, u16 op,
				    unsigned int timeout)
{
	struct device *dev = hr_dev->dev;
	int ret;

	/* event == 0: completion is polled, not signalled by an EQ event */
	ret = hns_roce_cmd_mbox_post_hw(hr_dev, in_param, out_param,
					in_modifier, op_modifier, op,
					CMD_POLL_TOKEN, 0);
	if (ret) {
		dev_err(dev, "[cmd_poll]hns_roce_cmd_mbox_post_hw failed\n");
		return ret;
	}

	return hr_dev->hw->poll_mbox_done(hr_dev, timeout);
}

/*
 * Poll-mode mailbox command: take poll_sem so only one polled command is
 * outstanding at a time, then delegate to __hns_roce_cmd_mbox_poll().
 */
static int hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev, u64 in_param,
				  u64 out_param, unsigned long in_modifier,
				  u8 op_modifier, u16 op, unsigned int timeout)
{
	int ret;

	down(&hr_dev->cmd.poll_sem);
	ret = __hns_roce_cmd_mbox_poll(hr_dev, in_param, out_param, in_modifier,
				       op_modifier, op, timeout);
	up(&hr_dev->cmd.poll_sem);

	return ret;
}

/*
 * Completion handler for event-mode mailbox commands, called from the
 * asynchronous-event path when hardware reports a command is done.
 *
 * The low bits of the token index the context array; the full token must
 * also match, otherwise this is a stale event for a context that has
 * already been recycled (its token was bumped on reuse) and is ignored.
 */
void hns_roce_cmd_event(struct hns_roce_dev *hr_dev, u16 token, u8 status,
			u64 out_param)
{
	struct hns_roce_cmd_context *context =
		&hr_dev->cmd.context[token % hr_dev->cmd.max_cmds];

	if (token != context->token)
		return;

	context->result = (status == HNS_ROCE_CMD_SUCCESS) ? 0 : (-EIO);
	context->out_param = out_param;
	complete(&context->done);
}

/*
 * Event-mode mailbox command: grab a free context, post the command with
 * event == 1 and sleep until hns_roce_cmd_event() completes it (or until
 * the timeout expires).
 *
 * This should be called with "event_sem" held (the semaphore's count,
 * initialized to max_cmds, guarantees a free context is available — hence
 * the WARN_ON below should never fire).
 */
static int __hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev, u64 in_param,
				    u64 out_param, unsigned long in_modifier,
				    u8 op_modifier, u16 op,
				    unsigned int timeout)
{
	struct hns_roce_cmdq *cmd = &hr_dev->cmd;
	struct hns_roce_cmd_context *context;
	struct device *dev = hr_dev->dev;
	int ret;

	/* Pop a context off the free list; bump its token so a late event
	 * for the previous user of this slot no longer matches.
	 */
	spin_lock(&cmd->context_lock);
	WARN_ON(cmd->free_head < 0);
	context = &cmd->context[cmd->free_head];
	context->token += cmd->token_mask + 1;
	cmd->free_head = context->next;
	spin_unlock(&cmd->context_lock);

	init_completion(&context->done);

	ret = hns_roce_cmd_mbox_post_hw(hr_dev, in_param, out_param,
					in_modifier, op_modifier, op,
					context->token, 1);
	if (ret)
		goto out;

	/*
	 * wait_for_completion_timeout() returns 0 on timeout, otherwise the
	 * remaining jiffies of the requested timeout.
	 */
	if (!wait_for_completion_timeout(&context->done,
					 msecs_to_jiffies(timeout))) {
		dev_err(dev, "[cmd]wait_for_completion_timeout timeout\n");
		ret = -EBUSY;
		goto out;
	}

	/* Result filled in by hns_roce_cmd_event(): 0 or -EIO. */
	ret = context->result;
	if (ret) {
		dev_err(dev, "[cmd]event mod cmd process error!err=%d\n", ret);
		goto out;
	}

out:
	/* Push the context back onto the free list. */
	spin_lock(&cmd->context_lock);
	context->next = cmd->free_head;
	cmd->free_head = context - cmd->context;
	spin_unlock(&cmd->context_lock);

	return ret;
}

/*
 * Event-mode mailbox command entry: event_sem's count limits outstanding
 * commands to the number of contexts.
 */
static int hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev, u64 in_param,
				  u64 out_param, unsigned long in_modifier,
				  u8 op_modifier, u16 op, unsigned int timeout)
{
	int ret;

	down(&hr_dev->cmd.event_sem);
	ret = __hns_roce_cmd_mbox_wait(hr_dev, in_param, out_param, in_modifier,
				       op_modifier, op, timeout);
	up(&hr_dev->cmd.event_sem);

	return ret;
}

int
hns_roce_cmd_mbox(struct hns_roce_dev *hr_dev, u64 in_param, u64 out_param, 175 unsigned long in_modifier, u8 op_modifier, u16 op, 176 unsigned int timeout) 177 { 178 bool is_busy; 179 180 if (hr_dev->hw->chk_mbox_avail) 181 if (!hr_dev->hw->chk_mbox_avail(hr_dev, &is_busy)) 182 return is_busy ? -EBUSY : 0; 183 184 if (hr_dev->cmd.use_events) 185 return hns_roce_cmd_mbox_wait(hr_dev, in_param, out_param, 186 in_modifier, op_modifier, op, 187 timeout); 188 else 189 return hns_roce_cmd_mbox_poll(hr_dev, in_param, out_param, 190 in_modifier, op_modifier, op, 191 timeout); 192 } 193 194 int hns_roce_cmd_init(struct hns_roce_dev *hr_dev) 195 { 196 struct device *dev = hr_dev->dev; 197 198 mutex_init(&hr_dev->cmd.hcr_mutex); 199 sema_init(&hr_dev->cmd.poll_sem, 1); 200 hr_dev->cmd.use_events = 0; 201 hr_dev->cmd.max_cmds = CMD_MAX_NUM; 202 hr_dev->cmd.pool = dma_pool_create("hns_roce_cmd", dev, 203 HNS_ROCE_MAILBOX_SIZE, 204 HNS_ROCE_MAILBOX_SIZE, 0); 205 if (!hr_dev->cmd.pool) 206 return -ENOMEM; 207 208 return 0; 209 } 210 211 void hns_roce_cmd_cleanup(struct hns_roce_dev *hr_dev) 212 { 213 dma_pool_destroy(hr_dev->cmd.pool); 214 } 215 216 int hns_roce_cmd_use_events(struct hns_roce_dev *hr_dev) 217 { 218 struct hns_roce_cmdq *hr_cmd = &hr_dev->cmd; 219 int i; 220 221 hr_cmd->context = 222 kcalloc(hr_cmd->max_cmds, sizeof(*hr_cmd->context), GFP_KERNEL); 223 if (!hr_cmd->context) 224 return -ENOMEM; 225 226 for (i = 0; i < hr_cmd->max_cmds; ++i) { 227 hr_cmd->context[i].token = i; 228 hr_cmd->context[i].next = i + 1; 229 } 230 231 hr_cmd->context[hr_cmd->max_cmds - 1].next = -1; 232 hr_cmd->free_head = 0; 233 234 sema_init(&hr_cmd->event_sem, hr_cmd->max_cmds); 235 spin_lock_init(&hr_cmd->context_lock); 236 237 hr_cmd->token_mask = CMD_TOKEN_MASK; 238 hr_cmd->use_events = 1; 239 240 return 0; 241 } 242 243 void hns_roce_cmd_use_polling(struct hns_roce_dev *hr_dev) 244 { 245 struct hns_roce_cmdq *hr_cmd = &hr_dev->cmd; 246 247 kfree(hr_cmd->context); 248 
hr_cmd->use_events = 0; 249 } 250 251 struct hns_roce_cmd_mailbox * 252 hns_roce_alloc_cmd_mailbox(struct hns_roce_dev *hr_dev) 253 { 254 struct hns_roce_cmd_mailbox *mailbox; 255 256 mailbox = kmalloc(sizeof(*mailbox), GFP_KERNEL); 257 if (!mailbox) 258 return ERR_PTR(-ENOMEM); 259 260 mailbox->buf = 261 dma_pool_alloc(hr_dev->cmd.pool, GFP_KERNEL, &mailbox->dma); 262 if (!mailbox->buf) { 263 kfree(mailbox); 264 return ERR_PTR(-ENOMEM); 265 } 266 267 return mailbox; 268 } 269 270 void hns_roce_free_cmd_mailbox(struct hns_roce_dev *hr_dev, 271 struct hns_roce_cmd_mailbox *mailbox) 272 { 273 if (!mailbox) 274 return; 275 276 dma_pool_free(hr_dev->cmd.pool, mailbox->buf, mailbox->dma); 277 kfree(mailbox); 278 } 279