/*
 * Intel SST generic IPC Support
 *
 * Copyright (C) 2015, Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/kthread.h>
#include <sound/asound.h>

#include "sst-dsp.h"
#include "sst-dsp-priv.h"
#include "sst-ipc.h"

/* IPC message timeout (msecs) */
#define IPC_TIMEOUT_MSECS	300

#define IPC_EMPTY_LIST_SIZE	8

/* locks held by caller */
static struct ipc_message *msg_get_empty(struct sst_generic_ipc *ipc)
{
	struct ipc_message *msg = NULL;

	if (!list_empty(&ipc->empty_list)) {
		msg = list_first_entry(&ipc->empty_list, struct ipc_message,
			list);
		list_del(&msg->list);
	}

	return msg;
}

static int tx_wait_done(struct sst_generic_ipc *ipc,
	struct ipc_message *msg, void *rx_data)
{
	unsigned long flags;
	int ret;

	/* wait for DSP completion (in all cases at the moment, including pending) */
	ret = wait_event_timeout(msg->waitq, msg->complete,
		msecs_to_jiffies(IPC_TIMEOUT_MSECS));

	spin_lock_irqsave(&ipc->dsp->spinlock, flags);
	if (ret == 0) {
		if (ipc->ops.shim_dbg != NULL)
			ipc->ops.shim_dbg(ipc, "message timeout");

		list_del(&msg->list);
		ret = -ETIMEDOUT;
	} else {

		/* copy the data returned from DSP */
		if (msg->rx_size)
			memcpy(rx_data, msg->rx_data, msg->rx_size);
		ret = msg->errno;
	}

	list_add_tail(&msg->list, &ipc->empty_list);
	spin_unlock_irqrestore(&ipc->dsp->spinlock, flags);
	return ret;
}

static int ipc_tx_message(struct sst_generic_ipc *ipc, u64 header,
	void *tx_data, size_t tx_bytes, void *rx_data,
	size_t rx_bytes, int wait)
{
	struct ipc_message *msg;
	unsigned long flags;

	spin_lock_irqsave(&ipc->dsp->spinlock, flags);

	msg = msg_get_empty(ipc);
	if (msg == NULL) {
		spin_unlock_irqrestore(&ipc->dsp->spinlock, flags);
		return -EBUSY;
	}

	msg->header = header;
	msg->tx_size = tx_bytes;
	msg->rx_size = rx_bytes;
	msg->wait = wait;
	msg->errno = 0;
	msg->pending = false;
	msg->complete = false;

	if ((tx_bytes) && (ipc->ops.tx_data_copy != NULL))
		ipc->ops.tx_data_copy(msg, tx_data, tx_bytes);

	list_add_tail(&msg->list, &ipc->tx_list);
	spin_unlock_irqrestore(&ipc->dsp->spinlock, flags);

	queue_kthread_work(&ipc->kworker, &ipc->kwork);

	if (wait)
		return tx_wait_done(ipc, msg, rx_data);
	else
		return 0;
}

static int msg_empty_list_init(struct sst_generic_ipc *ipc)
{
	int i;

	ipc->msg = kzalloc(sizeof(struct ipc_message) *
		IPC_EMPTY_LIST_SIZE, GFP_KERNEL);
	if (ipc->msg == NULL)
		return -ENOMEM;

	for (i = 0; i < IPC_EMPTY_LIST_SIZE; i++) {
		ipc->msg[i].tx_data = kzalloc(ipc->tx_data_max_size,
			GFP_KERNEL);
		if (ipc->msg[i].tx_data == NULL)
			goto free_mem;

		ipc->msg[i].rx_data = kzalloc(ipc->rx_data_max_size,
			GFP_KERNEL);
		if (ipc->msg[i].rx_data == NULL) {
			kfree(ipc->msg[i].tx_data);
			goto free_mem;
		}

		init_waitqueue_head(&ipc->msg[i].waitq);
		list_add(&ipc->msg[i].list, &ipc->empty_list);
	}

	return 0;

free_mem:
	while (i > 0) {
		kfree(ipc->msg[i-1].tx_data);
		kfree(ipc->msg[i-1].rx_data);
		--i;
	}
	kfree(ipc->msg);

	return -ENOMEM;
}

/* kthread worker: send the oldest queued TX message once the DSP is free */
static void ipc_tx_msgs(struct kthread_work *work)
{
	struct sst_generic_ipc *ipc =
		container_of(work, struct sst_generic_ipc, kwork);
	struct ipc_message *msg;
	unsigned long flags;

	spin_lock_irqsave(&ipc->dsp->spinlock, flags);

	if (list_empty(&ipc->tx_list) || ipc->pending) {
		spin_unlock_irqrestore(&ipc->dsp->spinlock, flags);
		return;
	}

	/* if the DSP is busy, we will TX messages after IRQ.
	 * also postpone if we are in the middle of processing a completion irq
	 */
	if (ipc->ops.is_dsp_busy && ipc->ops.is_dsp_busy(ipc->dsp)) {
		dev_dbg(ipc->dev, "ipc_tx_msgs dsp busy\n");
		spin_unlock_irqrestore(&ipc->dsp->spinlock, flags);
		return;
	}

	msg = list_first_entry(&ipc->tx_list, struct ipc_message, list);
	list_move(&msg->list, &ipc->rx_list);

	if (ipc->ops.tx_msg != NULL)
		ipc->ops.tx_msg(ipc, msg);

	spin_unlock_irqrestore(&ipc->dsp->spinlock, flags);
}

int sst_ipc_tx_message_wait(struct sst_generic_ipc *ipc, u64 header,
	void *tx_data, size_t tx_bytes, void *rx_data, size_t rx_bytes)
{
	return ipc_tx_message(ipc, header, tx_data, tx_bytes,
		rx_data, rx_bytes, 1);
}
EXPORT_SYMBOL_GPL(sst_ipc_tx_message_wait);

int sst_ipc_tx_message_nowait(struct sst_generic_ipc *ipc, u64 header,
	void *tx_data, size_t tx_bytes)
{
	return ipc_tx_message(ipc, header, tx_data, tx_bytes,
		NULL, 0, 0);
}
EXPORT_SYMBOL_GPL(sst_ipc_tx_message_nowait);

struct ipc_message *sst_ipc_reply_find_msg(struct sst_generic_ipc *ipc,
	u64 header)
{
	struct ipc_message *msg;
	u64 mask;

	if (ipc->ops.reply_msg_match != NULL)
		header = ipc->ops.reply_msg_match(header, &mask);
	else
		mask = (u64)-1; /* no platform matcher: compare the full header */

	if (list_empty(&ipc->rx_list)) {
		dev_err(ipc->dev, "error: rx list empty but received 0x%llx\n",
			header);
		return NULL;
	}

	list_for_each_entry(msg, &ipc->rx_list, list) {
		if ((msg->header & mask) == header)
			return msg;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(sst_ipc_reply_find_msg);

/* locks held by caller */
void sst_ipc_tx_msg_reply_complete(struct sst_generic_ipc *ipc,
	struct ipc_message *msg)
{
	msg->complete = true;

	if (!msg->wait)
		list_add_tail(&msg->list, &ipc->empty_list);
	else
		wake_up(&msg->waitq);
}
EXPORT_SYMBOL_GPL(sst_ipc_tx_msg_reply_complete);

void sst_ipc_drop_all(struct sst_generic_ipc *ipc)
{
	struct ipc_message *msg, *tmp;
	unsigned long flags;
	int tx_drop_cnt = 0, rx_drop_cnt = 0;

	/* drop all TX and RX messages before we stall + reset DSP */
	spin_lock_irqsave(&ipc->dsp->spinlock, flags);

	list_for_each_entry_safe(msg, tmp, &ipc->tx_list, list) {
		list_move(&msg->list, &ipc->empty_list);
		tx_drop_cnt++;
	}

	list_for_each_entry_safe(msg, tmp, &ipc->rx_list, list) {
		list_move(&msg->list, &ipc->empty_list);
		rx_drop_cnt++;
	}

	spin_unlock_irqrestore(&ipc->dsp->spinlock, flags);

	if (tx_drop_cnt || rx_drop_cnt)
		dev_err(ipc->dev, "dropped IPC msg TX=%d, RX=%d\n",
			tx_drop_cnt, rx_drop_cnt);
}
EXPORT_SYMBOL_GPL(sst_ipc_drop_all);

int sst_ipc_init(struct sst_generic_ipc *ipc)
{
	int ret, i;

	INIT_LIST_HEAD(&ipc->tx_list);
	INIT_LIST_HEAD(&ipc->rx_list);
	INIT_LIST_HEAD(&ipc->empty_list);
	init_waitqueue_head(&ipc->wait_txq);

	ret = msg_empty_list_init(ipc);
	if (ret < 0)
		return -ENOMEM;

	/* start the IPC message thread */
	init_kthread_worker(&ipc->kworker);
	ipc->tx_thread = kthread_run(kthread_worker_fn,
					&ipc->kworker, "%s",
					dev_name(ipc->dev));
	if (IS_ERR(ipc->tx_thread)) {
		dev_err(ipc->dev, "error: failed to create message TX task\n");
		ret = PTR_ERR(ipc->tx_thread);

		/* free the preallocated message buffers, not just the array */
		for (i = 0; i < IPC_EMPTY_LIST_SIZE; i++) {
			kfree(ipc->msg[i].tx_data);
			kfree(ipc->msg[i].rx_data);
		}
		kfree(ipc->msg);
		return ret;
	}

	init_kthread_work(&ipc->kwork, ipc_tx_msgs);
	return 0;
}
EXPORT_SYMBOL_GPL(sst_ipc_init);

void sst_ipc_fini(struct sst_generic_ipc *ipc)
{
	int i;

	if (ipc->tx_thread)
		kthread_stop(ipc->tx_thread);

	if (ipc->msg) {
		for (i = 0; i < IPC_EMPTY_LIST_SIZE; i++) {
			kfree(ipc->msg[i].tx_data);
			kfree(ipc->msg[i].rx_data);
		}
		kfree(ipc->msg);
	}
}
EXPORT_SYMBOL_GPL(sst_ipc_fini);

/* Module information */
MODULE_AUTHOR("Jin Yao");
MODULE_DESCRIPTION("Intel SST IPC generic");
MODULE_LICENSE("GPL v2");
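
/*
 * Usage sketch (illustrative comment only, kept out of the build): a platform
 * driver embeds struct sst_generic_ipc in its own context, fills in ipc->dev,
 * ipc->dsp, ipc->ops and the tx/rx max data sizes, then calls sst_ipc_init().
 * The names my_ipc_ops, MY_IPC_SET_PARAM and the buffers below are
 * hypothetical placeholders, not symbols defined by this file.
 *
 *	static int my_platform_ipc_init(struct sst_generic_ipc *ipc,
 *					struct sst_dsp *dsp, struct device *dev)
 *	{
 *		int ret;
 *
 *		ipc->dev = dev;
 *		ipc->dsp = dsp;
 *		ipc->ops = my_ipc_ops;		// tx_msg, tx_data_copy, ...
 *		ipc->tx_data_max_size = 4096;
 *		ipc->rx_data_max_size = 4096;
 *
 *		ret = sst_ipc_init(ipc);
 *		if (ret < 0)
 *			return ret;
 *
 *		// blocking request/reply from process context
 *		return sst_ipc_tx_message_wait(ipc, MY_IPC_SET_PARAM,
 *					       tx_buf, tx_len, rx_buf, rx_len);
 *	}
 *
 * On error recovery the driver calls sst_ipc_drop_all() before stalling and
 * resetting the DSP, and sst_ipc_fini() on removal.
 */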