/*
 *
 * Intel Management Engine Interface (Intel MEI) Linux driver
 * Copyright (c) 2003-2012, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 */


#include <linux/export.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/fs.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>

#include <linux/mei.h>

#include "mei_dev.h"
#include "hbm.h"
#include "client.h"


/**
 * mei_irq_compl_handler - dispatch complete handlers
 *	for the completed callbacks
 *
 * @dev: mei device
 * @compl_list: list of completed cbs
 */
void mei_irq_compl_handler(struct mei_device *dev, struct mei_cl_cb *compl_list)
{
	struct mei_cl_cb *cb, *next;
	struct mei_cl *cl;

	list_for_each_entry_safe(cb, next, &compl_list->list, list) {
		cl = cb->cl;
		list_del_init(&cb->list);

		dev_dbg(dev->dev, "completing callback.\n");
		if (cl == &dev->iamthif_cl)
			mei_amthif_complete(cl, cb);
		else
			mei_cl_complete(cl, cb);
	}
}
EXPORT_SYMBOL_GPL(mei_irq_compl_handler);

/**
 * mei_cl_hbm_equal - check if hbm is addressed to the client
 *
 * @cl: host client
 * @mei_hdr: header of mei client message
 *
 * Return: true if matches, false otherwise
 */
static inline int mei_cl_hbm_equal(struct mei_cl *cl,
				   struct mei_msg_hdr *mei_hdr)
{
	return mei_cl_host_addr(cl) == mei_hdr->host_addr &&
		mei_cl_me_id(cl) == mei_hdr->me_addr;
}

/**
 * mei_irq_discard_msg - discard received message
 *
 * @dev: mei device
 * @hdr: message header
 */
void mei_irq_discard_msg(struct mei_device *dev, struct mei_msg_hdr *hdr)
{
	/*
	 * no need to check for size as it is guaranteed
	 * that length fits into rd_msg_buf
	 */
	mei_read_slots(dev, dev->rd_msg_buf, hdr->length);
	dev_dbg(dev->dev, "discarding message " MEI_HDR_FMT "\n",
		MEI_HDR_PRM(hdr));
}

/**
 * mei_cl_irq_read_msg - process client message
 *
 * @cl: reading client
 * @mei_hdr: header of mei client message
 * @complete_list: completion list
 *
 * Return: always 0
 */
int mei_cl_irq_read_msg(struct mei_cl *cl,
			struct mei_msg_hdr *mei_hdr,
			struct mei_cl_cb *complete_list)
{
	struct mei_device *dev = cl->dev;
	struct mei_cl_cb *cb;
	unsigned char *buffer = NULL;
	size_t buf_sz;

	cb = list_first_entry_or_null(&cl->rd_pending, struct mei_cl_cb, list);
	if (!cb) {
		cl_err(dev, cl, "pending read cb not found\n");
		goto out;
	}

	if (!mei_cl_is_connected(cl)) {
		cl_dbg(dev, cl, "not connected\n");
		cb->status = -ENODEV;
		goto out;
	}

	if (cb->buf.size == 0 || cb->buf.data == NULL) {
		cl_err(dev, cl, "response buffer is not allocated.\n");
		list_move_tail(&cb->list, &complete_list->list);
		cb->status = -ENOMEM;
		goto out;
	}

	buf_sz = mei_hdr->length + cb->buf_idx;
	/* catch for integer overflow */
	if (buf_sz < cb->buf_idx) {
		cl_err(dev, cl, "message is too big len %d idx %zu\n",
		       mei_hdr->length, cb->buf_idx);
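		/* complete the cb with -EMSGSIZE; buffer stays NULL here,
		 * so the out: label below discards the incoming payload
		 */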
		list_move_tail(&cb->list, &complete_list->list);
		cb->status = -EMSGSIZE;
		goto out;
	}

	if (cb->buf.size < buf_sz) {
		cl_dbg(dev, cl, "message overflow. size %zu len %d idx %zu\n",
			cb->buf.size, mei_hdr->length, cb->buf_idx);
		buffer = krealloc(cb->buf.data, buf_sz, GFP_KERNEL);

		if (!buffer) {
			cb->status = -ENOMEM;
			list_move_tail(&cb->list, &complete_list->list);
			goto out;
		}
		cb->buf.data = buffer;
		cb->buf.size = buf_sz;
	}

	buffer = cb->buf.data + cb->buf_idx;
	mei_read_slots(dev, buffer, mei_hdr->length);

	cb->buf_idx += mei_hdr->length;

	if (mei_hdr->msg_complete) {
		cl_dbg(dev, cl, "completed read length = %zu\n", cb->buf_idx);
		list_move_tail(&cb->list, &complete_list->list);
	} else {
		pm_runtime_mark_last_busy(dev->dev);
		pm_request_autosuspend(dev->dev);
	}

out:
	if (!buffer)
		mei_irq_discard_msg(dev, mei_hdr);

	return 0;
}

/**
 * mei_cl_irq_disconnect_rsp - send disconnection response message
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * Return: 0, OK; otherwise, error.
 */
static int mei_cl_irq_disconnect_rsp(struct mei_cl *cl, struct mei_cl_cb *cb,
				     struct mei_cl_cb *cmpl_list)
{
	struct mei_device *dev = cl->dev;
	u32 msg_slots;
	int slots;
	int ret;

	slots = mei_hbuf_empty_slots(dev);
	msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_response));

	if (slots < msg_slots)
		return -EMSGSIZE;

	ret = mei_hbm_cl_disconnect_rsp(dev, cl);
	list_move_tail(&cb->list, &cmpl_list->list);

	return ret;
}

/**
 * mei_cl_irq_read - processes client read related operation from the
 *	interrupt thread context - request for flow control credits
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * Return: 0, OK; otherwise, error.
 */
static int mei_cl_irq_read(struct mei_cl *cl, struct mei_cl_cb *cb,
			   struct mei_cl_cb *cmpl_list)
{
	struct mei_device *dev = cl->dev;
	u32 msg_slots;
	int slots;
	int ret;

	msg_slots = mei_data2slots(sizeof(struct hbm_flow_control));
	slots = mei_hbuf_empty_slots(dev);

	if (slots < msg_slots)
		return -EMSGSIZE;

	ret = mei_hbm_cl_flow_control_req(dev, cl);
	if (ret) {
		cl->status = ret;
		cb->buf_idx = 0;
		list_move_tail(&cb->list, &cmpl_list->list);
		return ret;
	}

	list_move_tail(&cb->list, &cl->rd_pending);

	return 0;
}

static inline bool hdr_is_hbm(struct mei_msg_hdr *mei_hdr)
{
	return mei_hdr->host_addr == 0 && mei_hdr->me_addr == 0;
}

static inline bool hdr_is_fixed(struct mei_msg_hdr *mei_hdr)
{
	return mei_hdr->host_addr == 0 && mei_hdr->me_addr != 0;
}

/**
 * mei_irq_read_handler - bottom half read routine after ISR to
 *	handle the read processing.
 *
 * @dev: the device structure
 * @cmpl_list: An instance of our list structure
 * @slots: slots to read.
 *
 * Return: 0 on success, <0 on failure.
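 *
 * Locking: expected to be called from the interrupt thread with
 *	dev->device_lock held.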
 */
int mei_irq_read_handler(struct mei_device *dev,
		struct mei_cl_cb *cmpl_list, s32 *slots)
{
	struct mei_msg_hdr *mei_hdr;
	struct mei_cl *cl;
	int ret;

	if (!dev->rd_msg_hdr) {
		dev->rd_msg_hdr = mei_read_hdr(dev);
		(*slots)--;
		dev_dbg(dev->dev, "slots =%08x.\n", *slots);
	}
	mei_hdr = (struct mei_msg_hdr *) &dev->rd_msg_hdr;
	dev_dbg(dev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr));

	if (mei_hdr->reserved || !dev->rd_msg_hdr) {
		dev_err(dev->dev, "corrupted message header 0x%08X\n",
			dev->rd_msg_hdr);
		ret = -EBADMSG;
		goto end;
	}

	if (mei_slots2data(*slots) < mei_hdr->length) {
		dev_err(dev->dev, "less data available than length=%08x.\n",
			*slots);
		/* we can't read the message */
		ret = -ENODATA;
		goto end;
	}

	/* HBM message */
	if (hdr_is_hbm(mei_hdr)) {
		ret = mei_hbm_dispatch(dev, mei_hdr);
		if (ret) {
			dev_dbg(dev->dev, "mei_hbm_dispatch failed ret = %d\n",
				ret);
			goto end;
		}
		goto reset_slots;
	}

	/* find recipient cl */
	list_for_each_entry(cl, &dev->file_list, link) {
		if (mei_cl_hbm_equal(cl, mei_hdr)) {
			cl_dbg(dev, cl, "got a message\n");
			break;
		}
	}

	/* if no recipient cl was found we assume corrupted header */
	if (&cl->link == &dev->file_list) {
		/* A message for not connected fixed address clients
		 * should be silently discarded
		 */
		if (hdr_is_fixed(mei_hdr)) {
			mei_irq_discard_msg(dev, mei_hdr);
			ret = 0;
			goto reset_slots;
		}
		dev_err(dev->dev, "no destination client found 0x%08X\n",
			dev->rd_msg_hdr);
		ret = -EBADMSG;
		goto end;
	}

	if (cl == &dev->iamthif_cl) {
		ret = mei_amthif_irq_read_msg(cl, mei_hdr, cmpl_list);
	} else {
		ret = mei_cl_irq_read_msg(cl, mei_hdr, cmpl_list);
	}


reset_slots:
	/* reset the number of slots and header */
	*slots = mei_count_full_read_slots(dev);
	dev->rd_msg_hdr = 0;

	if (*slots == -EOVERFLOW) {
		/* overflow - reset */
		dev_err(dev->dev, "resetting due to slots overflow.\n");
		/* set the event since message has been read */
		ret = -ERANGE;
		goto end;
	}
end:
	return ret;
}
EXPORT_SYMBOL_GPL(mei_irq_read_handler);


/**
 * mei_irq_write_handler - dispatch write requests
 *	after irq received
 *
 * @dev: the device structure
 * @cmpl_list: An instance of our list structure
 *
 * Return: 0 on success, <0 on failure.
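 *
 * Locking: expected to be called from the interrupt thread with
 *	dev->device_lock held.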
 */
int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list)
{

	struct mei_cl *cl;
	struct mei_cl_cb *cb, *next;
	struct mei_cl_cb *list;
	s32 slots;
	int ret;


	if (!mei_hbuf_acquire(dev))
		return 0;

	slots = mei_hbuf_empty_slots(dev);
	if (slots <= 0)
		return -EMSGSIZE;

	/* complete all waiting for write CB */
	dev_dbg(dev->dev, "complete all waiting for write cb.\n");

	list = &dev->write_waiting_list;
	list_for_each_entry_safe(cb, next, &list->list, list) {
		cl = cb->cl;

		cl->status = 0;
		cl_dbg(dev, cl, "MEI WRITE COMPLETE\n");
		cl->writing_state = MEI_WRITE_COMPLETE;
		list_move_tail(&cb->list, &cmpl_list->list);
	}

	/* complete control write list CB */
	dev_dbg(dev->dev, "complete control write list cb.\n");
	list_for_each_entry_safe(cb, next, &dev->ctrl_wr_list.list, list) {
		cl = cb->cl;
		switch (cb->fop_type) {
		case MEI_FOP_DISCONNECT:
			/* send disconnect message */
			ret = mei_cl_irq_disconnect(cl, cb, cmpl_list);
			if (ret)
				return ret;

			break;
		case MEI_FOP_READ:
			/* send flow control message */
			ret = mei_cl_irq_read(cl, cb, cmpl_list);
			if (ret)
				return ret;

			break;
		case MEI_FOP_CONNECT:
			/* connect message */
			ret = mei_cl_irq_connect(cl, cb, cmpl_list);
			if (ret)
				return ret;

			break;
		case MEI_FOP_DISCONNECT_RSP:
			/* send disconnect resp */
			ret = mei_cl_irq_disconnect_rsp(cl, cb, cmpl_list);
			if (ret)
				return ret;
			break;

		case MEI_FOP_NOTIFY_START:
		case MEI_FOP_NOTIFY_STOP:
			ret = mei_cl_irq_notify(cl, cb, cmpl_list);
			if (ret)
				return ret;
			break;
		default:
			BUG();
		}

	}
	/* complete write list CB */
	dev_dbg(dev->dev, "complete write list cb.\n");
	list_for_each_entry_safe(cb, next, &dev->write_list.list, list) {
		cl = cb->cl;
		if (cl == &dev->iamthif_cl)
			ret = mei_amthif_irq_write(cl, cb, cmpl_list);
		else
			ret = mei_cl_irq_write(cl, cb, cmpl_list);
		if (ret)
			return ret;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(mei_irq_write_handler);


/**
 * mei_connect_timeout - connect/disconnect timeouts
 *
 * @cl: host client
 */
static void mei_connect_timeout(struct mei_cl *cl)
{
	struct mei_device *dev = cl->dev;

	if (cl->state == MEI_FILE_CONNECTING) {
		if (dev->hbm_f_dot_supported) {
			cl->state = MEI_FILE_DISCONNECT_REQUIRED;
			wake_up(&cl->wait);
			return;
		}
	}
	mei_reset(dev);
}

/**
 * mei_timer - timer function.
 *
 * @work: pointer to the work_struct structure
 *
 */
void mei_timer(struct work_struct *work)
{
	struct mei_cl *cl;

	struct mei_device *dev = container_of(work,
					struct mei_device, timer_work.work);


	mutex_lock(&dev->device_lock);

	/* Catch interrupt stalls during HBM init handshake */
	if (dev->dev_state == MEI_DEV_INIT_CLIENTS &&
	    dev->hbm_state != MEI_HBM_IDLE) {

		if (dev->init_clients_timer) {
			if (--dev->init_clients_timer == 0) {
				dev_err(dev->dev, "timer: init clients timeout hbm_state = %d.\n",
					dev->hbm_state);
				mei_reset(dev);
				goto out;
			}
		}
	}

	if (dev->dev_state != MEI_DEV_ENABLED)
		goto out;

	/*** connect/disconnect timeouts ***/
	list_for_each_entry(cl, &dev->file_list, link) {
		if (cl->timer_count) {
			if (--cl->timer_count == 0) {
				dev_err(dev->dev, "timer: connect/disconnect timeout.\n");
				mei_connect_timeout(cl);
				goto out;
			}
		}
	}

	if (!mei_cl_is_connected(&dev->iamthif_cl))
		goto out;

	if (dev->iamthif_stall_timer) {
		if (--dev->iamthif_stall_timer == 0) {
			dev_err(dev->dev, "timer: amthif hung.\n");
			mei_reset(dev);
			dev->iamthif_canceled = false;
			dev->iamthif_state = MEI_IAMTHIF_IDLE;

			mei_io_cb_free(dev->iamthif_current_cb);
			dev->iamthif_current_cb = NULL;

			dev->iamthif_fp = NULL;
			mei_amthif_run_next_cmd(dev);
		}
	}

out:
	if (dev->dev_state != MEI_DEV_DISABLED)
		schedule_delayed_work(&dev->timer_work, 2 * HZ);
	mutex_unlock(&dev->device_lock);
}