/*
 *
 * Intel Management Engine Interface (Intel MEI) Linux driver
 * Copyright (c) 2003-2012, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 */


#include <linux/export.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/fs.h>
#include <linux/jiffies.h>
#include <linux/slab.h>

#include <linux/mei.h>

#include "mei_dev.h"
#include "hbm.h"
#include "client.h"


/**
 * mei_irq_compl_handler - dispatch complete handlers
 *	for the completed callbacks
 *
 * @dev: mei device
 * @compl_list: list of completed cbs
 */
void mei_irq_compl_handler(struct mei_device *dev, struct mei_cl_cb *compl_list)
{
	struct mei_cl_cb *cb, *next;
	struct mei_cl *cl;

	list_for_each_entry_safe(cb, next, &compl_list->list, list) {
		cl = cb->cl;
		list_del(&cb->list);
		if (!cl)
			continue;

		dev_dbg(dev->dev, "completing call back.\n");
		if (cl == &dev->iamthif_cl)
			mei_amthif_complete(dev, cb);
		else
			mei_cl_complete(cl, cb);
	}
}
EXPORT_SYMBOL_GPL(mei_irq_compl_handler);

/**
 * mei_cl_hbm_equal - check if hbm is addressed to the client
 *
 * @cl: host client
 * @mei_hdr: header of mei client message
 *
 * Return: true if matches, false otherwise
 */
static inline int mei_cl_hbm_equal(struct mei_cl *cl,
				   struct mei_msg_hdr *mei_hdr)
{
	return cl->host_client_id == mei_hdr->host_addr &&
		cl->me_client_id == mei_hdr->me_addr;
}
/**
 * mei_cl_is_reading - checks if the client
 *	is the one to read this message
 *
 * @cl: mei client
 * @mei_hdr: header of mei message
 *
 * Return: true on match and false otherwise
 */
static bool mei_cl_is_reading(struct mei_cl *cl, struct mei_msg_hdr *mei_hdr)
{
	return mei_cl_hbm_equal(cl, mei_hdr) &&
		cl->state == MEI_FILE_CONNECTED &&
		cl->reading_state != MEI_READ_COMPLETE;
}

/**
 * mei_cl_irq_read_msg - process client message
 *
 * @dev: the device structure
 * @mei_hdr: header of mei client message
 * @complete_list: An instance of our list structure
 *
 * Return: 0 on success, <0 on failure.
 */
static int mei_cl_irq_read_msg(struct mei_device *dev,
			       struct mei_msg_hdr *mei_hdr,
			       struct mei_cl_cb *complete_list)
{
	struct mei_cl *cl;
	struct mei_cl_cb *cb, *next;
	unsigned char *buffer = NULL;

	list_for_each_entry_safe(cb, next, &dev->read_list.list, list) {
		cl = cb->cl;
		if (!cl || !mei_cl_is_reading(cl, mei_hdr))
			continue;

		cl->reading_state = MEI_READING;

		if (cb->response_buffer.size == 0 ||
		    cb->response_buffer.data == NULL) {
			cl_err(dev, cl, "response buffer is not allocated.\n");
			list_del(&cb->list);
			return -ENOMEM;
		}

		if (cb->response_buffer.size < mei_hdr->length + cb->buf_idx) {
			cl_dbg(dev, cl, "message overflow. size %d len %d idx %ld\n",
				cb->response_buffer.size,
				mei_hdr->length, cb->buf_idx);
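			/*
			 * Receive buffer is too small for the incoming
			 * fragment: grow it with krealloc(), which preserves
			 * the cb->buf_idx bytes accumulated so far.
			 */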
			buffer = krealloc(cb->response_buffer.data,
					  mei_hdr->length + cb->buf_idx,
					  GFP_KERNEL);

			if (!buffer) {
				list_del(&cb->list);
				return -ENOMEM;
			}
			cb->response_buffer.data = buffer;
			cb->response_buffer.size =
				mei_hdr->length + cb->buf_idx;
		}

		buffer = cb->response_buffer.data + cb->buf_idx;
		mei_read_slots(dev, buffer, mei_hdr->length);

		cb->buf_idx += mei_hdr->length;
		if (mei_hdr->msg_complete) {
			cl->status = 0;
			list_del(&cb->list);
			cl_dbg(dev, cl, "completed read length = %lu\n",
				cb->buf_idx);
			list_add_tail(&cb->list, &complete_list->list);
		}
		break;
	}

	dev_dbg(dev->dev, "message read\n");
	if (!buffer) {
		mei_read_slots(dev, dev->rd_msg_buf, mei_hdr->length);
		dev_dbg(dev->dev, "discarding message " MEI_HDR_FMT "\n",
				MEI_HDR_PRM(mei_hdr));
	}

	return 0;
}

/**
 * mei_cl_irq_disconnect_rsp - send disconnection response message
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * Return: 0, OK; otherwise, error.
 */
static int mei_cl_irq_disconnect_rsp(struct mei_cl *cl, struct mei_cl_cb *cb,
				     struct mei_cl_cb *cmpl_list)
{
	struct mei_device *dev = cl->dev;
	u32 msg_slots;
	int slots;
	int ret;

	slots = mei_hbuf_empty_slots(dev);
	msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_response));

	if (slots < msg_slots)
		return -EMSGSIZE;

	ret = mei_hbm_cl_disconnect_rsp(dev, cl);

	cl->state = MEI_FILE_DISCONNECTED;
	cl->status = 0;
	list_del(&cb->list);
	mei_io_cb_free(cb);

	return ret;
}


/**
 * mei_cl_irq_disconnect - processes close related operation from
 *	interrupt thread context - send disconnect request
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * Return: 0, OK; otherwise, error.
 */
static int mei_cl_irq_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb,
				 struct mei_cl_cb *cmpl_list)
{
	struct mei_device *dev = cl->dev;
	u32 msg_slots;
	int slots;

	msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_request));
	slots = mei_hbuf_empty_slots(dev);

	if (slots < msg_slots)
		return -EMSGSIZE;

	if (mei_hbm_cl_disconnect_req(dev, cl)) {
		cl->status = 0;
		cb->buf_idx = 0;
		list_move_tail(&cb->list, &cmpl_list->list);
		return -EIO;
	}

	cl->state = MEI_FILE_DISCONNECTING;
	cl->status = 0;
	cb->buf_idx = 0;
	list_move_tail(&cb->list, &dev->ctrl_rd_list.list);
	cl->timer_count = MEI_CONNECT_TIMEOUT;

	return 0;
}

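/*
 * Note: each of the mei_cl_irq_* helpers above and below emits a single
 * HBM message. A helper first checks that the host buffer has enough
 * empty slots for that message and returns -EMSGSIZE otherwise, leaving
 * the callback queued so it can be retried on a later interrupt cycle.
 */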
/**
 * mei_cl_irq_read - processes client read related operation from the
 *	interrupt thread context - request for flow control credits
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * Return: 0, OK; otherwise, error.
 */
static int mei_cl_irq_read(struct mei_cl *cl, struct mei_cl_cb *cb,
			   struct mei_cl_cb *cmpl_list)
{
	struct mei_device *dev = cl->dev;
	u32 msg_slots;
	int slots;
	int ret;

	msg_slots = mei_data2slots(sizeof(struct hbm_flow_control));
	slots = mei_hbuf_empty_slots(dev);

	if (slots < msg_slots)
		return -EMSGSIZE;

	ret = mei_hbm_cl_flow_control_req(dev, cl);
	if (ret) {
		cl->status = ret;
		cb->buf_idx = 0;
		list_move_tail(&cb->list, &cmpl_list->list);
		return ret;
	}

	list_move_tail(&cb->list, &dev->read_list.list);

	return 0;
}


/**
 * mei_cl_irq_connect - send connect request in irq_thread context
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * Return: 0, OK; otherwise, error.
 */
static int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb,
			      struct mei_cl_cb *cmpl_list)
{
	struct mei_device *dev = cl->dev;
	u32 msg_slots;
	int slots;
	int ret;

	msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_request));
	slots = mei_hbuf_empty_slots(dev);

	if (mei_cl_is_other_connecting(cl))
		return 0;

	if (slots < msg_slots)
		return -EMSGSIZE;

	cl->state = MEI_FILE_CONNECTING;

	ret = mei_hbm_cl_connect_req(dev, cl);
	if (ret) {
		cl->status = ret;
		cb->buf_idx = 0;
		list_del(&cb->list);
		return ret;
	}

	list_move_tail(&cb->list, &dev->ctrl_rd_list.list);
	cl->timer_count = MEI_CONNECT_TIMEOUT;
	return 0;
}

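/*
 * A header with host_addr == 0 and me_addr == 0 carries an HBM (bus
 * management) message and is handed to mei_hbm_dispatch(); any other
 * header is routed to the matching host client on dev->file_list.
 */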
/**
 * mei_irq_read_handler - bottom half read routine after ISR to
 *	handle the read processing.
 *
 * @dev: the device structure
 * @cmpl_list: An instance of our list structure
 * @slots: slots to read.
 *
 * Return: 0 on success, <0 on failure.
 */
int mei_irq_read_handler(struct mei_device *dev,
			 struct mei_cl_cb *cmpl_list, s32 *slots)
{
	struct mei_msg_hdr *mei_hdr;
	struct mei_cl *cl;
	int ret;

	if (!dev->rd_msg_hdr) {
		dev->rd_msg_hdr = mei_read_hdr(dev);
		(*slots)--;
		dev_dbg(dev->dev, "slots =%08x.\n", *slots);
	}
	mei_hdr = (struct mei_msg_hdr *) &dev->rd_msg_hdr;
	dev_dbg(dev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr));

	if (mei_hdr->reserved || !dev->rd_msg_hdr) {
		dev_err(dev->dev, "corrupted message header 0x%08X\n",
				dev->rd_msg_hdr);
		ret = -EBADMSG;
		goto end;
	}

	if (mei_slots2data(*slots) < mei_hdr->length) {
		dev_err(dev->dev, "less data available than length=%08x.\n",
				*slots);
		/* we can't read the message */
		ret = -ENODATA;
		goto end;
	}

	/* HBM message */
	if (mei_hdr->host_addr == 0 && mei_hdr->me_addr == 0) {
		ret = mei_hbm_dispatch(dev, mei_hdr);
		if (ret) {
			dev_dbg(dev->dev, "mei_hbm_dispatch failed ret = %d\n",
					ret);
			goto end;
		}
		goto reset_slots;
	}

	/* find recipient cl */
	list_for_each_entry(cl, &dev->file_list, link) {
		if (mei_cl_hbm_equal(cl, mei_hdr)) {
			cl_dbg(dev, cl, "got a message\n");
			break;
		}
	}

	/* if no recipient cl was found we assume corrupted header */
	if (&cl->link == &dev->file_list) {
		dev_err(dev->dev, "no destination client found 0x%08X\n",
				dev->rd_msg_hdr);
		ret = -EBADMSG;
		goto end;
	}

	if (mei_hdr->host_addr == dev->iamthif_cl.host_client_id &&
	    MEI_FILE_CONNECTED == dev->iamthif_cl.state &&
	    dev->iamthif_state == MEI_IAMTHIF_READING) {

		ret = mei_amthif_irq_read_msg(dev, mei_hdr, cmpl_list);
		if (ret) {
			dev_err(dev->dev, "mei_amthif_irq_read_msg failed = %d\n",
					ret);
			goto end;
		}
	} else {
		ret = mei_cl_irq_read_msg(dev, mei_hdr, cmpl_list);
		if (ret) {
			dev_err(dev->dev, "mei_cl_irq_read_msg failed = %d\n",
					ret);
			goto end;
		}
	}

reset_slots:
	/* reset the number of slots and header */
	*slots = mei_count_full_read_slots(dev);
	dev->rd_msg_hdr = 0;

	if (*slots == -EOVERFLOW) {
		/* overflow - reset */
		dev_err(dev->dev, "resetting due to slots overflow.\n");
		/* set the event since message has been read */
		ret = -ERANGE;
		goto end;
	}
end:
	return ret;
}
EXPORT_SYMBOL_GPL(mei_irq_read_handler);

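/*
 * The write path below is processed in order: callbacks waiting for a
 * write completion, the control write list (connect/disconnect/flow
 * control requests) and finally the data write list, all limited by
 * the empty slots currently available in the host buffer.
 */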
/**
 * mei_irq_write_handler - dispatch write requests
 *	after irq received
 *
 * @dev: the device structure
 * @cmpl_list: An instance of our list structure
 *
 * Return: 0 on success, <0 on failure.
 */
int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list)
{

	struct mei_cl *cl;
	struct mei_cl_cb *cb, *next;
	struct mei_cl_cb *list;
	s32 slots;
	int ret;


	if (!mei_hbuf_acquire(dev))
		return 0;

	slots = mei_hbuf_empty_slots(dev);
	if (slots <= 0)
		return -EMSGSIZE;

	/* complete all waiting for write CB */
	dev_dbg(dev->dev, "complete all waiting for write cb.\n");

	list = &dev->write_waiting_list;
	list_for_each_entry_safe(cb, next, &list->list, list) {
		cl = cb->cl;
		if (cl == NULL)
			continue;

		cl->status = 0;
		list_del(&cb->list);
		if (cb->fop_type == MEI_FOP_WRITE &&
		    cl != &dev->iamthif_cl) {
			cl_dbg(dev, cl, "MEI WRITE COMPLETE\n");
			cl->writing_state = MEI_WRITE_COMPLETE;
			list_add_tail(&cb->list, &cmpl_list->list);
		}
		if (cl == &dev->iamthif_cl) {
			cl_dbg(dev, cl, "check iamthif flow control.\n");
			if (dev->iamthif_flow_control_pending) {
				ret = mei_amthif_irq_read(dev, &slots);
				if (ret)
					return ret;
			}
		}
	}

	if (dev->wd_state == MEI_WD_STOPPING) {
		dev->wd_state = MEI_WD_IDLE;
		wake_up(&dev->wait_stop_wd);
	}

	if (mei_cl_is_connected(&dev->wd_cl)) {
		if (dev->wd_pending &&
		    mei_cl_flow_ctrl_creds(&dev->wd_cl) > 0) {
			ret = mei_wd_send(dev);
			if (ret)
				return ret;
			dev->wd_pending = false;
		}
	}

	/* complete control write list CB */
	dev_dbg(dev->dev, "complete control write list cb.\n");
	list_for_each_entry_safe(cb, next, &dev->ctrl_wr_list.list, list) {
		cl = cb->cl;
		if (!cl) {
			list_del(&cb->list);
			return -ENODEV;
		}
		switch (cb->fop_type) {
		case MEI_FOP_DISCONNECT:
			/* send disconnect message */
			ret = mei_cl_irq_disconnect(cl, cb, cmpl_list);
			if (ret)
				return ret;

			break;
		case MEI_FOP_READ:
			/* send flow control message */
			ret = mei_cl_irq_read(cl, cb, cmpl_list);
			if (ret)
				return ret;

			break;
		case MEI_FOP_CONNECT:
			/* connect message */
			ret = mei_cl_irq_connect(cl, cb, cmpl_list);
			if (ret)
				return ret;

			break;
		case MEI_FOP_DISCONNECT_RSP:
			/* send disconnect resp */
			ret = mei_cl_irq_disconnect_rsp(cl, cb, cmpl_list);
			if (ret)
				return ret;
			break;
		default:
			BUG();
		}

	}
	/* complete write list CB */
	dev_dbg(dev->dev, "complete write list cb.\n");
	list_for_each_entry_safe(cb, next, &dev->write_list.list, list) {
		cl = cb->cl;
		if (cl == NULL)
			continue;
		if (cl == &dev->iamthif_cl)
			ret = mei_amthif_irq_write(cl, cb, cmpl_list);
		else
			ret = mei_cl_irq_write(cl, cb, cmpl_list);
		if (ret)
			return ret;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(mei_irq_write_handler);

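/*
 * mei_timer() runs from the timer_work delayed work item roughly every
 * two seconds: it decrements the HBM init, connect/disconnect and
 * AMTHIF stall counters under device_lock and resets the device when
 * one of them expires.
 */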
/**
 * mei_timer - timer function.
 *
 * @work: pointer to the work_struct structure
 *
 */
void mei_timer(struct work_struct *work)
{
	unsigned long timeout;
	struct mei_cl *cl;

	struct mei_device *dev = container_of(work,
					struct mei_device, timer_work.work);


	mutex_lock(&dev->device_lock);

	/* Catch interrupt stalls during HBM init handshake */
	if (dev->dev_state == MEI_DEV_INIT_CLIENTS &&
	    dev->hbm_state != MEI_HBM_IDLE) {

		if (dev->init_clients_timer) {
			if (--dev->init_clients_timer == 0) {
				dev_err(dev->dev, "timer: init clients timeout hbm_state = %d.\n",
					dev->hbm_state);
				mei_reset(dev);
				goto out;
			}
		}
	}

	if (dev->dev_state != MEI_DEV_ENABLED)
		goto out;

	/*** connect/disconnect timeouts ***/
	list_for_each_entry(cl, &dev->file_list, link) {
		if (cl->timer_count) {
			if (--cl->timer_count == 0) {
				dev_err(dev->dev, "timer: connect/disconnect timeout.\n");
				mei_reset(dev);
				goto out;
			}
		}
	}

	if (!mei_cl_is_connected(&dev->iamthif_cl))
		goto out;

	if (dev->iamthif_stall_timer) {
		if (--dev->iamthif_stall_timer == 0) {
			dev_err(dev->dev, "timer: amthif hanged.\n");
			mei_reset(dev);
			dev->iamthif_msg_buf_size = 0;
			dev->iamthif_msg_buf_index = 0;
			dev->iamthif_canceled = false;
			dev->iamthif_ioctl = true;
			dev->iamthif_state = MEI_IAMTHIF_IDLE;
			dev->iamthif_timer = 0;

			mei_io_cb_free(dev->iamthif_current_cb);
			dev->iamthif_current_cb = NULL;

			dev->iamthif_file_object = NULL;
			mei_amthif_run_next_cmd(dev);
		}
	}

	if (dev->iamthif_timer) {

		timeout = dev->iamthif_timer +
			mei_secs_to_jiffies(MEI_IAMTHIF_READ_TIMER);

		dev_dbg(dev->dev, "dev->iamthif_timer = %ld\n",
				dev->iamthif_timer);
		dev_dbg(dev->dev, "timeout = %ld\n", timeout);
		dev_dbg(dev->dev, "jiffies = %ld\n", jiffies);
		if (time_after(jiffies, timeout)) {
			/*
			 * User didn't read the AMTHI data on time (15sec)
			 * freeing AMTHI for other requests
			 */

			dev_dbg(dev->dev, "freeing AMTHI for other requests\n");

			mei_io_list_flush(&dev->amthif_rd_complete_list,
				&dev->iamthif_cl);
			mei_io_cb_free(dev->iamthif_current_cb);
			dev->iamthif_current_cb = NULL;

			dev->iamthif_file_object->private_data = NULL;
			dev->iamthif_file_object = NULL;
			dev->iamthif_timer = 0;
			mei_amthif_run_next_cmd(dev);

		}
	}
out:
	if (dev->dev_state != MEI_DEV_DISABLED)
		schedule_delayed_work(&dev->timer_work, 2 * HZ);
	mutex_unlock(&dev->device_lock);
}