/*
 *
 * Intel Management Engine Interface (Intel MEI) Linux driver
 * Copyright (c) 2003-2012, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 */


#include <linux/export.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/fs.h>
#include <linux/jiffies.h>

#include <linux/mei.h>

#include "mei_dev.h"
#include "hbm.h"
#include "client.h"


/**
 * mei_irq_compl_handler - dispatch complete handlers
 *      for the completed callbacks
 *
 * @dev: mei device
 * @compl_list: list of completed cbs
 */
void mei_irq_compl_handler(struct mei_device *dev, struct mei_cl_cb *compl_list)
{
        struct mei_cl_cb *cb, *next;
        struct mei_cl *cl;

        list_for_each_entry_safe(cb, next, &compl_list->list, list) {
                cl = cb->cl;
                list_del(&cb->list);
                if (!cl)
                        continue;

                dev_dbg(&dev->pdev->dev, "completing call back.\n");
                if (cl == &dev->iamthif_cl)
                        mei_amthif_complete(dev, cb);
                else
                        mei_cl_complete(cl, cb);
        }
}
EXPORT_SYMBOL_GPL(mei_irq_compl_handler);

/**
 * mei_cl_hbm_equal - check if hbm is addressed to the client
 *
 * @cl: host client
 * @mei_hdr: header of mei client message
 *
 * returns true if matches, false otherwise
 */
static inline int mei_cl_hbm_equal(struct mei_cl *cl,
                        struct mei_msg_hdr *mei_hdr)
{
        return cl->host_client_id == mei_hdr->host_addr &&
                cl->me_client_id == mei_hdr->me_addr;
}

/**
 * mei_cl_is_reading - checks if the client
 *      is the one to read this message
 *
 * @cl: mei client
 * @mei_hdr: header of mei message
 *
 * returns true on match and false otherwise
 */
static bool mei_cl_is_reading(struct mei_cl *cl, struct mei_msg_hdr *mei_hdr)
{
        return mei_cl_hbm_equal(cl, mei_hdr) &&
                cl->state == MEI_FILE_CONNECTED &&
                cl->reading_state != MEI_READ_COMPLETE;
}

/**
 * mei_cl_irq_read_msg - process client message
 *
 * @dev: the device structure
 * @mei_hdr: header of mei client message
 * @complete_list: An instance of our list structure
 *
 * returns 0 on success, <0 on failure.
 */
static int mei_cl_irq_read_msg(struct mei_device *dev,
                               struct mei_msg_hdr *mei_hdr,
                               struct mei_cl_cb *complete_list)
{
        struct mei_cl *cl;
        struct mei_cl_cb *cb, *next;
        unsigned char *buffer = NULL;

        list_for_each_entry_safe(cb, next, &dev->read_list.list, list) {
                cl = cb->cl;
                if (!cl || !mei_cl_is_reading(cl, mei_hdr))
                        continue;

                cl->reading_state = MEI_READING;

                if (cb->response_buffer.size == 0 ||
                    cb->response_buffer.data == NULL) {
                        cl_err(dev, cl, "response buffer is not allocated.\n");
                        list_del(&cb->list);
                        return -ENOMEM;
                }

                if (cb->response_buffer.size < mei_hdr->length + cb->buf_idx) {
                        cl_dbg(dev, cl, "message overflow. size %d len %d idx %ld\n",
                                cb->response_buffer.size,
                                mei_hdr->length, cb->buf_idx);
                        buffer = krealloc(cb->response_buffer.data,
                                          mei_hdr->length + cb->buf_idx,
                                          GFP_KERNEL);

                        if (!buffer) {
                                cl_err(dev, cl, "allocation failed.\n");
                                list_del(&cb->list);
                                return -ENOMEM;
                        }
                        cb->response_buffer.data = buffer;
                        cb->response_buffer.size =
                                mei_hdr->length + cb->buf_idx;
                }

                buffer = cb->response_buffer.data + cb->buf_idx;
                mei_read_slots(dev, buffer, mei_hdr->length);

                cb->buf_idx += mei_hdr->length;
                if (mei_hdr->msg_complete) {
                        cl->status = 0;
                        list_del(&cb->list);
                        cl_dbg(dev, cl, "completed read length = %lu\n",
                                cb->buf_idx);
                        list_add_tail(&cb->list, &complete_list->list);
                }
                break;
        }

        dev_dbg(&dev->pdev->dev, "message read\n");
        if (!buffer) {
                mei_read_slots(dev, dev->rd_msg_buf, mei_hdr->length);
                dev_dbg(&dev->pdev->dev, "discarding message " MEI_HDR_FMT "\n",
                                MEI_HDR_PRM(mei_hdr));
        }

        return 0;
}

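/*
 * The mei_cl_irq_* helpers below share one pattern: the size of the HBM
 * message about to be sent is converted to host buffer slots with
 * mei_data2slots() and compared against mei_hbuf_empty_slots().  When the
 * buffer cannot hold the message the helper returns -EMSGSIZE, the callback
 * stays on its control list and the send is retried on a later pass of
 * mei_irq_write_handler().
 */
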
/**
 * mei_cl_irq_disconnect_rsp - send disconnection response message
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * returns 0, OK; otherwise, error.
 */
static int mei_cl_irq_disconnect_rsp(struct mei_cl *cl, struct mei_cl_cb *cb,
                                     struct mei_cl_cb *cmpl_list)
{
        struct mei_device *dev = cl->dev;
        u32 msg_slots;
        int slots;
        int ret;

        slots = mei_hbuf_empty_slots(dev);
        msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_response));

        if (slots < msg_slots)
                return -EMSGSIZE;

        ret = mei_hbm_cl_disconnect_rsp(dev, cl);

        cl->state = MEI_FILE_DISCONNECTED;
        cl->status = 0;
        list_del(&cb->list);
        mei_io_cb_free(cb);

        return ret;
}



/**
 * mei_cl_irq_close - processes close related operation from
 *      interrupt thread context - send disconnect request
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * returns 0, OK; otherwise, error.
 */
static int mei_cl_irq_close(struct mei_cl *cl, struct mei_cl_cb *cb,
                            struct mei_cl_cb *cmpl_list)
{
        struct mei_device *dev = cl->dev;
        u32 msg_slots;
        int slots;

        msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_request));
        slots = mei_hbuf_empty_slots(dev);

        if (slots < msg_slots)
                return -EMSGSIZE;

        if (mei_hbm_cl_disconnect_req(dev, cl)) {
                cl->status = 0;
                cb->buf_idx = 0;
                list_move_tail(&cb->list, &cmpl_list->list);
                return -EIO;
        }

        cl->state = MEI_FILE_DISCONNECTING;
        cl->status = 0;
        cb->buf_idx = 0;
        list_move_tail(&cb->list, &dev->ctrl_rd_list.list);
        cl->timer_count = MEI_CONNECT_TIMEOUT;

        return 0;
}

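/*
 * "Read" in interrupt context means granting the peer a flow control
 * credit: once mei_hbm_cl_flow_control_req() succeeds the callback is
 * parked on dev->read_list and the incoming payload is later copied by
 * mei_cl_irq_read_msg() when the matching message header arrives.
 */
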
/**
 * mei_cl_irq_read - processes client read related operation from the
 *      interrupt thread context - request for flow control credits
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * returns 0, OK; otherwise, error.
 */
static int mei_cl_irq_read(struct mei_cl *cl, struct mei_cl_cb *cb,
                           struct mei_cl_cb *cmpl_list)
{
        struct mei_device *dev = cl->dev;
        u32 msg_slots;
        int slots;
        int ret;

        msg_slots = mei_data2slots(sizeof(struct hbm_flow_control));
        slots = mei_hbuf_empty_slots(dev);

        if (slots < msg_slots)
                return -EMSGSIZE;

        ret = mei_hbm_cl_flow_control_req(dev, cl);
        if (ret) {
                cl->status = ret;
                cb->buf_idx = 0;
                list_move_tail(&cb->list, &cmpl_list->list);
                return ret;
        }

        list_move_tail(&cb->list, &dev->read_list.list);

        return 0;
}


/**
 * mei_cl_irq_connect - send connect request in irq_thread context
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * returns 0, OK; otherwise, error.
 */
static int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb,
                              struct mei_cl_cb *cmpl_list)
{
        struct mei_device *dev = cl->dev;
        u32 msg_slots;
        int slots;
        int ret;

        msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_request));
        slots = mei_hbuf_empty_slots(dev);

        if (mei_cl_is_other_connecting(cl))
                return 0;

        if (slots < msg_slots)
                return -EMSGSIZE;

        cl->state = MEI_FILE_CONNECTING;

        ret = mei_hbm_cl_connect_req(dev, cl);
        if (ret) {
                cl->status = ret;
                cb->buf_idx = 0;
                list_del(&cb->list);
                return ret;
        }

        list_move_tail(&cb->list, &dev->ctrl_rd_list.list);
        cl->timer_count = MEI_CONNECT_TIMEOUT;
        return 0;
}

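/*
 * Read path dispatch: a header addressed to host 0 / me 0 is a bus (HBM)
 * message and goes to mei_hbm_dispatch(); a message for the iamthif client
 * while it is in the MEI_IAMTHIF_READING state goes to
 * mei_amthif_irq_read_msg(); anything else is handed to
 * mei_cl_irq_read_msg() above.
 */
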
/**
 * mei_irq_read_handler - bottom half read routine after ISR to
 *      handle the read processing.
 *
 * @dev: the device structure
 * @cmpl_list: An instance of our list structure
 * @slots: slots to read.
 *
 * returns 0 on success, <0 on failure.
 */
int mei_irq_read_handler(struct mei_device *dev,
                struct mei_cl_cb *cmpl_list, s32 *slots)
{
        struct mei_msg_hdr *mei_hdr;
        struct mei_cl *cl;
        int ret;

        if (!dev->rd_msg_hdr) {
                dev->rd_msg_hdr = mei_read_hdr(dev);
                (*slots)--;
                dev_dbg(&dev->pdev->dev, "slots =%08x.\n", *slots);
        }
        mei_hdr = (struct mei_msg_hdr *) &dev->rd_msg_hdr;
        dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr));

        if (mei_hdr->reserved || !dev->rd_msg_hdr) {
                dev_err(&dev->pdev->dev, "corrupted message header 0x%08X\n",
                                dev->rd_msg_hdr);
                ret = -EBADMSG;
                goto end;
        }

        if (mei_slots2data(*slots) < mei_hdr->length) {
                dev_err(&dev->pdev->dev, "less data available than length=%08x.\n",
                                *slots);
                /* we can't read the message */
                ret = -ENODATA;
                goto end;
        }

        /* HBM message */
        if (mei_hdr->host_addr == 0 && mei_hdr->me_addr == 0) {
                ret = mei_hbm_dispatch(dev, mei_hdr);
                if (ret) {
                        dev_dbg(&dev->pdev->dev, "mei_hbm_dispatch failed ret = %d\n",
                                        ret);
                        goto end;
                }
                goto reset_slots;
        }

        /* find recipient cl */
        list_for_each_entry(cl, &dev->file_list, link) {
                if (mei_cl_hbm_equal(cl, mei_hdr)) {
                        cl_dbg(dev, cl, "got a message\n");
                        break;
                }
        }

        /* if no recipient cl was found we assume corrupted header */
        if (&cl->link == &dev->file_list) {
                dev_err(&dev->pdev->dev, "no destination client found 0x%08X\n",
                                dev->rd_msg_hdr);
                ret = -EBADMSG;
                goto end;
        }

        if (mei_hdr->host_addr == dev->iamthif_cl.host_client_id &&
            MEI_FILE_CONNECTED == dev->iamthif_cl.state &&
            dev->iamthif_state == MEI_IAMTHIF_READING) {

                ret = mei_amthif_irq_read_msg(dev, mei_hdr, cmpl_list);
                if (ret) {
                        dev_err(&dev->pdev->dev, "mei_amthif_irq_read_msg failed = %d\n",
                                        ret);
                        goto end;
                }
        } else {
                ret = mei_cl_irq_read_msg(dev, mei_hdr, cmpl_list);
                if (ret) {
                        dev_err(&dev->pdev->dev, "mei_cl_irq_read_msg failed = %d\n",
                                        ret);
                        goto end;
                }
        }

reset_slots:
        /* reset the number of slots and header */
        *slots = mei_count_full_read_slots(dev);
        dev->rd_msg_hdr = 0;

        if (*slots == -EOVERFLOW) {
                /* overflow - reset */
                dev_err(&dev->pdev->dev, "resetting due to slots overflow.\n");
                /* set the event since message has been read */
                ret = -ERANGE;
                goto end;
        }
end:
        return ret;
}
EXPORT_SYMBOL_GPL(mei_irq_read_handler);

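/*
 * Write path ordering: first complete the callbacks waiting for a write to
 * drain, then service the watchdog client, then the control write list
 * (close / read / connect / disconnect response) and finally the data
 * write list.
 */
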
/**
 * mei_irq_write_handler - dispatch write requests
 *      after irq received
 *
 * @dev: the device structure
 * @cmpl_list: An instance of our list structure
 *
 * returns 0 on success, <0 on failure.
 */
int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list)
{

        struct mei_cl *cl;
        struct mei_cl_cb *cb, *next;
        struct mei_cl_cb *list;
        s32 slots;
        int ret;


        if (!mei_hbuf_acquire(dev))
                return 0;

        slots = mei_hbuf_empty_slots(dev);
        if (slots <= 0)
                return -EMSGSIZE;

        /* complete all waiting for write CB */
        dev_dbg(&dev->pdev->dev, "complete all waiting for write cb.\n");

        list = &dev->write_waiting_list;
        list_for_each_entry_safe(cb, next, &list->list, list) {
                cl = cb->cl;
                if (cl == NULL)
                        continue;

                cl->status = 0;
                list_del(&cb->list);
                if (cb->fop_type == MEI_FOP_WRITE &&
                    cl != &dev->iamthif_cl) {
                        cl_dbg(dev, cl, "MEI WRITE COMPLETE\n");
                        cl->writing_state = MEI_WRITE_COMPLETE;
                        list_add_tail(&cb->list, &cmpl_list->list);
                }
                if (cl == &dev->iamthif_cl) {
                        cl_dbg(dev, cl, "check iamthif flow control.\n");
                        if (dev->iamthif_flow_control_pending) {
                                ret = mei_amthif_irq_read(dev, &slots);
                                if (ret)
                                        return ret;
                        }
                }
        }

        if (dev->wd_state == MEI_WD_STOPPING) {
                dev->wd_state = MEI_WD_IDLE;
                wake_up(&dev->wait_stop_wd);
        }

        if (mei_cl_is_connected(&dev->wd_cl)) {
                if (dev->wd_pending &&
                    mei_cl_flow_ctrl_creds(&dev->wd_cl) > 0) {
                        ret = mei_wd_send(dev);
                        if (ret)
                                return ret;
                        dev->wd_pending = false;
                }
        }

        /* complete control write list CB */
        dev_dbg(&dev->pdev->dev, "complete control write list cb.\n");
        list_for_each_entry_safe(cb, next, &dev->ctrl_wr_list.list, list) {
                cl = cb->cl;
                if (!cl) {
                        list_del(&cb->list);
                        return -ENODEV;
                }
                switch (cb->fop_type) {
                case MEI_FOP_CLOSE:
                        /* send disconnect message */
                        ret = mei_cl_irq_close(cl, cb, cmpl_list);
                        if (ret)
                                return ret;

                        break;
                case MEI_FOP_READ:
                        /* send flow control message */
                        ret = mei_cl_irq_read(cl, cb, cmpl_list);
                        if (ret)
                                return ret;

                        break;
                case MEI_FOP_CONNECT:
                        /* connect message */
                        ret = mei_cl_irq_connect(cl, cb, cmpl_list);
                        if (ret)
                                return ret;

                        break;
                case MEI_FOP_DISCONNECT_RSP:
                        /* send disconnect resp */
                        ret = mei_cl_irq_disconnect_rsp(cl, cb, cmpl_list);
                        if (ret)
                                return ret;
                        break;
                default:
                        BUG();
                }

        }
        /* complete write list CB */
        dev_dbg(&dev->pdev->dev, "complete write list cb.\n");
        list_for_each_entry_safe(cb, next, &dev->write_list.list, list) {
                cl = cb->cl;
                if (cl == NULL)
                        continue;
                if (cl == &dev->iamthif_cl)
                        ret = mei_amthif_irq_write(cl, cb, cmpl_list);
                else
                        ret = mei_cl_irq_write(cl, cb, cmpl_list);
                if (ret)
                        return ret;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(mei_irq_write_handler);

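/*
 * The three handlers exported above are driven from the hardware layer's
 * threaded interrupt handler.  A simplified, illustrative sketch only
 * (error handling, reset paths and buffer-ready bookkeeping omitted; the
 * real caller lives in the hw specific code, e.g. hw-me.c):
 *
 *      mutex_lock(&dev->device_lock);
 *      mei_io_list_init(&complete_list);
 *      slots = mei_count_full_read_slots(dev);
 *      while (slots > 0)
 *              rets = mei_irq_read_handler(dev, &complete_list, &slots);
 *      rets = mei_irq_write_handler(dev, &complete_list);
 *      mutex_unlock(&dev->device_lock);
 *      mei_irq_compl_handler(dev, &complete_list);
 */
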
/**
 * mei_timer - timer function.
 *
 * @work: pointer to the work_struct structure
 *
 */
void mei_timer(struct work_struct *work)
{
        unsigned long timeout;
        struct mei_cl *cl;
        struct mei_cl_cb *cb_pos = NULL;
        struct mei_cl_cb *cb_next = NULL;

        struct mei_device *dev = container_of(work,
                                        struct mei_device, timer_work.work);


        mutex_lock(&dev->device_lock);

        /* Catch interrupt stalls during HBM init handshake */
        if (dev->dev_state == MEI_DEV_INIT_CLIENTS &&
            dev->hbm_state != MEI_HBM_IDLE) {

                if (dev->init_clients_timer) {
                        if (--dev->init_clients_timer == 0) {
                                dev_err(&dev->pdev->dev, "timer: init clients timeout hbm_state = %d.\n",
                                        dev->hbm_state);
                                mei_reset(dev);
                                goto out;
                        }
                }
        }

        if (dev->dev_state != MEI_DEV_ENABLED)
                goto out;

        /*** connect/disconnect timeouts ***/
        list_for_each_entry(cl, &dev->file_list, link) {
                if (cl->timer_count) {
                        if (--cl->timer_count == 0) {
                                dev_err(&dev->pdev->dev, "timer: connect/disconnect timeout.\n");
                                mei_reset(dev);
                                goto out;
                        }
                }
        }

        if (!mei_cl_is_connected(&dev->iamthif_cl))
                goto out;

        if (dev->iamthif_stall_timer) {
                if (--dev->iamthif_stall_timer == 0) {
                        dev_err(&dev->pdev->dev, "timer: amthif hanged.\n");
                        mei_reset(dev);
                        dev->iamthif_msg_buf_size = 0;
                        dev->iamthif_msg_buf_index = 0;
                        dev->iamthif_canceled = false;
                        dev->iamthif_ioctl = true;
                        dev->iamthif_state = MEI_IAMTHIF_IDLE;
                        dev->iamthif_timer = 0;

                        mei_io_cb_free(dev->iamthif_current_cb);
                        dev->iamthif_current_cb = NULL;

                        dev->iamthif_file_object = NULL;
                        mei_amthif_run_next_cmd(dev);
                }
        }

        if (dev->iamthif_timer) {

                timeout = dev->iamthif_timer +
                        mei_secs_to_jiffies(MEI_IAMTHIF_READ_TIMER);

                dev_dbg(&dev->pdev->dev, "dev->iamthif_timer = %ld\n",
                                dev->iamthif_timer);
                dev_dbg(&dev->pdev->dev, "timeout = %ld\n", timeout);
                dev_dbg(&dev->pdev->dev, "jiffies = %ld\n", jiffies);
                if (time_after(jiffies, timeout)) {
                        /*
                         * User didn't read the AMTHI data on time (15sec)
                         * freeing AMTHI for other requests
                         */

                        dev_dbg(&dev->pdev->dev, "freeing AMTHI for other requests\n");

                        list_for_each_entry_safe(cb_pos, cb_next,
                                &dev->amthif_rd_complete_list.list, list) {

                                cl = cb_pos->file_object->private_data;

                                /* Finding the AMTHI entry. */
                                if (cl == &dev->iamthif_cl)
                                        list_del(&cb_pos->list);
                        }
                        mei_io_cb_free(dev->iamthif_current_cb);
                        dev->iamthif_current_cb = NULL;

                        dev->iamthif_file_object->private_data = NULL;
                        dev->iamthif_file_object = NULL;
                        dev->iamthif_timer = 0;
                        mei_amthif_run_next_cmd(dev);

                }
        }
out:
        if (dev->dev_state != MEI_DEV_DISABLED)
                schedule_delayed_work(&dev->timer_work, 2 * HZ);
        mutex_unlock(&dev->device_lock);
}