/*
 *
 * Intel Management Engine Interface (Intel MEI) Linux driver
 * Copyright (c) 2003-2012, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 */


#include <linux/export.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/fs.h>
#include <linux/jiffies.h>
#include <linux/slab.h>

#include <linux/mei.h>

#include "mei_dev.h"
#include "hbm.h"
#include "client.h"


/**
 * mei_irq_compl_handler - dispatch complete handlers
 *	for the completed callbacks
 *
 * @dev: mei device
 * @compl_list: list of completed cbs
 */
void mei_irq_compl_handler(struct mei_device *dev, struct mei_cl_cb *compl_list)
{
	struct mei_cl_cb *cb, *next;
	struct mei_cl *cl;

	list_for_each_entry_safe(cb, next, &compl_list->list, list) {
		cl = cb->cl;
		list_del_init(&cb->list);

		dev_dbg(dev->dev, "completing call back.\n");
		if (cl == &dev->iamthif_cl)
			mei_amthif_complete(dev, cb);
		else
			mei_cl_complete(cl, cb);
	}
}
EXPORT_SYMBOL_GPL(mei_irq_compl_handler);

/**
 * mei_cl_hbm_equal - check if hbm is addressed to the client
 *
 * @cl: host client
 * @mei_hdr: header of mei client message
 *
 * Return: true if matches, false otherwise
 */
static inline int mei_cl_hbm_equal(struct mei_cl *cl,
				   struct mei_msg_hdr *mei_hdr)
{
	return mei_cl_host_addr(cl) == mei_hdr->host_addr &&
	       mei_cl_me_id(cl) == mei_hdr->me_addr;
}

/**
 * mei_irq_discard_msg - discard received message
 *
 * @dev: mei device
 * @hdr: message header
 */
static inline
void mei_irq_discard_msg(struct mei_device *dev, struct mei_msg_hdr *hdr)
{
	/*
	 * no need to check for size as it is guaranteed
	 * that length fits into rd_msg_buf
	 */
	mei_read_slots(dev, dev->rd_msg_buf, hdr->length);
	dev_dbg(dev->dev, "discarding message " MEI_HDR_FMT "\n",
		MEI_HDR_PRM(hdr));
}

/**
 * mei_cl_irq_read_msg - process client message
 *
 * @cl: reading client
 * @mei_hdr: header of mei client message
 * @complete_list: completion list
 *
 * Return: always 0
 */
int mei_cl_irq_read_msg(struct mei_cl *cl,
			struct mei_msg_hdr *mei_hdr,
			struct mei_cl_cb *complete_list)
{
	struct mei_device *dev = cl->dev;
	struct mei_cl_cb *cb;
	unsigned char *buffer = NULL;

	cb = list_first_entry_or_null(&cl->rd_pending, struct mei_cl_cb, list);
	if (!cb) {
		cl_err(dev, cl, "pending read cb not found\n");
		goto out;
	}

	if (!mei_cl_is_connected(cl)) {
		cl_dbg(dev, cl, "not connected\n");
		cb->status = -ENODEV;
		goto out;
	}

	if (cb->buf.size == 0 || cb->buf.data == NULL) {
		cl_err(dev, cl, "response buffer is not allocated.\n");
		list_move_tail(&cb->list, &complete_list->list);
		cb->status = -ENOMEM;
		goto out;
	}

	if (cb->buf.size < mei_hdr->length + cb->buf_idx) {
		cl_dbg(dev, cl, "message overflow. size %d len %d idx %ld\n",
		       cb->buf.size, mei_hdr->length, cb->buf_idx);
		buffer = krealloc(cb->buf.data, mei_hdr->length + cb->buf_idx,
				  GFP_KERNEL);

		if (!buffer) {
			cb->status = -ENOMEM;
			list_move_tail(&cb->list, &complete_list->list);
			goto out;
		}
		cb->buf.data = buffer;
		cb->buf.size = mei_hdr->length + cb->buf_idx;
	}

	buffer = cb->buf.data + cb->buf_idx;
	mei_read_slots(dev, buffer, mei_hdr->length);

	cb->buf_idx += mei_hdr->length;

	if (mei_hdr->msg_complete) {
		cb->read_time = jiffies;
		cl_dbg(dev, cl, "completed read length = %lu\n", cb->buf_idx);
		list_move_tail(&cb->list, &complete_list->list);
	}

out:
	if (!buffer)
		mei_irq_discard_msg(dev, mei_hdr);

	return 0;
}

/**
 * mei_cl_irq_disconnect_rsp - send disconnection response message
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * Return: 0, OK; otherwise, error.
 */
static int mei_cl_irq_disconnect_rsp(struct mei_cl *cl, struct mei_cl_cb *cb,
				     struct mei_cl_cb *cmpl_list)
{
	struct mei_device *dev = cl->dev;
	u32 msg_slots;
	int slots;
	int ret;

	slots = mei_hbuf_empty_slots(dev);
	msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_response));

	if (slots < msg_slots)
		return -EMSGSIZE;

	ret = mei_hbm_cl_disconnect_rsp(dev, cl);
	mei_cl_set_disconnected(cl);
	mei_io_cb_free(cb);
	mei_me_cl_put(cl->me_cl);
	cl->me_cl = NULL;

	return ret;
}

/**
 * mei_cl_irq_read - processes client read related operation from the
 *	interrupt thread context - request for flow control credits
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * Return: 0, OK; otherwise, error.
 */
static int mei_cl_irq_read(struct mei_cl *cl, struct mei_cl_cb *cb,
			   struct mei_cl_cb *cmpl_list)
{
	struct mei_device *dev = cl->dev;
	u32 msg_slots;
	int slots;
	int ret;

	msg_slots = mei_data2slots(sizeof(struct hbm_flow_control));
	slots = mei_hbuf_empty_slots(dev);

	if (slots < msg_slots)
		return -EMSGSIZE;

	ret = mei_hbm_cl_flow_control_req(dev, cl);
	if (ret) {
		cl->status = ret;
		cb->buf_idx = 0;
		list_move_tail(&cb->list, &cmpl_list->list);
		return ret;
	}

	list_move_tail(&cb->list, &cl->rd_pending);

	return 0;
}

/**
 * mei_irq_read_handler - bottom half read routine after ISR to
 *	handle the read processing.
 *
 * @dev: the device structure
 * @cmpl_list: An instance of our list structure
 * @slots: slots to read.
 *
 * Return: 0 on success, <0 on failure.
 */
int mei_irq_read_handler(struct mei_device *dev,
			 struct mei_cl_cb *cmpl_list, s32 *slots)
{
	struct mei_msg_hdr *mei_hdr;
	struct mei_cl *cl;
	int ret;

	if (!dev->rd_msg_hdr) {
		dev->rd_msg_hdr = mei_read_hdr(dev);
		(*slots)--;
		dev_dbg(dev->dev, "slots =%08x.\n", *slots);
	}
	mei_hdr = (struct mei_msg_hdr *) &dev->rd_msg_hdr;
	dev_dbg(dev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr));

	if (mei_hdr->reserved || !dev->rd_msg_hdr) {
		dev_err(dev->dev, "corrupted message header 0x%08X\n",
			dev->rd_msg_hdr);
		ret = -EBADMSG;
		goto end;
	}

	if (mei_slots2data(*slots) < mei_hdr->length) {
		dev_err(dev->dev, "less data available than length=%08x.\n",
			*slots);
		/* we can't read the message */
		ret = -ENODATA;
		goto end;
	}

	/* HBM message */
	if (mei_hdr->host_addr == 0 && mei_hdr->me_addr == 0) {
		ret = mei_hbm_dispatch(dev, mei_hdr);
		if (ret) {
			dev_dbg(dev->dev, "mei_hbm_dispatch failed ret = %d\n",
				ret);
			goto end;
		}
		goto reset_slots;
	}

	/* find recipient cl */
	list_for_each_entry(cl, &dev->file_list, link) {
		if (mei_cl_hbm_equal(cl, mei_hdr)) {
			cl_dbg(dev, cl, "got a message\n");
			break;
		}
	}

	/* if no recipient cl was found we assume corrupted header */
	if (&cl->link == &dev->file_list) {
		dev_err(dev->dev, "no destination client found 0x%08X\n",
			dev->rd_msg_hdr);
		ret = -EBADMSG;
		goto end;
	}

	if (cl == &dev->iamthif_cl) {
		ret = mei_amthif_irq_read_msg(cl, mei_hdr, cmpl_list);
	} else {
		ret = mei_cl_irq_read_msg(cl, mei_hdr, cmpl_list);
	}


reset_slots:
	/* reset the number of slots and header */
	*slots = mei_count_full_read_slots(dev);
	dev->rd_msg_hdr = 0;

	if (*slots == -EOVERFLOW) {
		/* overflow - reset */
		dev_err(dev->dev, "resetting due to slots overflow.\n");
		/* set the event since message has been read */
		ret = -ERANGE;
		goto end;
	}
end:
	return ret;
}
EXPORT_SYMBOL_GPL(mei_irq_read_handler);


/**
 * mei_irq_write_handler - dispatch write requests
 *	after irq received
 *
 * @dev: the device structure
 * @cmpl_list: An instance of our list structure
 *
 * Return: 0 on success, <0 on failure.
 */
int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list)
{

	struct mei_cl *cl;
	struct mei_cl_cb *cb, *next;
	struct mei_cl_cb *list;
	s32 slots;
	int ret;


	if (!mei_hbuf_acquire(dev))
		return 0;

	slots = mei_hbuf_empty_slots(dev);
	if (slots <= 0)
		return -EMSGSIZE;

	/* complete all waiting for write CB */
	dev_dbg(dev->dev, "complete all waiting for write cb.\n");

	list = &dev->write_waiting_list;
	list_for_each_entry_safe(cb, next, &list->list, list) {
		cl = cb->cl;

		cl->status = 0;
		cl_dbg(dev, cl, "MEI WRITE COMPLETE\n");
		cl->writing_state = MEI_WRITE_COMPLETE;
		list_move_tail(&cb->list, &cmpl_list->list);
	}

	if (dev->wd_state == MEI_WD_STOPPING) {
		dev->wd_state = MEI_WD_IDLE;
		wake_up(&dev->wait_stop_wd);
	}

	if (mei_cl_is_connected(&dev->wd_cl)) {
		if (dev->wd_pending &&
		    mei_cl_flow_ctrl_creds(&dev->wd_cl) > 0) {
			ret = mei_wd_send(dev);
			if (ret)
				return ret;
			dev->wd_pending = false;
		}
	}

	/* complete control write list CB */
	dev_dbg(dev->dev, "complete control write list cb.\n");
	list_for_each_entry_safe(cb, next, &dev->ctrl_wr_list.list, list) {
		cl = cb->cl;
		switch (cb->fop_type) {
		case MEI_FOP_DISCONNECT:
			/* send disconnect message */
			ret = mei_cl_irq_disconnect(cl, cb, cmpl_list);
			if (ret)
				return ret;

			break;
		case MEI_FOP_READ:
			/* send flow control message */
			ret = mei_cl_irq_read(cl, cb, cmpl_list);
			if (ret)
				return ret;

			break;
		case MEI_FOP_CONNECT:
			/* connect message */
			ret = mei_cl_irq_connect(cl, cb, cmpl_list);
			if (ret)
				return ret;

			break;
		case MEI_FOP_DISCONNECT_RSP:
			/* send disconnect resp */
			ret = mei_cl_irq_disconnect_rsp(cl, cb, cmpl_list);
			if (ret)
				return ret;
			break;

		case MEI_FOP_NOTIFY_START:
		case MEI_FOP_NOTIFY_STOP:
			ret = mei_cl_irq_notify(cl, cb, cmpl_list);
			if (ret)
				return ret;
			break;
		default:
			BUG();
		}

	}
	/* complete write list CB */
	dev_dbg(dev->dev, "complete write list cb.\n");
	list_for_each_entry_safe(cb, next, &dev->write_list.list, list) {
		cl = cb->cl;
		if (cl == &dev->iamthif_cl)
			ret = mei_amthif_irq_write(cl, cb, cmpl_list);
		else
			ret = mei_cl_irq_write(cl, cb, cmpl_list);
		if (ret)
			return ret;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(mei_irq_write_handler);


/**
 * mei_connect_timeout - connect/disconnect timeouts
 *
 * @cl: host client
 */
static void mei_connect_timeout(struct mei_cl *cl)
{
	struct mei_device *dev = cl->dev;

	if (cl->state == MEI_FILE_CONNECTING) {
		if (dev->hbm_f_dot_supported) {
			cl->state = MEI_FILE_DISCONNECT_REQUIRED;
			wake_up(&cl->wait);
			return;
		}
	}
	mei_reset(dev);
}

/**
 * mei_timer - timer function.
 *
 * @work: pointer to the work_struct structure
 *
 */
void mei_timer(struct work_struct *work)
{
	unsigned long timeout;
	struct mei_cl *cl;

	struct mei_device *dev = container_of(work,
					struct mei_device, timer_work.work);


	mutex_lock(&dev->device_lock);

	/* Catch interrupt stalls during HBM init handshake */
	if (dev->dev_state == MEI_DEV_INIT_CLIENTS &&
	    dev->hbm_state != MEI_HBM_IDLE) {

		if (dev->init_clients_timer) {
			if (--dev->init_clients_timer == 0) {
				dev_err(dev->dev, "timer: init clients timeout hbm_state = %d.\n",
					dev->hbm_state);
				mei_reset(dev);
				goto out;
			}
		}
	}

	if (dev->dev_state != MEI_DEV_ENABLED)
		goto out;

	/*** connect/disconnect timeouts ***/
	list_for_each_entry(cl, &dev->file_list, link) {
		if (cl->timer_count) {
			if (--cl->timer_count == 0) {
				dev_err(dev->dev, "timer: connect/disconnect timeout.\n");
				mei_connect_timeout(cl);
				goto out;
			}
		}
	}

	if (!mei_cl_is_connected(&dev->iamthif_cl))
		goto out;

	if (dev->iamthif_stall_timer) {
		if (--dev->iamthif_stall_timer == 0) {
			dev_err(dev->dev, "timer: amthif hanged.\n");
			mei_reset(dev);
			dev->iamthif_canceled = false;
			dev->iamthif_state = MEI_IAMTHIF_IDLE;
			dev->iamthif_timer = 0;

			mei_io_cb_free(dev->iamthif_current_cb);
			dev->iamthif_current_cb = NULL;

			dev->iamthif_file_object = NULL;
			mei_amthif_run_next_cmd(dev);
		}
	}

	if (dev->iamthif_timer) {

		timeout = dev->iamthif_timer +
			mei_secs_to_jiffies(MEI_IAMTHIF_READ_TIMER);

		dev_dbg(dev->dev, "dev->iamthif_timer = %ld\n",
			dev->iamthif_timer);
		dev_dbg(dev->dev, "timeout = %ld\n", timeout);
		dev_dbg(dev->dev, "jiffies = %ld\n", jiffies);
		if (time_after(jiffies, timeout)) {
			/*
			 * User didn't read the AMTHI data on time (15sec)
			 * freeing AMTHI for other requests
			 */

			dev_dbg(dev->dev, "freeing AMTHI for other requests\n");

			mei_io_list_flush(&dev->amthif_rd_complete_list,
					  &dev->iamthif_cl);
			mei_io_cb_free(dev->iamthif_current_cb);
			dev->iamthif_current_cb = NULL;

			dev->iamthif_file_object->private_data = NULL;
			dev->iamthif_file_object = NULL;
			dev->iamthif_timer = 0;
			mei_amthif_run_next_cmd(dev);

		}
	}
out:
	if (dev->dev_state != MEI_DEV_DISABLED)
		schedule_delayed_work(&dev->timer_work, 2 * HZ);
	mutex_unlock(&dev->device_lock);
}
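
/*
 * Illustrative sketch only -- not part of this driver.  It shows roughly how
 * the exported handlers above (mei_irq_read_handler, mei_irq_write_handler,
 * mei_irq_compl_handler) are typically driven from a hardware interrupt
 * thread, loosely modeled on the ME hardware variant in hw-me.c.  The name
 * mei_irq_thread_sketch() is hypothetical; hardware specific steps
 * (acknowledging interrupt sources, re-enabling interrupts, scheduling a
 * reset on hard errors) are omitted, and mei_io_list_init(),
 * mei_count_full_read_slots() and mei_hbuf_is_ready() are assumed to be
 * available from mei_dev.h.
 */
static __maybe_unused irqreturn_t mei_irq_thread_sketch(int irq, void *dev_id)
{
	struct mei_device *dev = dev_id;
	struct mei_cl_cb complete_list;
	s32 slots;
	int rets;

	mutex_lock(&dev->device_lock);
	mei_io_list_init(&complete_list);

	/* drain the read FIFO one header plus payload at a time */
	slots = mei_count_full_read_slots(dev);
	while (slots > 0) {
		dev_dbg(dev->dev, "slots to read = %08x\n", slots);
		rets = mei_irq_read_handler(dev, &complete_list, &slots);
		/*
		 * the real handler distinguishes -ENODATA (retry on the next
		 * interrupt) from hard errors (schedule a reset); the sketch
		 * simply stops reading on any error
		 */
		if (rets)
			break;
	}

	/* push out pending writes while the host buffer is available */
	dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
	rets = mei_irq_write_handler(dev, &complete_list);
	dev->hbuf_is_ready = mei_hbuf_is_ready(dev);

	/* run the completion callbacks gathered by the handlers above */
	mei_irq_compl_handler(dev, &complete_list);

	mutex_unlock(&dev->device_lock);
	return IRQ_HANDLED;
}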