/*
 * Copyright (c) 2003 Evgeniy Polyakov <zbr@ioremap.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/netlink.h>
#include <linux/connector.h>

#include "w1_internal.h"
#include "w1_netlink.h"

#if defined(CONFIG_W1_CON) && (defined(CONFIG_CONNECTOR) || (defined(CONFIG_CONNECTOR_MODULE) && defined(CONFIG_W1_MODULE)))

/* Bundle together everything required to process a request in one memory
 * allocation.
 */
struct w1_cb_block {
        atomic_t refcnt;
        u32 portid;                     /* Sending process port ID */
        /* maximum value for first_cn->len */
        u16 maxlen;
        /* pointers for building up the reply message */
        struct cn_msg *first_cn;        /* fixed once the structure is populated */
        struct cn_msg *cn;              /* advances as cn_msg is appended */
        struct w1_netlink_msg *msg;     /* advances as w1_netlink_msg is appended */
        struct w1_netlink_cmd *cmd;     /* advances as cmds are appended */
        struct w1_netlink_msg *cur_msg; /* message currently being processed */
        /* copy of the original request follows */
        struct cn_msg request_cn;
        /* followed by variable length:
         * cn_msg, data (w1_netlink_msg and w1_netlink_cmd)
         * one or more struct w1_cb_node
         * reply first_cn, data (w1_netlink_msg and w1_netlink_cmd)
         */
};

struct w1_cb_node {
        struct w1_async_cmd async;
        /* pointers within w1_cb_block and cn data */
        struct w1_cb_block *block;
        struct w1_netlink_msg *msg;
        struct w1_slave *sl;
        struct w1_master *dev;
};

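/* Rough sketch of the single allocation described above (see the "size"
 * computation in w1_cn_callback(); layout is illustrative, not exact):
 *
 *      struct w1_cb_block      refcnt, portid, maxlen and reply pointers,
 *                              ending with request_cn, a copy of the
 *                              request's cn_msg header
 *      u8[request_cn.len]      copy of the request payload (the original
 *                              w1_netlink_msg and w1_netlink_cmd entries)
 *      struct w1_cb_node[]     one node per W1_MASTER_CMD/W1_SLAVE_CMD
 *                              message in the request
 *      first_cn + reply data   buffer of up to maxlen bytes (plus room for
 *                              one extra struct cn_msg, see the comment in
 *                              w1_cn_callback()) where replies accumulate
 *                              before being sent
 */
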
/**
 * w1_reply_len() - calculate current reply length, compare to maxlen
 * @block: block to calculate
 *
 * Calculates the current message length including possible multiple
 * cn_msg and data, excludes the first sizeof(struct cn_msg). Directly
 * comparable to maxlen and usable to send the message.
 */
static u16 w1_reply_len(struct w1_cb_block *block)
{
        if (!block->cn)
                return 0;
        return (u8 *)block->cn - (u8 *)block->first_cn + block->cn->len;
}

static void w1_unref_block(struct w1_cb_block *block)
{
        if (atomic_sub_return(1, &block->refcnt) == 0) {
                u16 len = w1_reply_len(block);
                if (len) {
                        cn_netlink_send_mult(block->first_cn, len,
                                block->portid, 0, GFP_KERNEL);
                }
                kfree(block);
        }
}

/**
 * w1_reply_make_space() - send message if needed to make space
 * @block: block to make space on
 * @space: how many bytes requested
 *
 * Verify there is enough room left for the caller to add "space" bytes to the
 * message; if there isn't, send the message and reset.
 */
static void w1_reply_make_space(struct w1_cb_block *block, u16 space)
{
        u16 len = w1_reply_len(block);
        if (len + space >= block->maxlen) {
                cn_netlink_send_mult(block->first_cn, len, block->portid, 0, GFP_KERNEL);
                block->first_cn->len = 0;
                block->cn = NULL;
                block->msg = NULL;
                block->cmd = NULL;
        }
}

/* Early send when replies aren't bundled. */
static void w1_netlink_check_send(struct w1_cb_block *block)
{
        if (!(block->request_cn.flags & W1_CN_BUNDLE) && block->cn)
                w1_reply_make_space(block, block->maxlen);
}

/**
 * w1_netlink_setup_msg() - prepare to write block->msg
 * @block: block to operate on
 * @ack: determines if cn can be reused
 *
 * block->cn will be setup with the correct ack, advancing if needed
 * block->cn->len does not include space for block->msg
 * block->msg advances but remains uninitialized
 */
static void w1_netlink_setup_msg(struct w1_cb_block *block, u32 ack)
{
        if (block->cn && block->cn->ack == ack) {
                block->msg = (struct w1_netlink_msg *)(block->cn->data + block->cn->len);
        } else {
                /* advance or set to data */
                if (block->cn)
                        block->cn = (struct cn_msg *)(block->cn->data +
                                block->cn->len);
                else
                        block->cn = block->first_cn;

                memcpy(block->cn, &block->request_cn, sizeof(*block->cn));
                block->cn->len = 0;
                block->cn->ack = ack;
                block->msg = (struct w1_netlink_msg *)block->cn->data;
        }
}

/* Append cmd to msg, including cmd->data. This is because any following data
 * goes with the command and, in the case of a read, holds the results.
 */
static void w1_netlink_queue_cmd(struct w1_cb_block *block,
        struct w1_netlink_cmd *cmd)
{
        u32 space;
        w1_reply_make_space(block, sizeof(struct cn_msg) +
                sizeof(struct w1_netlink_msg) + sizeof(*cmd) + cmd->len);

        /* There's a status message sent after each command, so no point
         * in trying to bundle this cmd after an existing one, because
         * there won't be one. Allocate and copy over a new cn_msg.
         */
        w1_netlink_setup_msg(block, block->request_cn.seq + 1);
        memcpy(block->msg, block->cur_msg, sizeof(*block->msg));
        block->cn->len += sizeof(*block->msg);
        block->msg->len = 0;
        block->cmd = (struct w1_netlink_cmd *)(block->msg->data);

        space = sizeof(*cmd) + cmd->len;
        if (block->cmd != cmd)
                memcpy(block->cmd, cmd, space);
        block->cn->len += space;
        block->msg->len += space;
}

/* Append req_msg and req_cmd; no other commands and no data from req_cmd are
 * copied.
 */
static void w1_netlink_queue_status(struct w1_cb_block *block,
        struct w1_netlink_msg *req_msg, struct w1_netlink_cmd *req_cmd,
        int error)
{
        u16 space = sizeof(struct cn_msg) + sizeof(*req_msg) + sizeof(*req_cmd);
        w1_reply_make_space(block, space);
        w1_netlink_setup_msg(block, block->request_cn.ack);

        memcpy(block->msg, req_msg, sizeof(*req_msg));
        block->cn->len += sizeof(*req_msg);
        block->msg->len = 0;
        block->msg->status = (u8)-error;
        if (req_cmd) {
                struct w1_netlink_cmd *cmd = (struct w1_netlink_cmd *)block->msg->data;
                memcpy(cmd, req_cmd, sizeof(*cmd));
                block->cn->len += sizeof(*cmd);
                block->msg->len += sizeof(*cmd);
                cmd->len = 0;
        }
        w1_netlink_check_send(block);
}

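/* A note on sequencing, as implemented above: replies that carry command
 * data (w1_netlink_queue_cmd) are sent with cn->ack set to the request's
 * seq + 1, while the per-command status reply (w1_netlink_queue_status)
 * reuses the ack from the request, so a reader of the reply stream can
 * tell returned data apart from the completion status.
 */
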
/**
 * w1_netlink_send_error() - sends the error message now
 * @cn: original cn_msg
 * @msg: original w1_netlink_msg
 * @portid: where to send it
 * @error: error status
 *
 * Use when a block isn't available to queue the message to and cn, msg
 * might not be contiguous.
 */
static void w1_netlink_send_error(struct cn_msg *cn, struct w1_netlink_msg *msg,
        int portid, int error)
{
        struct {
                struct cn_msg cn;
                struct w1_netlink_msg msg;
        } packet;
        memcpy(&packet.cn, cn, sizeof(packet.cn));
        memcpy(&packet.msg, msg, sizeof(packet.msg));
        packet.cn.len = sizeof(packet.msg);
        packet.msg.len = 0;
        packet.msg.status = (u8)-error;
        cn_netlink_send(&packet.cn, portid, 0, GFP_KERNEL);
}

/**
 * w1_netlink_send() - sends w1 netlink notifications
 * @dev: w1_master the event is associated with or for
 * @msg: w1_netlink_msg message to be sent
 *
 * These are notifications generated by the kernel.
 */
void w1_netlink_send(struct w1_master *dev, struct w1_netlink_msg *msg)
{
        struct {
                struct cn_msg cn;
                struct w1_netlink_msg msg;
        } packet;
        memset(&packet, 0, sizeof(packet));

        packet.cn.id.idx = CN_W1_IDX;
        packet.cn.id.val = CN_W1_VAL;

        packet.cn.seq = dev->seq++;
        packet.cn.len = sizeof(*msg);

        memcpy(&packet.msg, msg, sizeof(*msg));
        packet.msg.len = 0;

        cn_netlink_send(&packet.cn, 0, 0, GFP_KERNEL);
}

static void w1_send_slave(struct w1_master *dev, u64 rn)
{
        struct w1_cb_block *block = dev->priv;
        struct w1_netlink_cmd *cache_cmd = block->cmd;
        u64 *data;

        w1_reply_make_space(block, sizeof(*data));

        /* Add cmd back if the packet was sent */
        if (!block->cmd) {
                cache_cmd->len = 0;
                w1_netlink_queue_cmd(block, cache_cmd);
        }

        data = (u64 *)(block->cmd->data + block->cmd->len);

        *data = rn;
        block->cn->len += sizeof(*data);
        block->msg->len += sizeof(*data);
        block->cmd->len += sizeof(*data);
}

static void w1_found_send_slave(struct w1_master *dev, u64 rn)
{
        /* update kernel slave list */
        w1_slave_found(dev, rn);

        w1_send_slave(dev, rn);
}

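/* For searches and slave lists, w1_get_slaves() below first queues a copy of
 * the request command as the reply header; w1_send_slave() then appends one
 * 64-bit registration number per device, flushing and restarting the reply
 * packet whenever the buffer fills up.
 */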
/* Get the current slave list, or search (with or without alarm) */
static int w1_get_slaves(struct w1_master *dev, struct w1_netlink_cmd *req_cmd)
{
        struct w1_slave *sl;

        req_cmd->len = 0;
        w1_netlink_queue_cmd(dev->priv, req_cmd);

        if (req_cmd->cmd == W1_CMD_LIST_SLAVES) {
                u64 rn;
                mutex_lock(&dev->list_mutex);
                list_for_each_entry(sl, &dev->slist, w1_slave_entry) {
                        memcpy(&rn, &sl->reg_num, sizeof(rn));
                        w1_send_slave(dev, rn);
                }
                mutex_unlock(&dev->list_mutex);
        } else {
                w1_search_process_cb(dev, req_cmd->cmd == W1_CMD_ALARM_SEARCH ?
                        W1_ALARM_SEARCH : W1_SEARCH, w1_found_send_slave);
        }

        return 0;
}

static int w1_process_command_io(struct w1_master *dev,
        struct w1_netlink_cmd *cmd)
{
        int err = 0;

        switch (cmd->cmd) {
        case W1_CMD_TOUCH:
                w1_touch_block(dev, cmd->data, cmd->len);
                w1_netlink_queue_cmd(dev->priv, cmd);
                break;
        case W1_CMD_READ:
                w1_read_block(dev, cmd->data, cmd->len);
                w1_netlink_queue_cmd(dev->priv, cmd);
                break;
        case W1_CMD_WRITE:
                w1_write_block(dev, cmd->data, cmd->len);
                break;
        default:
                err = -EINVAL;
                break;
        }

        return err;
}

static int w1_process_command_addremove(struct w1_master *dev,
        struct w1_netlink_cmd *cmd)
{
        struct w1_slave *sl;
        int err = 0;
        struct w1_reg_num *id;

        if (cmd->len != sizeof(*id))
                return -EINVAL;

        id = (struct w1_reg_num *)cmd->data;

        sl = w1_slave_search_device(dev, id);
        switch (cmd->cmd) {
        case W1_CMD_SLAVE_ADD:
                if (sl)
                        err = -EINVAL;
                else
                        err = w1_attach_slave_device(dev, id);
                break;
        case W1_CMD_SLAVE_REMOVE:
                if (sl)
                        w1_slave_detach(sl);
                else
                        err = -EINVAL;
                break;
        default:
                err = -EINVAL;
                break;
        }

        return err;
}

static int w1_process_command_master(struct w1_master *dev,
        struct w1_netlink_cmd *req_cmd)
{
        int err = -EINVAL;

        /* drop bus_mutex for search (it does its own locking), and for
         * add/remove, which doesn't use the bus
         */
        switch (req_cmd->cmd) {
        case W1_CMD_SEARCH:
        case W1_CMD_ALARM_SEARCH:
        case W1_CMD_LIST_SLAVES:
                mutex_unlock(&dev->bus_mutex);
                err = w1_get_slaves(dev, req_cmd);
                mutex_lock(&dev->bus_mutex);
                break;
        case W1_CMD_READ:
        case W1_CMD_WRITE:
        case W1_CMD_TOUCH:
                err = w1_process_command_io(dev, req_cmd);
                break;
        case W1_CMD_RESET:
                err = w1_reset_bus(dev);
                break;
        case W1_CMD_SLAVE_ADD:
        case W1_CMD_SLAVE_REMOVE:
                mutex_unlock(&dev->bus_mutex);
                mutex_lock(&dev->mutex);
                err = w1_process_command_addremove(dev, req_cmd);
                mutex_unlock(&dev->mutex);
                mutex_lock(&dev->bus_mutex);
                break;
        default:
                err = -EINVAL;
                break;
        }

        return err;
}

static int w1_process_command_slave(struct w1_slave *sl,
        struct w1_netlink_cmd *cmd)
{
        dev_dbg(&sl->master->dev, "%s: %02x.%012llx.%02x: cmd=%02x, len=%u.\n",
                __func__, sl->reg_num.family, (unsigned long long)sl->reg_num.id,
                sl->reg_num.crc, cmd->cmd, cmd->len);

        return w1_process_command_io(sl->master, cmd);
}

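/* For reference, a W1_LIST_MASTERS request handled below could be built from
 * userspace roughly as follows.  This is an untested illustration, and it
 * assumes the connector and w1 netlink message definitions are available to
 * the client; "fd" is a bound NETLINK_CONNECTOR socket.  The masters come
 * back as u32 ids in the reply's w1_netlink_msg data.
 *
 *      struct {
 *              struct nlmsghdr nl;
 *              struct cn_msg cn;
 *              struct w1_netlink_msg w1;
 *      } __attribute__((packed)) req = {
 *              .nl = {
 *                      .nlmsg_len  = sizeof(req),
 *                      .nlmsg_type = NLMSG_DONE,
 *              },
 *              .cn = {
 *                      .id  = { .idx = CN_W1_IDX, .val = CN_W1_VAL },
 *                      .seq = 1,
 *                      .len = sizeof(struct w1_netlink_msg),
 *              },
 *              .w1 = { .type = W1_LIST_MASTERS },
 *      };
 *      send(fd, &req, sizeof(req), 0);
 */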
static int w1_process_command_root(struct cn_msg *req_cn, u32 portid)
{
        struct w1_master *dev;
        struct cn_msg *cn;
        struct w1_netlink_msg *msg;
        u32 *id;

        cn = kmalloc(PAGE_SIZE, GFP_KERNEL);
        if (!cn)
                return -ENOMEM;

        cn->id.idx = CN_W1_IDX;
        cn->id.val = CN_W1_VAL;

        cn->seq = req_cn->seq;
        cn->ack = req_cn->seq + 1;
        cn->len = sizeof(struct w1_netlink_msg);
        msg = (struct w1_netlink_msg *)cn->data;

        msg->type = W1_LIST_MASTERS;
        msg->status = 0;
        msg->len = 0;
        id = (u32 *)msg->data;

        mutex_lock(&w1_mlock);
        list_for_each_entry(dev, &w1_masters, w1_master_entry) {
                if (cn->len + sizeof(*id) > PAGE_SIZE - sizeof(struct cn_msg)) {
                        cn_netlink_send(cn, portid, 0, GFP_KERNEL);
                        cn->len = sizeof(struct w1_netlink_msg);
                        msg->len = 0;
                        id = (u32 *)msg->data;
                }

                *id = dev->id;
                msg->len += sizeof(*id);
                cn->len += sizeof(*id);
                id++;
        }
        cn_netlink_send(cn, portid, 0, GFP_KERNEL);
        mutex_unlock(&w1_mlock);

        kfree(cn);
        return 0;
}

static void w1_process_cb(struct w1_master *dev, struct w1_async_cmd *async_cmd)
{
        struct w1_cb_node *node = container_of(async_cmd, struct w1_cb_node,
                async);
        u16 mlen = node->msg->len;
        u16 len;
        int err = 0;
        struct w1_slave *sl = node->sl;
        struct w1_netlink_cmd *cmd = (struct w1_netlink_cmd *)node->msg->data;

        mutex_lock(&dev->bus_mutex);
        dev->priv = node->block;
        if (sl && w1_reset_select_slave(sl))
                err = -ENODEV;
        node->block->cur_msg = node->msg;

        while (mlen && !err) {
                if (cmd->len + sizeof(struct w1_netlink_cmd) > mlen) {
                        err = -E2BIG;
                        break;
                }

                if (sl)
                        err = w1_process_command_slave(sl, cmd);
                else
                        err = w1_process_command_master(dev, cmd);
                w1_netlink_check_send(node->block);

                w1_netlink_queue_status(node->block, node->msg, cmd, err);
                err = 0;

                len = sizeof(*cmd) + cmd->len;
                cmd = (struct w1_netlink_cmd *)((u8 *)cmd + len);
                mlen -= len;
        }

        if (!cmd || err)
                w1_netlink_queue_status(node->block, node->msg, cmd, err);

        /* ref taken in w1_search_slave or w1_search_master_id when building
         * the block
         */
        if (sl)
                w1_unref_slave(sl);
        else
                atomic_dec(&dev->refcnt);
        dev->priv = NULL;
        mutex_unlock(&dev->bus_mutex);

        mutex_lock(&dev->list_mutex);
        list_del(&async_cmd->async_entry);
        mutex_unlock(&dev->list_mutex);

        w1_unref_block(node->block);
}

static void w1_list_count_cmds(struct w1_netlink_msg *msg, int *cmd_count,
        u16 *slave_len)
{
        struct w1_netlink_cmd *cmd = (struct w1_netlink_cmd *)msg->data;
        u16 mlen = msg->len;
        u16 len;
        int slave_list = 0;
        while (mlen) {
                if (cmd->len + sizeof(struct w1_netlink_cmd) > mlen)
                        break;

                switch (cmd->cmd) {
                case W1_CMD_SEARCH:
                case W1_CMD_ALARM_SEARCH:
                case W1_CMD_LIST_SLAVES:
                        ++slave_list;
                }
                ++*cmd_count;
                len = sizeof(*cmd) + cmd->len;
                cmd = (struct w1_netlink_cmd *)((u8 *)cmd + len);
                mlen -= len;
        }

        if (slave_list) {
                struct w1_master *dev = w1_search_master_id(msg->id.mst.id);
                if (dev) {
                        /* Bytes, and likely an overestimate; if it isn't,
                         * the results can still be split between packets.
                         */
                        *slave_len += sizeof(struct w1_reg_num) * slave_list *
                                (dev->slave_count + dev->max_slave_count);
                        /* search incremented it */
                        atomic_dec(&dev->refcnt);
                }
        }
}

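/* Entry point for requests from userspace.  In outline: validate the flags,
 * walk the request once to count W1_MASTER_CMD/W1_SLAVE_CMD messages and
 * size the reply, allocate a single w1_cb_block for the whole request, then
 * walk it again handing each message to its master's thread as a w1_cb_node
 * (w1_process_cb above does the actual work); W1_LIST_MASTERS is answered
 * directly on this thread.
 */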
static void w1_cn_callback(struct cn_msg *cn, struct netlink_skb_parms *nsp)
{
        struct w1_netlink_msg *msg = (struct w1_netlink_msg *)(cn + 1);
        struct w1_slave *sl;
        struct w1_master *dev;
        u16 msg_len;
        u16 slave_len = 0;
        int err = 0;
        struct w1_cb_block *block = NULL;
        struct w1_cb_node *node = NULL;
        int node_count = 0;
        int cmd_count = 0;

        /* If any unknown flag is set, let the application know; that way
         * applications can detect the absence of features in kernels that
         * don't know about them.  http://lwn.net/Articles/587527/
         */
        if (cn->flags & ~(W1_CN_BUNDLE)) {
                w1_netlink_send_error(cn, msg, nsp->portid, -EINVAL);
                return;
        }

        /* Count the number of master or slave commands there are to allocate
         * space for one cb_node each.
         */
        msg_len = cn->len;
        while (msg_len && !err) {
                if (msg->len + sizeof(struct w1_netlink_msg) > msg_len) {
                        err = -E2BIG;
                        break;
                }

                /* count messages for nodes and allocate any additional space
                 * required for slave lists
                 */
                if (msg->type == W1_MASTER_CMD || msg->type == W1_SLAVE_CMD) {
                        ++node_count;
                        w1_list_count_cmds(msg, &cmd_count, &slave_len);
                }

                msg_len -= sizeof(struct w1_netlink_msg) + msg->len;
                msg = (struct w1_netlink_msg *)(((u8 *)msg) +
                        sizeof(struct w1_netlink_msg) + msg->len);
        }
        msg = (struct w1_netlink_msg *)(cn + 1);
        if (node_count) {
                int size;
                int reply_size = sizeof(*cn) + cn->len + slave_len;
                if (cn->flags & W1_CN_BUNDLE) {
                        /* bundling duplicates some of the messages */
                        reply_size += 2 * cmd_count * (sizeof(struct cn_msg) +
                                sizeof(struct w1_netlink_msg) +
                                sizeof(struct w1_netlink_cmd));
                }
                reply_size = min(CONNECTOR_MAX_MSG_SIZE, reply_size);

                /* allocate space for the block, a copy of the original message,
                 * one node per cmd to point into the original message,
                 * and space for replies, which is the original message size
                 * plus space for any list slave data and status messages;
                 * cn->len doesn't include itself, which is part of the block
                 */
                size =  /* block + original message */
                        sizeof(struct w1_cb_block) + sizeof(*cn) + cn->len +
                        /* space for nodes */
                        node_count * sizeof(struct w1_cb_node) +
                        /* replies */
                        sizeof(struct cn_msg) + reply_size;
                block = kzalloc(size, GFP_KERNEL);
                if (!block) {
                        /* if the system is already out of memory,
                         * (A) will this work, and (B) would it be better
                         * to not try?
                         */
                        w1_netlink_send_error(cn, msg, nsp->portid, -ENOMEM);
                        return;
                }
                atomic_set(&block->refcnt, 1);
                block->portid = nsp->portid;
                memcpy(&block->request_cn, cn, sizeof(*cn) + cn->len);
                node = (struct w1_cb_node *)(block->request_cn.data + cn->len);

                /* Sneaky: when not bundling, reply_size is the allocated space
                 * required for the reply.  cn_msg isn't part of maxlen, so it
                 * should be reply_size - sizeof(struct cn_msg); however, when
                 * checking if there is enough space, w1_reply_make_space is
                 * called with the full message size including cn_msg, because
                 * it isn't known at that time whether an additional cn_msg
                 * will need to be allocated.  So an extra cn_msg is added
                 * above in "size".
                 */
                block->maxlen = reply_size;
                block->first_cn = (struct cn_msg *)(node + node_count);
                memset(block->first_cn, 0, sizeof(*block->first_cn));
        }

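        /* Second pass: dispatch each message.  W1_LIST_MASTERS is handled
         * inline; W1_MASTER_CMD and W1_SLAVE_CMD are handed to the matching
         * master's thread via the w1_cb_node entries set up above.
         */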
        msg_len = cn->len;
        while (msg_len && !err) {

                dev = NULL;
                sl = NULL;

                if (msg->len + sizeof(struct w1_netlink_msg) > msg_len) {
                        err = -E2BIG;
                        break;
                }

                /* execute on this thread, no need to process later */
                if (msg->type == W1_LIST_MASTERS) {
                        err = w1_process_command_root(cn, nsp->portid);
                        goto out_cont;
                }

                /* All following message types require additional data,
                 * check here before references are taken.
                 */
                if (!msg->len) {
                        err = -EPROTO;
                        goto out_cont;
                }

                /* both search calls take references */
                if (msg->type == W1_MASTER_CMD) {
                        dev = w1_search_master_id(msg->id.mst.id);
                } else if (msg->type == W1_SLAVE_CMD) {
                        sl = w1_search_slave((struct w1_reg_num *)msg->id.id);
                        if (sl)
                                dev = sl->master;
                } else {
                        pr_notice("%s: cn: %x.%x, wrong type: %u, len: %u.\n",
                                __func__, cn->id.idx, cn->id.val,
                                msg->type, msg->len);
                        err = -EPROTO;
                        goto out_cont;
                }

                if (!dev) {
                        err = -ENODEV;
                        goto out_cont;
                }

                err = 0;

                atomic_inc(&block->refcnt);
                node->async.cb = w1_process_cb;
                node->block = block;
                node->msg = (struct w1_netlink_msg *)((u8 *)&block->request_cn +
                        (size_t)((u8 *)msg - (u8 *)cn));
                node->sl = sl;
                node->dev = dev;

                mutex_lock(&dev->list_mutex);
                list_add_tail(&node->async.async_entry, &dev->async_list);
                wake_up_process(dev->thread);
                mutex_unlock(&dev->list_mutex);
                ++node;

out_cont:
                /* Can't queue the error because that would modify the block,
                 * and another thread could already be processing the messages;
                 * there isn't a lock, so send it directly.
                 */
                if (err)
                        w1_netlink_send_error(cn, msg, nsp->portid, err);
                msg_len -= sizeof(struct w1_netlink_msg) + msg->len;
                msg = (struct w1_netlink_msg *)(((u8 *)msg) +
                        sizeof(struct w1_netlink_msg) + msg->len);

                /*
                 * Let's allow requests for nonexistent devices.
                 */
                if (err == -ENODEV)
                        err = 0;
        }
        if (block)
                w1_unref_block(block);
}

int w1_init_netlink(void)
{
        struct cb_id w1_id = {.idx = CN_W1_IDX, .val = CN_W1_VAL};

        return cn_add_callback(&w1_id, "w1", &w1_cn_callback);
}

void w1_fini_netlink(void)
{
        struct cb_id w1_id = {.idx = CN_W1_IDX, .val = CN_W1_VAL};

        cn_del_callback(&w1_id);
}
#else
void w1_netlink_send(struct w1_master *dev, struct w1_netlink_msg *cn)
{
}

int w1_init_netlink(void)
{
        return 0;
}

void w1_fini_netlink(void)
{
}
#endif