// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2003 Evgeniy Polyakov <zbr@ioremap.net>
 */

#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/netlink.h>
#include <linux/connector.h>

#include "w1_internal.h"
#include "w1_netlink.h"

#if defined(CONFIG_W1_CON) && (defined(CONFIG_CONNECTOR) || (defined(CONFIG_CONNECTOR_MODULE) && defined(CONFIG_W1_MODULE)))

/* Bundle together everything required to process a request in one memory
 * allocation.
 */
struct w1_cb_block {
	atomic_t refcnt;		/* dropped via w1_unref_block(); last ref flushes and frees */
	u32 portid;			/* Sending process port ID */
	/* maximum value for first_cn->len */
	u16 maxlen;
	/* pointers for building up the reply message */
	struct cn_msg *first_cn;	/* fixed once the structure is populated */
	struct cn_msg *cn;		/* advances as cn_msg is appended */
	struct w1_netlink_msg *msg;	/* advances as w1_netlink_msg is appended */
	struct w1_netlink_cmd *cmd;	/* advances as cmds are appended */
	struct w1_netlink_msg *cur_msg;	/* message currently being processed */
	/* copy of the original request follows */
	struct cn_msg request_cn;
	/* followed by variable length:
	 * cn_msg, data (w1_netlink_msg and w1_netlink_cmd)
	 * one or more struct w1_cb_node
	 * reply first_cn, data (w1_netlink_msg and w1_netlink_cmd)
	 */
};

/* One queued unit of work: a single W1_MASTER_CMD or W1_SLAVE_CMD message
 * handed to a master thread for asynchronous processing.
 */
struct w1_cb_node {
	struct w1_async_cmd async;
	/* pointers within w1_cb_block and cn data */
	struct w1_cb_block *block;
	struct w1_netlink_msg *msg;
	struct w1_slave *sl;
	struct w1_master *dev;
};

/**
 * w1_reply_len() - calculate current reply length, compare to maxlen
 * @block: block to calculate
 *
 * Calculates the current message length including possible multiple
 * cn_msg and data, excludes the first sizeof(struct cn_msg). Directly
 * comparable to maxlen and usable to send the message.
 */
static u16 w1_reply_len(struct w1_cb_block *block)
{
	if (!block->cn)
		return 0;
	return (u8 *)block->cn - (u8 *)block->first_cn + block->cn->len;
}

/* Drop one reference; on the final put, send any reply bytes still
 * accumulated in the block, then free the whole bundle in one kfree().
 */
static void w1_unref_block(struct w1_cb_block *block)
{
	if (atomic_sub_return(1, &block->refcnt) == 0) {
		u16 len = w1_reply_len(block);

		if (len) {
			cn_netlink_send_mult(block->first_cn, len,
				block->portid, 0, GFP_KERNEL);
		}
		kfree(block);
	}
}

/**
 * w1_reply_make_space() - send message if needed to make space
 * @block: block to make space on
 * @space: how many bytes requested
 *
 * Verify there is enough room left for the caller to add "space" bytes to the
 * message, if there isn't send the message and reset.
 */
static void w1_reply_make_space(struct w1_cb_block *block, u16 space)
{
	u16 len = w1_reply_len(block);

	if (len + space >= block->maxlen) {
		cn_netlink_send_mult(block->first_cn, len, block->portid, 0, GFP_KERNEL);
		/* reset the reply area; cn/msg/cmd are rebuilt on demand */
		block->first_cn->len = 0;
		block->cn = NULL;
		block->msg = NULL;
		block->cmd = NULL;
	}
}

/* Early send when replies aren't bundled.
 * Requesting maxlen bytes forces w1_reply_make_space() to flush whatever
 * has been accumulated so far.
 */
static void w1_netlink_check_send(struct w1_cb_block *block)
{
	if (!(block->request_cn.flags & W1_CN_BUNDLE) && block->cn)
		w1_reply_make_space(block, block->maxlen);
}

/**
 * w1_netlink_setup_msg() - prepare to write block->msg
 * @block: block to operate on
 * @ack: determines if cn can be reused
 *
 * block->cn will be setup with the correct ack, advancing if needed
 * block->cn->len does not include space for block->msg
 * block->msg advances but remains uninitialized
 */
static void w1_netlink_setup_msg(struct w1_cb_block *block, u32 ack)
{
	if (block->cn && block->cn->ack == ack) {
		/* same ack: append to the current cn_msg */
		block->msg = (struct w1_netlink_msg *)(block->cn->data + block->cn->len);
	} else {
		/* advance or set to data */
		if (block->cn)
			block->cn = (struct cn_msg *)(block->cn->data +
				block->cn->len);
		else
			block->cn = block->first_cn;

		/* clone the request header, then tailor len/ack */
		memcpy(block->cn, &block->request_cn, sizeof(*block->cn));
		block->cn->len = 0;
		block->cn->ack = ack;
		block->msg = (struct w1_netlink_msg *)block->cn->data;
	}
}

/* Append cmd to msg, include cmd->data as well. This is because
 * any following data goes with the command and in the case of a read is
 * the results.
 */
static void w1_netlink_queue_cmd(struct w1_cb_block *block,
	struct w1_netlink_cmd *cmd)
{
	u32 space;

	w1_reply_make_space(block, sizeof(struct cn_msg) +
		sizeof(struct w1_netlink_msg) + sizeof(*cmd) + cmd->len);

	/* There's a status message sent after each command, so no point
	 * in trying to bundle this cmd after an existing one, because
	 * there won't be one. Allocate and copy over a new cn_msg.
	 */
	w1_netlink_setup_msg(block, block->request_cn.seq + 1);
	memcpy(block->msg, block->cur_msg, sizeof(*block->msg));
	block->cn->len += sizeof(*block->msg);
	block->msg->len = 0;
	block->cmd = (struct w1_netlink_cmd *)(block->msg->data);

	space = sizeof(*cmd) + cmd->len;
	/* skip the copy when cmd already lives in the reply buffer */
	if (block->cmd != cmd)
		memcpy(block->cmd, cmd, space);
	block->cn->len += space;
	block->msg->len += space;
}

/* Append req_msg and req_cmd, no other commands and no data from req_cmd are
 * copied.
 */
static void w1_netlink_queue_status(struct w1_cb_block *block,
	struct w1_netlink_msg *req_msg, struct w1_netlink_cmd *req_cmd,
	int error)
{
	u16 space = sizeof(struct cn_msg) + sizeof(*req_msg) + sizeof(*req_cmd);

	w1_reply_make_space(block, space);
	/* status replies reuse the original request's ack */
	w1_netlink_setup_msg(block, block->request_cn.ack);

	memcpy(block->msg, req_msg, sizeof(*req_msg));
	block->cn->len += sizeof(*req_msg);
	block->msg->len = 0;
	block->msg->status = (u8)-error;
	if (req_cmd) {
		struct w1_netlink_cmd *cmd = (struct w1_netlink_cmd *)block->msg->data;

		memcpy(cmd, req_cmd, sizeof(*cmd));
		block->cn->len += sizeof(*cmd);
		block->msg->len += sizeof(*cmd);
		/* header only: drop the command's payload length */
		cmd->len = 0;
	}
	w1_netlink_check_send(block);
}

/**
 * w1_netlink_send_error() - sends the error message now
 * @cn: original cn_msg
 * @msg: original w1_netlink_msg
 * @portid: where to send it
 * @error: error status
 *
 * Use when a block isn't available to queue the message to and cn, msg
 * might not be contiguous.
 */
static void w1_netlink_send_error(struct cn_msg *cn, struct w1_netlink_msg *msg,
	int portid, int error)
{
	/* stack packet: connector header plus one w1_netlink_msg, no payload */
	struct {
		struct cn_msg cn;
		struct w1_netlink_msg msg;
	} packet;

	memcpy(&packet.cn, cn, sizeof(packet.cn));
	memcpy(&packet.msg, msg, sizeof(packet.msg));
	packet.cn.len = sizeof(packet.msg);
	packet.msg.len = 0;
	packet.msg.status = (u8)-error;
	cn_netlink_send(&packet.cn, portid, 0, GFP_KERNEL);
}

/**
 * w1_netlink_send() - sends w1 netlink notifications
 * @dev: w1_master the event is associated with or for
 * @msg: w1_netlink_msg message to be sent
 *
 * These are notifications generated from the kernel.
 */
void w1_netlink_send(struct w1_master *dev, struct w1_netlink_msg *msg)
{
	struct {
		struct cn_msg cn;
		struct w1_netlink_msg msg;
	} packet;

	memset(&packet, 0, sizeof(packet));

	packet.cn.id.idx = CN_W1_IDX;
	packet.cn.id.val = CN_W1_VAL;

	packet.cn.seq = dev->seq++;
	packet.cn.len = sizeof(*msg);

	memcpy(&packet.msg, msg, sizeof(*msg));
	packet.msg.len = 0;

	cn_netlink_send(&packet.cn, 0, 0, GFP_KERNEL);
}

/* Append one 64-bit slave id to the reply being built in dev->priv. */
static void w1_send_slave(struct w1_master *dev, u64 rn)
{
	struct w1_cb_block *block = dev->priv;
	/* remember the cmd header in case w1_reply_make_space() flushes */
	struct w1_netlink_cmd *cache_cmd = block->cmd;
	u64 *data;

	w1_reply_make_space(block, sizeof(*data));

	/* Add cmd back if the packet was sent */
	if (!block->cmd) {
		cache_cmd->len = 0;
		w1_netlink_queue_cmd(block, cache_cmd);
	}

	data = (u64 *)(block->cmd->data + block->cmd->len);

	*data = rn;
	block->cn->len += sizeof(*data);
	block->msg->len += sizeof(*data);
	block->cmd->len += sizeof(*data);
}

/* Search callback: record the found slave in the kernel list, then
 * report it in the reply.
 */
static void w1_found_send_slave(struct w1_master *dev, u64 rn)
{
	/* update kernel slave list */
	w1_slave_found(dev, rn);

	w1_send_slave(dev, rn);
}

/* Get the current slave list, or search (with or without alarm) */
static int w1_get_slaves(struct w1_master *dev, struct w1_netlink_cmd *req_cmd)
{
	struct w1_slave *sl;

	req_cmd->len = 0;
	w1_netlink_queue_cmd(dev->priv, req_cmd);

	if (req_cmd->cmd == W1_CMD_LIST_SLAVES) {
		u64 rn;

		mutex_lock(&dev->list_mutex);
		list_for_each_entry(sl, &dev->slist, w1_slave_entry) {
			memcpy(&rn, &sl->reg_num, sizeof(rn));
			w1_send_slave(dev, rn);
		}
		mutex_unlock(&dev->list_mutex);
	} else {
		w1_search_process_cb(dev, req_cmd->cmd == W1_CMD_ALARM_SEARCH ?
			W1_ALARM_SEARCH : W1_SEARCH, w1_found_send_slave);
	}

	return 0;
}

/* Bus I/O commands; touch and read queue their result data as the reply,
 * write produces only the later status message.
 */
static int w1_process_command_io(struct w1_master *dev,
	struct w1_netlink_cmd *cmd)
{
	int err = 0;

	switch (cmd->cmd) {
	case W1_CMD_TOUCH:
		w1_touch_block(dev, cmd->data, cmd->len);
		w1_netlink_queue_cmd(dev->priv, cmd);
		break;
	case W1_CMD_READ:
		w1_read_block(dev, cmd->data, cmd->len);
		w1_netlink_queue_cmd(dev->priv, cmd);
		break;
	case W1_CMD_WRITE:
		w1_write_block(dev, cmd->data, cmd->len);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}

/* Manually attach or detach the slave identified by the w1_reg_num
 * carried in cmd->data.
 */
static int w1_process_command_addremove(struct w1_master *dev,
	struct w1_netlink_cmd *cmd)
{
	struct w1_slave *sl;
	int err = 0;
	struct w1_reg_num *id;

	if (cmd->len != sizeof(*id))
		return -EINVAL;

	id = (struct w1_reg_num *)cmd->data;

	sl = w1_slave_search_device(dev, id);
	switch (cmd->cmd) {
	case W1_CMD_SLAVE_ADD:
		if (sl)
			err = -EINVAL;
		else
			err = w1_attach_slave_device(dev, id);
		break;
	case W1_CMD_SLAVE_REMOVE:
		if (sl)
			w1_slave_detach(sl);
		else
			err = -EINVAL;
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}

/* Dispatch one master-directed command; entered with bus_mutex held
 * (taken in w1_process_cb()).
 */
static int w1_process_command_master(struct w1_master *dev,
	struct w1_netlink_cmd *req_cmd)
{
	int err = -EINVAL;

	/* drop bus_mutex for
	 * search (does its own locking), and add/remove
	 * which doesn't use the bus
	 */
	switch (req_cmd->cmd) {
	case W1_CMD_SEARCH:
	case W1_CMD_ALARM_SEARCH:
	case W1_CMD_LIST_SLAVES:
		mutex_unlock(&dev->bus_mutex);
		err = w1_get_slaves(dev, req_cmd);
		mutex_lock(&dev->bus_mutex);
		break;
	case W1_CMD_READ:
	case W1_CMD_WRITE:
	case W1_CMD_TOUCH:
		err = w1_process_command_io(dev, req_cmd);
		break;
	case W1_CMD_RESET:
		err = w1_reset_bus(dev);
		break;
	case W1_CMD_SLAVE_ADD:
	case W1_CMD_SLAVE_REMOVE:
		mutex_unlock(&dev->bus_mutex);
		mutex_lock(&dev->mutex);
		err = w1_process_command_addremove(dev, req_cmd);
		mutex_unlock(&dev->mutex);
		mutex_lock(&dev->bus_mutex);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}

/* Slave-directed commands are plain I/O on the already selected slave. */
static int w1_process_command_slave(struct w1_slave *sl,
	struct w1_netlink_cmd *cmd)
{
	dev_dbg(&sl->master->dev, "%s: %02x.%012llx.%02x: cmd=%02x, len=%u.\n",
		__func__, sl->reg_num.family, (unsigned long long)sl->reg_num.id,
		sl->reg_num.crc, cmd->cmd, cmd->len);

	return w1_process_command_io(sl->master, cmd);
}

/* W1_LIST_MASTERS: reply with the ids of all registered masters, sending
 * multiple packets when they don't all fit in one page.
 */
static int w1_process_command_root(struct cn_msg *req_cn, u32 portid)
{
	struct w1_master *dev;
	struct cn_msg *cn;
	struct w1_netlink_msg *msg;
	u32 *id;

	cn = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!cn)
		return -ENOMEM;

	cn->id.idx = CN_W1_IDX;
	cn->id.val = CN_W1_VAL;

	cn->seq = req_cn->seq;
	cn->ack = req_cn->seq + 1;
	cn->len = sizeof(struct w1_netlink_msg);
	msg = (struct w1_netlink_msg *)cn->data;

	msg->type = W1_LIST_MASTERS;
	msg->status = 0;
	msg->len = 0;
	id = (u32 *)msg->data;

	mutex_lock(&w1_mlock);
	list_for_each_entry(dev, &w1_masters, w1_master_entry) {
		if (cn->len + sizeof(*id) > PAGE_SIZE - sizeof(struct cn_msg)) {
			/* packet full: send it and start a fresh one */
			cn_netlink_send(cn, portid, 0, GFP_KERNEL);
			cn->len = sizeof(struct w1_netlink_msg);
			msg->len = 0;
			id = (u32 *)msg->data;
		}

		*id = dev->id;
		msg->len += sizeof(*id);
		cn->len += sizeof(*id);
		id++;
	}
	cn_netlink_send(cn, portid, 0, GFP_KERNEL);
	mutex_unlock(&w1_mlock);

	kfree(cn);
	return 0;
}

/* Async worker (node->async.cb), run from the master thread: execute every
 * command in one request message and queue a status reply for each.
 */
static void w1_process_cb(struct w1_master *dev, struct w1_async_cmd *async_cmd)
{
	struct w1_cb_node *node = container_of(async_cmd, struct w1_cb_node,
		async);
	u16 mlen = node->msg->len;
	u16 len;
	int err = 0;
	struct w1_slave *sl = node->sl;
	struct w1_netlink_cmd *cmd = (struct w1_netlink_cmd *)node->msg->data;

	mutex_lock(&dev->bus_mutex);
	dev->priv = node->block;
	if (sl && w1_reset_select_slave(sl))
		err = -ENODEV;
	node->block->cur_msg = node->msg;

	while (mlen && !err) {
		if (cmd->len + sizeof(struct w1_netlink_cmd) > mlen) {
			err = -E2BIG;
			break;
		}

		if (sl)
			err = w1_process_command_slave(sl, cmd);
		else
			err = w1_process_command_master(dev, cmd);
		w1_netlink_check_send(node->block);

		/* per-command status; clear err so later commands still run */
		w1_netlink_queue_status(node->block, node->msg, cmd, err);
		err = 0;

		len = sizeof(*cmd) + cmd->len;
		cmd = (struct w1_netlink_cmd *)((u8 *)cmd + len);
		mlen -= len;
	}

	/* report an error that terminated the loop early (-ENODEV/-E2BIG) */
	if (!cmd || err)
		w1_netlink_queue_status(node->block, node->msg, cmd, err);

	/* ref taken in w1_search_slave or w1_search_master_id when building
	 * the block
	 */
	if (sl)
		w1_unref_slave(sl);
	else
		atomic_dec(&dev->refcnt);
	dev->priv = NULL;
	mutex_unlock(&dev->bus_mutex);

	mutex_lock(&dev->list_mutex);
	list_del(&async_cmd->async_entry);
	mutex_unlock(&dev->list_mutex);

	w1_unref_block(node->block);
}

/* Count the commands in msg (into *cmd_count) and grow *slave_len by an
 * estimate of the reply bytes any search/list commands will produce.
 */
static void w1_list_count_cmds(struct w1_netlink_msg *msg, int *cmd_count,
	u16 *slave_len)
{
	struct w1_netlink_cmd *cmd = (struct w1_netlink_cmd *)msg->data;
	u16 mlen = msg->len;
	u16 len;
	int slave_list = 0;

	while (mlen) {
		if (cmd->len + sizeof(struct w1_netlink_cmd) > mlen)
			break;

		switch (cmd->cmd) {
		case W1_CMD_SEARCH:
		case W1_CMD_ALARM_SEARCH:
		case W1_CMD_LIST_SLAVES:
			++slave_list;
		}
		++*cmd_count;
		len = sizeof(*cmd) + cmd->len;
		cmd = (struct w1_netlink_cmd *)((u8 *)cmd + len);
		mlen -= len;
	}

	if (slave_list) {
		struct w1_master *dev = w1_search_master_id(msg->id.mst.id);

		if (dev) {
			/* Bytes, and likely an overestimate, and if it isn't
			 * the results can still be split between packets.
			 */
			*slave_len += sizeof(struct w1_reg_num) * slave_list *
				(dev->slave_count + dev->max_slave_count);
			/* search incremented it */
			atomic_dec(&dev->refcnt);
		}
	}
}

/* Connector callback: entry point for all userspace w1 requests. */
static void w1_cn_callback(struct cn_msg *cn, struct netlink_skb_parms *nsp)
{
	struct w1_netlink_msg *msg = (struct w1_netlink_msg *)(cn + 1);
	struct w1_slave *sl;
	struct w1_master *dev;
	u16 msg_len;
	u16 slave_len = 0;
	int err = 0;
	struct w1_cb_block *block = NULL;
	struct w1_cb_node *node = NULL;
	int node_count = 0;
	int cmd_count = 0;

	/* If any unknown flag is set let the application know, that way
	 * applications can detect the absence of features in kernels that
	 * don't know about them. http://lwn.net/Articles/587527/
	 */
	if (cn->flags & ~(W1_CN_BUNDLE)) {
		w1_netlink_send_error(cn, msg, nsp->portid, -EINVAL);
		return;
	}

	/* Count the number of master or slave commands there are to allocate
	 * space for one cb_node each.
	 */
	msg_len = cn->len;
	while (msg_len && !err) {
		if (msg->len + sizeof(struct w1_netlink_msg) > msg_len) {
			err = -E2BIG;
			break;
		}

		/* count messages for nodes and allocate any additional space
		 * required for slave lists
		 */
		if (msg->type == W1_MASTER_CMD || msg->type == W1_SLAVE_CMD) {
			++node_count;
			w1_list_count_cmds(msg, &cmd_count, &slave_len);
		}

		msg_len -= sizeof(struct w1_netlink_msg) + msg->len;
		msg = (struct w1_netlink_msg *)(((u8 *)msg) +
			sizeof(struct w1_netlink_msg) + msg->len);
	}
	/* rewind for the second (processing) pass */
	msg = (struct w1_netlink_msg *)(cn + 1);
	if (node_count) {
		int size;
		int reply_size = sizeof(*cn) + cn->len + slave_len;

		if (cn->flags & W1_CN_BUNDLE) {
			/* bundling duplicates some of the messages */
			reply_size += 2 * cmd_count * (sizeof(struct cn_msg) +
				sizeof(struct w1_netlink_msg) +
				sizeof(struct w1_netlink_cmd));
		}
		reply_size = min(CONNECTOR_MAX_MSG_SIZE, reply_size);

		/* allocate space for the block, a copy of the original message,
		 * one node per cmd to point into the original message,
		 * space for replies which is the original message size plus
		 * space for any list slave data and status messages
		 * cn->len doesn't include itself which is part of the block
		 */
		size = /* block + original message */
			sizeof(struct w1_cb_block) + sizeof(*cn) + cn->len +
			/* space for nodes */
			node_count * sizeof(struct w1_cb_node) +
			/* replies */
			sizeof(struct cn_msg) + reply_size;
		block = kzalloc(size, GFP_KERNEL);
		if (!block) {
			/* if the system is already out of memory,
			 * (A) will this work, and (B) would it be better
			 * to not try?
			 */
			w1_netlink_send_error(cn, msg, nsp->portid, -ENOMEM);
			return;
		}
		atomic_set(&block->refcnt, 1);
		block->portid = nsp->portid;
		memcpy(&block->request_cn, cn, sizeof(*cn) + cn->len);
		node = (struct w1_cb_node *)(block->request_cn.data + cn->len);

		/* Sneaky, when not bundling, reply_size is the allocated space
		 * required for the reply, cn_msg isn't part of maxlen so
		 * it should be reply_size - sizeof(struct cn_msg), however
		 * when checking if there is enough space, w1_reply_make_space
		 * is called with the full message size including cn_msg,
		 * because it isn't known at that time if an additional cn_msg
		 * will need to be allocated. So an extra cn_msg is added
		 * above in "size".
		 */
		block->maxlen = reply_size;
		block->first_cn = (struct cn_msg *)(node + node_count);
		memset(block->first_cn, 0, sizeof(*block->first_cn));
	}

	msg_len = cn->len;
	while (msg_len && !err) {

		dev = NULL;
		sl = NULL;

		if (msg->len + sizeof(struct w1_netlink_msg) > msg_len) {
			err = -E2BIG;
			break;
		}

		/* execute on this thread, no need to process later */
		if (msg->type == W1_LIST_MASTERS) {
			err = w1_process_command_root(cn, nsp->portid);
			goto out_cont;
		}

		/* All following message types require additional data,
		 * check here before references are taken.
		 */
		if (!msg->len) {
			err = -EPROTO;
			goto out_cont;
		}

		/* both search calls take references */
		if (msg->type == W1_MASTER_CMD) {
			dev = w1_search_master_id(msg->id.mst.id);
		} else if (msg->type == W1_SLAVE_CMD) {
			sl = w1_search_slave((struct w1_reg_num *)msg->id.id);
			if (sl)
				dev = sl->master;
		} else {
			pr_notice("%s: cn: %x.%x, wrong type: %u, len: %u.\n",
				__func__, cn->id.idx, cn->id.val,
				msg->type, msg->len);
			err = -EPROTO;
			goto out_cont;
		}

		if (!dev) {
			err = -ENODEV;
			goto out_cont;
		}

		err = 0;

		/* hand the message off to the master thread for processing */
		atomic_inc(&block->refcnt);
		node->async.cb = w1_process_cb;
		node->block = block;
		node->msg = (struct w1_netlink_msg *)((u8 *)&block->request_cn +
			(size_t)((u8 *)msg - (u8 *)cn));
		node->sl = sl;
		node->dev = dev;

		mutex_lock(&dev->list_mutex);
		list_add_tail(&node->async.async_entry, &dev->async_list);
		wake_up_process(dev->thread);
		mutex_unlock(&dev->list_mutex);
		++node;

out_cont:
		/* Can't queue because that modifies block and another
		 * thread could be processing the messages by now and
		 * there isn't a lock, send directly.
		 */
		if (err)
			w1_netlink_send_error(cn, msg, nsp->portid, err);
		msg_len -= sizeof(struct w1_netlink_msg) + msg->len;
		msg = (struct w1_netlink_msg *)(((u8 *)msg) +
			sizeof(struct w1_netlink_msg) + msg->len);

		/*
		 * Let's allow requests for nonexisting devices.
		 */
		if (err == -ENODEV)
			err = 0;
	}
	/* drop the initial reference; last put flushes pending replies */
	if (block)
		w1_unref_block(block);
}

/* Register the w1 connector callback with the connector subsystem. */
int w1_init_netlink(void)
{
	struct cb_id w1_id = {.idx = CN_W1_IDX, .val = CN_W1_VAL};

	return cn_add_callback(&w1_id, "w1", &w1_cn_callback);
}

/* Unregister the w1 connector callback. */
void w1_fini_netlink(void)
{
	struct cb_id w1_id = {.idx = CN_W1_IDX, .val = CN_W1_VAL};

	cn_del_callback(&w1_id);
}
#else
/* Stubs used when connector support isn't available. */
void w1_netlink_send(struct w1_master *dev, struct w1_netlink_msg *cn)
{
}

int w1_init_netlink(void)
{
	return 0;
}

void w1_fini_netlink(void)
{
}
#endif