xref: /openbmc/linux/drivers/w1/w1_netlink.c (revision 2aa1f7a1)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2003 Evgeniy Polyakov <zbr@ioremap.net>
 */

#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/netlink.h>
#include <linux/connector.h>

#include "w1_internal.h"
#include "w1_netlink.h"

#if defined(CONFIG_W1_CON) && (defined(CONFIG_CONNECTOR) || (defined(CONFIG_CONNECTOR_MODULE) && defined(CONFIG_W1_MODULE)))

/* Bundle together everything required to process a request in one memory
 * allocation.
 */
struct w1_cb_block {
	atomic_t refcnt;
	u32 portid; /* Sending process port ID */
	/* maximum value for first_cn->len */
	u16 maxlen;
	/* pointers for building up the reply message */
	struct cn_msg *first_cn; /* fixed once the structure is populated */
	struct cn_msg *cn; /* advances as cn_msg is appended */
	struct w1_netlink_msg *msg; /* advances as w1_netlink_msg is appended */
	struct w1_netlink_cmd *cmd; /* advances as cmds are appended */
	struct w1_netlink_msg *cur_msg; /* message currently being processed */
	/* copy of the original request follows */
	struct cn_msg request_cn;
	/* followed by variable length:
	 * cn_msg, data (w1_netlink_msg and w1_netlink_cmd)
	 * one or more struct w1_cb_node
	 * reply first_cn, data (w1_netlink_msg and w1_netlink_cmd)
	 */
};
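
/* Illustrative layout of the single allocation (a sketch; actual sizes
 * depend on the request, see w1_cn_callback()):
 *
 *   [ struct w1_cb_block fixed fields     ]
 *   [ request_cn + copy of request data   ]  <- cur_msg points in here
 *   [ node_count * struct w1_cb_node      ]
 *   [ reply buffer starting at first_cn   ]  <- cn/msg/cmd advance here
 */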
struct w1_cb_node {
	struct w1_async_cmd async;
	/* pointers within w1_cb_block and cn data */
	struct w1_cb_block *block;
	struct w1_netlink_msg *msg;
	struct w1_slave *sl;
	struct w1_master *dev;
};

/**
 * w1_reply_len() - calculate current reply length, compare to maxlen
 * @block: block to calculate
 *
 * Calculates the current message length including possible multiple
 * cn_msg and data, excludes the first sizeof(struct cn_msg).  Directly
 * comparable to maxlen and usable to send the message.
 */
static u16 w1_reply_len(struct w1_cb_block *block)
{
	if (!block->cn)
		return 0;
	return (u8 *)block->cn - (u8 *)block->first_cn + block->cn->len;
}

static void w1_unref_block(struct w1_cb_block *block)
{
	if (atomic_sub_return(1, &block->refcnt) == 0) {
		u16 len = w1_reply_len(block);
		if (len) {
			cn_netlink_send_mult(block->first_cn, len,
					     block->portid, 0,
					     GFP_KERNEL, NULL, NULL);
		}
		kfree(block);
	}
}

/**
 * w1_reply_make_space() - send message if needed to make space
 * @block: block to make space on
 * @space: how many bytes requested
 *
 * Verify there is enough room left for the caller to add "space" bytes to
 * the message; if there isn't, send the message and reset.
 */
static void w1_reply_make_space(struct w1_cb_block *block, u16 space)
{
	u16 len = w1_reply_len(block);
	if (len + space >= block->maxlen) {
		cn_netlink_send_mult(block->first_cn, len, block->portid,
				     0, GFP_KERNEL, NULL, NULL);
		block->first_cn->len = 0;
		block->cn = NULL;
		block->msg = NULL;
		block->cmd = NULL;
	}
}
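
/* For example (numbers illustrative only): with maxlen == 4096 and a
 * reply already 4000 bytes long, a request for 200 more bytes flushes
 * the pending reply via cn_netlink_send_mult() and resets the builder
 * pointers, so the next append starts a fresh cn_msg.
 */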

/* Early send when replies aren't bundled. */
static void w1_netlink_check_send(struct w1_cb_block *block)
{
	if (!(block->request_cn.flags & W1_CN_BUNDLE) && block->cn)
		w1_reply_make_space(block, block->maxlen);
}

/**
 * w1_netlink_setup_msg() - prepare to write block->msg
 * @block: block to operate on
 * @ack: determines if cn can be reused
 *
 * block->cn will be set up with the correct ack, advancing if needed.
 * block->cn->len does not include space for block->msg.
 * block->msg advances but remains uninitialized.
 */
static void w1_netlink_setup_msg(struct w1_cb_block *block, u32 ack)
{
	if (block->cn && block->cn->ack == ack) {
		block->msg = (struct w1_netlink_msg *)(block->cn->data + block->cn->len);
	} else {
		/* advance or set to data */
		if (block->cn)
			block->cn = (struct cn_msg *)(block->cn->data +
				block->cn->len);
		else
			block->cn = block->first_cn;

		memcpy(block->cn, &block->request_cn, sizeof(*block->cn));
		block->cn->len = 0;
		block->cn->ack = ack;
		block->msg = (struct w1_netlink_msg *)block->cn->data;
	}
}

/* Append cmd to msg, including cmd->data, because any data that follows
 * belongs with the command (for a read, it is the results).
 */
static void w1_netlink_queue_cmd(struct w1_cb_block *block,
	struct w1_netlink_cmd *cmd)
{
	u32 space;
	w1_reply_make_space(block, sizeof(struct cn_msg) +
		sizeof(struct w1_netlink_msg) + sizeof(*cmd) + cmd->len);

	/* There's a status message sent after each command, so no point
	 * in trying to bundle this cmd after an existing one, because
	 * there won't be one.  Allocate and copy over a new cn_msg.
	 */
	w1_netlink_setup_msg(block, block->request_cn.seq + 1);
	memcpy(block->msg, block->cur_msg, sizeof(*block->msg));
	block->cn->len += sizeof(*block->msg);
	block->msg->len = 0;
	block->cmd = (struct w1_netlink_cmd *)(block->msg->data);

	space = sizeof(*cmd) + cmd->len;
	if (block->cmd != cmd)
		memcpy(block->cmd, cmd, space);
	block->cn->len += space;
	block->msg->len += space;
}

/* Append req_msg and req_cmd; no other commands and no data from req_cmd
 * are copied.
 */
static void w1_netlink_queue_status(struct w1_cb_block *block,
	struct w1_netlink_msg *req_msg, struct w1_netlink_cmd *req_cmd,
	int error)
{
	u16 space = sizeof(struct cn_msg) + sizeof(*req_msg) + sizeof(*req_cmd);
	w1_reply_make_space(block, space);
	w1_netlink_setup_msg(block, block->request_cn.ack);

	memcpy(block->msg, req_msg, sizeof(*req_msg));
	block->cn->len += sizeof(*req_msg);
	block->msg->len = 0;
	block->msg->status = (u8)-error;
	if (req_cmd) {
		struct w1_netlink_cmd *cmd = (struct w1_netlink_cmd *)block->msg->data;
		memcpy(cmd, req_cmd, sizeof(*cmd));
		block->cn->len += sizeof(*cmd);
		block->msg->len += sizeof(*cmd);
		cmd->len = 0;
	}
	w1_netlink_check_send(block);
}

/**
 * w1_netlink_send_error() - sends the error message now
 * @cn: original cn_msg
 * @msg: original w1_netlink_msg
 * @portid: where to send it
 * @error: error status
 *
 * Use when a block isn't available to queue the message to, and cn and
 * msg might not be contiguous.
 */
static void w1_netlink_send_error(struct cn_msg *cn, struct w1_netlink_msg *msg,
	int portid, int error)
{
	struct {
		struct cn_msg cn;
		struct w1_netlink_msg msg;
	} packet;
	memcpy(&packet.cn, cn, sizeof(packet.cn));
	memcpy(&packet.msg, msg, sizeof(packet.msg));
	packet.cn.len = sizeof(packet.msg);
	packet.msg.len = 0;
	packet.msg.status = (u8)-error;
	cn_netlink_send(&packet.cn, portid, 0, GFP_KERNEL);
}

/**
 * w1_netlink_send() - sends w1 netlink notifications
 * @dev: w1_master the event is associated with or for
 * @msg: w1_netlink_msg message to be sent
 *
 * These are notifications generated from the kernel.
 */
void w1_netlink_send(struct w1_master *dev, struct w1_netlink_msg *msg)
{
	struct {
		struct cn_msg cn;
		struct w1_netlink_msg msg;
	} packet;
	memset(&packet, 0, sizeof(packet));

	packet.cn.id.idx = CN_W1_IDX;
	packet.cn.id.val = CN_W1_VAL;

	packet.cn.seq = dev->seq++;
	packet.cn.len = sizeof(*msg);

	memcpy(&packet.msg, msg, sizeof(*msg));
	packet.msg.len = 0;

	cn_netlink_send(&packet.cn, 0, 0, GFP_KERNEL);
}
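
/* Usage sketch (illustrative; mirrors how the w1 core announces slave
 * add/remove events, the sl variable is assumed):
 *
 *	struct w1_netlink_msg msg;
 *
 *	memset(&msg, 0, sizeof(msg));
 *	msg.type = W1_SLAVE_ADD;
 *	memcpy(msg.id.id, &sl->reg_num, sizeof(msg.id.id));
 *	w1_netlink_send(sl->master, &msg);
 */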

static void w1_send_slave(struct w1_master *dev, u64 rn)
{
	struct w1_cb_block *block = dev->priv;
	struct w1_netlink_cmd *cache_cmd = block->cmd;
	u64 *data;

	w1_reply_make_space(block, sizeof(*data));

	/* Add cmd back if the packet was sent */
	if (!block->cmd) {
		cache_cmd->len = 0;
		w1_netlink_queue_cmd(block, cache_cmd);
	}

	data = (u64 *)(block->cmd->data + block->cmd->len);

	*data = rn;
	block->cn->len += sizeof(*data);
	block->msg->len += sizeof(*data);
	block->cmd->len += sizeof(*data);
}

static void w1_found_send_slave(struct w1_master *dev, u64 rn)
{
	/* update kernel slave list */
	w1_slave_found(dev, rn);

	w1_send_slave(dev, rn);
}

/* Get the current slave list, or search (with or without alarm) */
static int w1_get_slaves(struct w1_master *dev, struct w1_netlink_cmd *req_cmd)
{
	struct w1_slave *sl;

	req_cmd->len = 0;
	w1_netlink_queue_cmd(dev->priv, req_cmd);

	if (req_cmd->cmd == W1_CMD_LIST_SLAVES) {
		u64 rn;
		mutex_lock(&dev->list_mutex);
		list_for_each_entry(sl, &dev->slist, w1_slave_entry) {
			memcpy(&rn, &sl->reg_num, sizeof(rn));
			w1_send_slave(dev, rn);
		}
		mutex_unlock(&dev->list_mutex);
	} else {
		w1_search_process_cb(dev, req_cmd->cmd == W1_CMD_ALARM_SEARCH ?
			W1_ALARM_SEARCH : W1_SEARCH, w1_found_send_slave);
	}

	return 0;
}

static int w1_process_command_io(struct w1_master *dev,
	struct w1_netlink_cmd *cmd)
{
	int err = 0;

	switch (cmd->cmd) {
	case W1_CMD_TOUCH:
		w1_touch_block(dev, cmd->data, cmd->len);
		w1_netlink_queue_cmd(dev->priv, cmd);
		break;
	case W1_CMD_READ:
		w1_read_block(dev, cmd->data, cmd->len);
		w1_netlink_queue_cmd(dev->priv, cmd);
		break;
	case W1_CMD_WRITE:
		w1_write_block(dev, cmd->data, cmd->len);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}

static int w1_process_command_addremove(struct w1_master *dev,
	struct w1_netlink_cmd *cmd)
{
	struct w1_slave *sl;
	int err = 0;
	struct w1_reg_num *id;

	if (cmd->len != sizeof(*id))
		return -EINVAL;

	id = (struct w1_reg_num *)cmd->data;

	sl = w1_slave_search_device(dev, id);
	switch (cmd->cmd) {
	case W1_CMD_SLAVE_ADD:
		if (sl)
			err = -EINVAL;
		else
			err = w1_attach_slave_device(dev, id);
		break;
	case W1_CMD_SLAVE_REMOVE:
		if (sl)
			w1_slave_detach(sl);
		else
			err = -EINVAL;
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}

static int w1_process_command_master(struct w1_master *dev,
	struct w1_netlink_cmd *req_cmd)
{
	int err = -EINVAL;

	/* drop bus_mutex for search (it does its own locking), and for
	 * add/remove, which doesn't use the bus
	 */
	switch (req_cmd->cmd) {
	case W1_CMD_SEARCH:
	case W1_CMD_ALARM_SEARCH:
	case W1_CMD_LIST_SLAVES:
		mutex_unlock(&dev->bus_mutex);
		err = w1_get_slaves(dev, req_cmd);
		mutex_lock(&dev->bus_mutex);
		break;
	case W1_CMD_READ:
	case W1_CMD_WRITE:
	case W1_CMD_TOUCH:
		err = w1_process_command_io(dev, req_cmd);
		break;
	case W1_CMD_RESET:
		err = w1_reset_bus(dev);
		break;
	case W1_CMD_SLAVE_ADD:
	case W1_CMD_SLAVE_REMOVE:
		mutex_unlock(&dev->bus_mutex);
		mutex_lock(&dev->mutex);
		err = w1_process_command_addremove(dev, req_cmd);
		mutex_unlock(&dev->mutex);
		mutex_lock(&dev->bus_mutex);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}

static int w1_process_command_slave(struct w1_slave *sl,
		struct w1_netlink_cmd *cmd)
{
	dev_dbg(&sl->master->dev, "%s: %02x.%012llx.%02x: cmd=%02x, len=%u.\n",
		__func__, sl->reg_num.family, (unsigned long long)sl->reg_num.id,
		sl->reg_num.crc, cmd->cmd, cmd->len);

	return w1_process_command_io(sl->master, cmd);
}

static int w1_process_command_root(struct cn_msg *req_cn, u32 portid)
{
	struct w1_master *dev;
	struct cn_msg *cn;
	struct w1_netlink_msg *msg;
	u32 *id;

	cn = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!cn)
		return -ENOMEM;

	cn->id.idx = CN_W1_IDX;
	cn->id.val = CN_W1_VAL;

	cn->seq = req_cn->seq;
	cn->ack = req_cn->seq + 1;
	cn->len = sizeof(struct w1_netlink_msg);
	msg = (struct w1_netlink_msg *)cn->data;

	msg->type = W1_LIST_MASTERS;
	msg->status = 0;
	msg->len = 0;
	id = (u32 *)msg->data;

	mutex_lock(&w1_mlock);
	list_for_each_entry(dev, &w1_masters, w1_master_entry) {
		if (cn->len + sizeof(*id) > PAGE_SIZE - sizeof(struct cn_msg)) {
			cn_netlink_send(cn, portid, 0, GFP_KERNEL);
			cn->len = sizeof(struct w1_netlink_msg);
			msg->len = 0;
			id = (u32 *)msg->data;
		}

		*id = dev->id;
		msg->len += sizeof(*id);
		cn->len += sizeof(*id);
		id++;
	}
	cn_netlink_send(cn, portid, 0, GFP_KERNEL);
	mutex_unlock(&w1_mlock);

	kfree(cn);
	return 0;
}
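
/* Request sketch (illustrative, not a complete userspace program; cn and
 * msg would be packed into one netlink payload): a W1_LIST_MASTERS query
 * is a cn_msg followed by an empty w1_netlink_msg, sent over a
 * NETLINK_CONNECTOR socket:
 *
 *	struct cn_msg cn = {
 *		.id  = { .idx = CN_W1_IDX, .val = CN_W1_VAL },
 *		.seq = seq,
 *		.len = sizeof(struct w1_netlink_msg),
 *	};
 *	struct w1_netlink_msg msg = { .type = W1_LIST_MASTERS };
 *
 * The reply built above carries one or more u32 master ids in msg->data,
 * split across packets when a page fills up.
 */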

static void w1_process_cb(struct w1_master *dev, struct w1_async_cmd *async_cmd)
{
	struct w1_cb_node *node = container_of(async_cmd, struct w1_cb_node,
		async);
	u16 mlen = node->msg->len;
	u16 len;
	int err = 0;
	struct w1_slave *sl = node->sl;
	struct w1_netlink_cmd *cmd = (struct w1_netlink_cmd *)node->msg->data;

	mutex_lock(&dev->bus_mutex);
	dev->priv = node->block;
	if (sl && w1_reset_select_slave(sl))
		err = -ENODEV;
	node->block->cur_msg = node->msg;

	while (mlen && !err) {
		if (cmd->len + sizeof(struct w1_netlink_cmd) > mlen) {
			err = -E2BIG;
			break;
		}

		if (sl)
			err = w1_process_command_slave(sl, cmd);
		else
			err = w1_process_command_master(dev, cmd);
		w1_netlink_check_send(node->block);

		w1_netlink_queue_status(node->block, node->msg, cmd, err);
		err = 0;

		len = sizeof(*cmd) + cmd->len;
		cmd = (struct w1_netlink_cmd *)((u8 *)cmd + len);
		mlen -= len;
	}

	if (!cmd || err)
		w1_netlink_queue_status(node->block, node->msg, cmd, err);

	/* ref taken in w1_search_slave or w1_search_master_id when building
	 * the block
	 */
	if (sl)
		w1_unref_slave(sl);
	else
		atomic_dec(&dev->refcnt);
	dev->priv = NULL;
	mutex_unlock(&dev->bus_mutex);

	mutex_lock(&dev->list_mutex);
	list_del(&async_cmd->async_entry);
	mutex_unlock(&dev->list_mutex);

	w1_unref_block(node->block);
}

static void w1_list_count_cmds(struct w1_netlink_msg *msg, int *cmd_count,
	u16 *slave_len)
{
	struct w1_netlink_cmd *cmd = (struct w1_netlink_cmd *)msg->data;
	u16 mlen = msg->len;
	u16 len;
	int slave_list = 0;
	while (mlen) {
		if (cmd->len + sizeof(struct w1_netlink_cmd) > mlen)
			break;

		switch (cmd->cmd) {
		case W1_CMD_SEARCH:
		case W1_CMD_ALARM_SEARCH:
		case W1_CMD_LIST_SLAVES:
			++slave_list;
		}
		++*cmd_count;
		len = sizeof(*cmd) + cmd->len;
		cmd = (struct w1_netlink_cmd *)((u8 *)cmd + len);
		mlen -= len;
	}

	if (slave_list) {
		struct w1_master *dev = w1_search_master_id(msg->id.mst.id);
		if (dev) {
			/* In bytes, and likely an overestimate; if it isn't,
			 * the results can still be split between packets.
			 */
			*slave_len += sizeof(struct w1_reg_num) * slave_list *
				(dev->slave_count + dev->max_slave_count);
			/* search incremented it */
			atomic_dec(&dev->refcnt);
		}
	}
}
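
/* Worked example (numbers illustrative): a single W1_CMD_LIST_SLAVES
 * command on a master with slave_count == 2 and max_slave_count == 10
 * reserves sizeof(struct w1_reg_num) * 1 * (2 + 10) == 96 bytes of
 * reply space for the 64-bit registration numbers.
 */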

static void w1_cn_callback(struct cn_msg *cn, struct netlink_skb_parms *nsp)
{
	struct w1_netlink_msg *msg = (struct w1_netlink_msg *)(cn + 1);
	struct w1_slave *sl;
	struct w1_master *dev;
	u16 msg_len;
	u16 slave_len = 0;
	int err = 0;
	struct w1_cb_block *block = NULL;
	struct w1_cb_node *node = NULL;
	int node_count = 0;
	int cmd_count = 0;

	/* If any unknown flag is set, let the application know; that way
	 * applications can detect the absence of features in kernels that
	 * don't know about them.  http://lwn.net/Articles/587527/
	 */
	if (cn->flags & ~(W1_CN_BUNDLE)) {
		w1_netlink_send_error(cn, msg, nsp->portid, -EINVAL);
		return;
	}

	/* Count the number of master or slave commands there are to allocate
	 * space for one cb_node each.
	 */
	msg_len = cn->len;
	while (msg_len && !err) {
		if (msg->len + sizeof(struct w1_netlink_msg) > msg_len) {
			err = -E2BIG;
			break;
		}

		/* count messages for nodes and allocate any additional space
		 * required for slave lists
		 */
		if (msg->type == W1_MASTER_CMD || msg->type == W1_SLAVE_CMD) {
			++node_count;
			w1_list_count_cmds(msg, &cmd_count, &slave_len);
		}

		msg_len -= sizeof(struct w1_netlink_msg) + msg->len;
		msg = (struct w1_netlink_msg *)(((u8 *)msg) +
			sizeof(struct w1_netlink_msg) + msg->len);
	}
	msg = (struct w1_netlink_msg *)(cn + 1);
	if (node_count) {
		int size;
		int reply_size = sizeof(*cn) + cn->len + slave_len;
		if (cn->flags & W1_CN_BUNDLE) {
			/* bundling duplicates some of the messages */
			reply_size += 2 * cmd_count * (sizeof(struct cn_msg) +
				sizeof(struct w1_netlink_msg) +
				sizeof(struct w1_netlink_cmd));
		}
		reply_size = min(CONNECTOR_MAX_MSG_SIZE, reply_size);

		/* Allocate space for the block, a copy of the original
		 * message, one node per cmd to point into the original
		 * message, and space for replies, which is the original
		 * message size plus space for any list slave data and
		 * status messages.  cn->len doesn't include the cn_msg
		 * header itself, which is part of the block.
		 */
		size =  /* block + original message */
			sizeof(struct w1_cb_block) + sizeof(*cn) + cn->len +
			/* space for nodes */
			node_count * sizeof(struct w1_cb_node) +
			/* replies */
			sizeof(struct cn_msg) + reply_size;
		block = kzalloc(size, GFP_KERNEL);
		if (!block) {
			/* if the system is already out of memory,
			 * (A) will this work, and (B) would it be better
			 * to not try?
			 */
			w1_netlink_send_error(cn, msg, nsp->portid, -ENOMEM);
			return;
		}
		atomic_set(&block->refcnt, 1);
		block->portid = nsp->portid;
		block->request_cn = *cn;
		memcpy(block->request_cn.data, cn->data, cn->len);
		node = (struct w1_cb_node *)(block->request_cn.data + cn->len);

		/* Sneaky: when not bundling, reply_size is the allocated
		 * space required for the reply.  cn_msg isn't part of maxlen,
		 * so it should be reply_size - sizeof(struct cn_msg); however,
		 * when checking if there is enough space, w1_reply_make_space
		 * is called with the full message size including cn_msg,
		 * because it isn't known at that time if an additional cn_msg
		 * will need to be allocated.  So an extra cn_msg is added
		 * above in "size".
		 */
		block->maxlen = reply_size;
		block->first_cn = (struct cn_msg *)(node + node_count);
		memset(block->first_cn, 0, sizeof(*block->first_cn));
	}

	msg_len = cn->len;
	while (msg_len && !err) {

		dev = NULL;
		sl = NULL;

		if (msg->len + sizeof(struct w1_netlink_msg) > msg_len) {
			err = -E2BIG;
			break;
		}

		/* execute on this thread, no need to process later */
		if (msg->type == W1_LIST_MASTERS) {
			err = w1_process_command_root(cn, nsp->portid);
			goto out_cont;
		}

		/* All following message types require additional data,
		 * check here before references are taken.
		 */
		if (!msg->len) {
			err = -EPROTO;
			goto out_cont;
		}

		/* both search calls take references */
		if (msg->type == W1_MASTER_CMD) {
			dev = w1_search_master_id(msg->id.mst.id);
		} else if (msg->type == W1_SLAVE_CMD) {
			sl = w1_search_slave((struct w1_reg_num *)msg->id.id);
			if (sl)
				dev = sl->master;
		} else {
			pr_notice("%s: cn: %x.%x, wrong type: %u, len: %u.\n",
				__func__, cn->id.idx, cn->id.val,
				msg->type, msg->len);
			err = -EPROTO;
			goto out_cont;
		}

		if (!dev) {
			err = -ENODEV;
			goto out_cont;
		}

		err = 0;

		atomic_inc(&block->refcnt);
		node->async.cb = w1_process_cb;
		node->block = block;
		node->msg = (struct w1_netlink_msg *)((u8 *)&block->request_cn +
			(size_t)((u8 *)msg - (u8 *)cn));
		node->sl = sl;
		node->dev = dev;

		mutex_lock(&dev->list_mutex);
		list_add_tail(&node->async.async_entry, &dev->async_list);
		wake_up_process(dev->thread);
		mutex_unlock(&dev->list_mutex);
		++node;

out_cont:
		/* Can't queue, because that modifies block, another thread
		 * could be processing the messages by now, and there isn't
		 * a lock; send directly.
		 */
		if (err)
			w1_netlink_send_error(cn, msg, nsp->portid, err);
		msg_len -= sizeof(struct w1_netlink_msg) + msg->len;
		msg = (struct w1_netlink_msg *)(((u8 *)msg) +
			sizeof(struct w1_netlink_msg) + msg->len);

		/*
		 * Let's allow requests for nonexistent devices.
		 */
		if (err == -ENODEV)
			err = 0;
	}
	if (block)
		w1_unref_block(block);
}

int w1_init_netlink(void)
{
	struct cb_id w1_id = {.idx = CN_W1_IDX, .val = CN_W1_VAL};

	return cn_add_callback(&w1_id, "w1", &w1_cn_callback);
}

void w1_fini_netlink(void)
{
	struct cb_id w1_id = {.idx = CN_W1_IDX, .val = CN_W1_VAL};

	cn_del_callback(&w1_id);
}
#else
void w1_netlink_send(struct w1_master *dev, struct w1_netlink_msg *cn)
{
}

int w1_init_netlink(void)
{
	return 0;
}

void w1_fini_netlink(void)
{
}
#endif