xref: /openbmc/linux/drivers/connector/connector.c (revision 82e6fdd6)
/*
 *	connector.c
 *
 * 2004+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <linux/moduleparam.h>
#include <linux/connector.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/proc_fs.h>
#include <linux/spinlock.h>

#include <net/sock.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>");
MODULE_DESCRIPTION("Generic userspace <-> kernelspace connector.");
MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_CONNECTOR);

static struct cn_dev cdev;

static int cn_already_initialized;

/*
 * Sends multiple cn_msg messages at a time.
 *
 * msg->seq and msg->ack are used to determine message genealogy.
 * When someone sends a message, it puts a locally unique sequence
 * number and a random acknowledge number in it.  The sequence number
 * may be copied into nlmsghdr->nlmsg_seq as well.
 *
 * The sequence number is incremented with each message to be sent.
 *
 * If we expect a reply to our message, then the sequence number in the
 * received message MUST be the same as in the original message, and
 * the acknowledge number MUST be the same + 1.
 *
 * If we receive a message and its sequence number is not equal to the
 * one we are expecting, then it is a new message.
 *
 * If we receive a message and its sequence number is the same as the
 * one we are expecting, but its acknowledge number is not equal to the
 * acknowledge number in the original message + 1, then it is also a
 * new message.
 *
 * If msg->len != len, then additional cn_msg messages are expected to
 * follow the first msg.
 *
 * The message is sent to the portid if one is given, to the group if one
 * is given, to both if both are given, or, if both are zero, the group is
 * looked up from the registered callbacks and the message is sent there.
 */
int cn_netlink_send_mult(struct cn_msg *msg, u16 len, u32 portid, u32 __group,
	gfp_t gfp_mask)
{
	struct cn_callback_entry *__cbq;
	unsigned int size;
	struct sk_buff *skb;
	struct nlmsghdr *nlh;
	struct cn_msg *data;
	struct cn_dev *dev = &cdev;
	u32 group = 0;
	int found = 0;

	if (portid || __group) {
		group = __group;
	} else {
		spin_lock_bh(&dev->cbdev->queue_lock);
		list_for_each_entry(__cbq, &dev->cbdev->queue_list,
				    callback_entry) {
			if (cn_cb_equal(&__cbq->id.id, &msg->id)) {
				found = 1;
				group = __cbq->group;
				break;
			}
		}
		spin_unlock_bh(&dev->cbdev->queue_lock);

		if (!found)
			return -ENODEV;
	}

	if (!portid && !netlink_has_listeners(dev->nls, group))
		return -ESRCH;

	size = sizeof(*msg) + len;

	skb = nlmsg_new(size, gfp_mask);
	if (!skb)
		return -ENOMEM;

	nlh = nlmsg_put(skb, 0, msg->seq, NLMSG_DONE, size, 0);
	if (!nlh) {
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	data = nlmsg_data(nlh);

	memcpy(data, msg, size);

	NETLINK_CB(skb).dst_group = group;

	if (group)
		return netlink_broadcast(dev->nls, skb, portid, group,
					 gfp_mask);
	return netlink_unicast(dev->nls, skb, portid,
			!gfpflags_allow_blocking(gfp_mask));
}
EXPORT_SYMBOL_GPL(cn_netlink_send_mult);

/* same as cn_netlink_send_mult except msg->len is used for len */
int cn_netlink_send(struct cn_msg *msg, u32 portid, u32 __group,
	gfp_t gfp_mask)
{
	return cn_netlink_send_mult(msg, msg->len, portid, __group, gfp_mask);
}
EXPORT_SYMBOL_GPL(cn_netlink_send);
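
/*
 * Example (illustrative only, not part of this file): a kernel-side user
 * builds a struct cn_msg followed by its payload and hands it to
 * cn_netlink_send().  EXAMPLE_IDX, EXAMPLE_VAL, EXAMPLE_GROUP,
 * example_seq and example_payload are hypothetical names used only for
 * this sketch.
 *
 *	u8 buf[sizeof(struct cn_msg) + 16];
 *	struct cn_msg *m = (struct cn_msg *)buf;
 *
 *	memset(buf, 0, sizeof(buf));
 *	m->id.idx = EXAMPLE_IDX;
 *	m->id.val = EXAMPLE_VAL;
 *	m->seq = example_seq++;
 *	m->ack = 0;
 *	m->len = 16;
 *	memcpy(m->data, example_payload, 16);
 *
 *	cn_netlink_send(m, 0, EXAMPLE_GROUP, GFP_KERNEL);
 *
 * A reply to a received message would reuse the received msg->seq and
 * set msg->ack to the received msg->ack + 1, as described in the comment
 * above cn_netlink_send_mult().
 */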

/*
 * Callback helper - looks up the callback registered for the message id,
 * invokes it, then releases the skb and the callback reference.
 */
static int cn_call_callback(struct sk_buff *skb)
{
	struct nlmsghdr *nlh;
	struct cn_callback_entry *i, *cbq = NULL;
	struct cn_dev *dev = &cdev;
	struct cn_msg *msg = nlmsg_data(nlmsg_hdr(skb));
	struct netlink_skb_parms *nsp = &NETLINK_CB(skb);
	int err = -ENODEV;

	/* verify msg->len is within skb */
	nlh = nlmsg_hdr(skb);
	if (nlh->nlmsg_len < NLMSG_HDRLEN + sizeof(struct cn_msg) + msg->len)
		return -EINVAL;

	spin_lock_bh(&dev->cbdev->queue_lock);
	list_for_each_entry(i, &dev->cbdev->queue_list, callback_entry) {
		if (cn_cb_equal(&i->id.id, &msg->id)) {
			refcount_inc(&i->refcnt);
			cbq = i;
			break;
		}
	}
	spin_unlock_bh(&dev->cbdev->queue_lock);

	if (cbq != NULL) {
		cbq->callback(msg, nsp);
		kfree_skb(skb);
		cn_queue_release_callback(cbq);
		err = 0;
	}

	return err;
}

/*
 * Main netlink receiving function.
 *
 * It checks skb, netlink header and msg sizes, and calls callback helper.
 */
static void cn_rx_skb(struct sk_buff *skb)
{
	struct nlmsghdr *nlh;
	int len, err;

	if (skb->len >= NLMSG_HDRLEN) {
		nlh = nlmsg_hdr(skb);
		len = nlmsg_len(nlh);

		if (len < (int)sizeof(struct cn_msg) ||
		    skb->len < nlh->nlmsg_len ||
		    len > CONNECTOR_MAX_MSG_SIZE)
			return;

		err = cn_call_callback(skb_get(skb));
		if (err < 0)
			kfree_skb(skb);
	}
}
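
/*
 * For reference, the layout cn_rx_skb() expects in each received skb,
 * per the size checks above:
 *
 *	+-----------------+---------------+---------------------+
 *	| struct nlmsghdr | struct cn_msg | msg->len data bytes |
 *	+-----------------+---------------+---------------------+
 *
 * with nlmsg_len(nlh) at least sizeof(struct cn_msg) and the whole
 * message no larger than CONNECTOR_MAX_MSG_SIZE.
 */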

/*
 * Callback add routine - adds a callback with the given ID and name.
 * If a callback with the same ID is already registered, the new one
 * will not be added.
 *
 * May sleep.
 */
int cn_add_callback(struct cb_id *id, const char *name,
		    void (*callback)(struct cn_msg *,
				     struct netlink_skb_parms *))
{
	int err;
	struct cn_dev *dev = &cdev;

	if (!cn_already_initialized)
		return -EAGAIN;

	err = cn_queue_add_callback(dev->cbdev, name, id, callback);
	if (err)
		return err;

	return 0;
}
EXPORT_SYMBOL_GPL(cn_add_callback);
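
/*
 * Example (illustrative only, not part of this file): a module normally
 * registers its callback from an init path and removes it on exit.
 * EXAMPLE_IDX, EXAMPLE_VAL, example_id and example_callback are
 * hypothetical names used only for this sketch.
 *
 *	static struct cb_id example_id = {
 *		.idx = EXAMPLE_IDX,
 *		.val = EXAMPLE_VAL,
 *	};
 *
 *	static void example_callback(struct cn_msg *msg,
 *				     struct netlink_skb_parms *nsp)
 *	{
 *		pr_info("idx %u val %u seq %u ack %u len %u\n",
 *			msg->id.idx, msg->id.val, msg->seq, msg->ack,
 *			msg->len);
 *	}
 *
 *	err = cn_add_callback(&example_id, "example", example_callback);
 *	if (err)
 *		return err;
 *	...
 *	cn_del_callback(&example_id);
 */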

/*
 * Callback remove routine - removes the callback
 * with the given ID.
 * If there is no registered callback with the given
 * ID, nothing happens.
 *
 * May sleep while waiting for reference counter to become zero.
 */
void cn_del_callback(struct cb_id *id)
{
	struct cn_dev *dev = &cdev;

	cn_queue_del_callback(dev->cbdev, id);
}
EXPORT_SYMBOL_GPL(cn_del_callback);

static int cn_proc_show(struct seq_file *m, void *v)
{
	struct cn_queue_dev *dev = cdev.cbdev;
	struct cn_callback_entry *cbq;

	seq_printf(m, "Name            ID\n");

	spin_lock_bh(&dev->queue_lock);

	list_for_each_entry(cbq, &dev->queue_list, callback_entry) {
		seq_printf(m, "%-15s %u:%u\n",
			   cbq->id.name,
			   cbq->id.id.idx,
			   cbq->id.id.val);
	}

	spin_unlock_bh(&dev->queue_lock);

	return 0;
}
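
/*
 * The listing above appears in /proc/net/connector and looks roughly
 * like this (illustrative; the actual entries depend on which connector
 * users are registered on a given system):
 *
 *	Name            ID
 *	cn_proc         1:1
 *	w1              3:1
 */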

static int cn_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, cn_proc_show, NULL);
}

static const struct file_operations cn_file_ops = {
	.owner   = THIS_MODULE,
	.open    = cn_proc_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release
};

static struct cn_dev cdev = {
	.input   = cn_rx_skb,
};

static int cn_init(void)
{
	struct cn_dev *dev = &cdev;
	struct netlink_kernel_cfg cfg = {
		.groups	= CN_NETLINK_USERS + 0xf,
		.input	= dev->input,
	};

	dev->nls = netlink_kernel_create(&init_net, NETLINK_CONNECTOR, &cfg);
	if (!dev->nls)
		return -EIO;

	dev->cbdev = cn_queue_alloc_dev("cqueue", dev->nls);
	if (!dev->cbdev) {
		netlink_kernel_release(dev->nls);
		return -EINVAL;
	}

	cn_already_initialized = 1;

	proc_create("connector", S_IRUGO, init_net.proc_net, &cn_file_ops);

	return 0;
}
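
/*
 * Example (illustrative only, userspace side): a program talks to the
 * socket created above by opening a NETLINK_CONNECTOR netlink socket and
 * joining the multicast group it is interested in.  EXAMPLE_GROUP_MASK
 * is a hypothetical bitmask of groups used only for this sketch; groups
 * above 32 would instead be joined via setsockopt(NETLINK_ADD_MEMBERSHIP).
 *
 *	int s = socket(PF_NETLINK, SOCK_DGRAM, NETLINK_CONNECTOR);
 *	struct sockaddr_nl addr = {
 *		.nl_family = AF_NETLINK,
 *		.nl_pid    = getpid(),
 *		.nl_groups = EXAMPLE_GROUP_MASK,
 *	};
 *
 *	bind(s, (struct sockaddr *)&addr, sizeof(addr));
 *
 * Messages sent with cn_netlink_send() to that group arrive as netlink
 * datagrams whose payload is a struct cn_msg followed by msg->len bytes
 * of data.
 */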

static void cn_fini(void)
{
	struct cn_dev *dev = &cdev;

	cn_already_initialized = 0;

	remove_proc_entry("connector", init_net.proc_net);

	cn_queue_free_dev(dev->cbdev);
	netlink_kernel_release(dev->nls);
}

subsys_initcall(cn_init);
module_exit(cn_fini);