xref: /openbmc/linux/drivers/connector/cn_queue.c (revision 1fa6ac37)
/*
 * 	cn_queue.c
 *
 * 2004+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/suspend.h>
#include <linux/connector.h>
#include <linux/delay.h>

/*
 * This job is sent to the kevent workqueue.
 * As long as no event has ever been delivered to any callback, the
 * connector workqueue is not created, so that we avoid keeping a
 * uselessly idle kernel task around.
 * Once the first event is received, we create this dedicated workqueue:
 * it is necessary because the flow of data can be high and we don't
 * want to encumber keventd with it.
 */
static void cn_queue_create(struct work_struct *work)
{
	struct cn_queue_dev *dev;

	dev = container_of(work, struct cn_queue_dev, wq_creation);

	dev->cn_queue = create_singlethread_workqueue(dev->name);
	/* If we fail, we will use keventd for all following connector jobs */
	WARN_ON(!dev->cn_queue);
}
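
/*
 * For illustration only: the lazy-creation pattern above, reduced to its
 * essentials. None of the names below (lazy_wq, wq_requested, setup_work,
 * setup_fn, submit) exist in the connector code; this is a minimal sketch
 * of the idea that the first submitter schedules a one-shot creation job,
 * while everybody falls back to keventd until that job has run. setup_work
 * is assumed to be initialized elsewhere with INIT_WORK(&setup_work,
 * setup_fn), exactly as cn_queue_alloc_dev() does for wq_creation.
 *
 *	static struct workqueue_struct *lazy_wq;
 *	static atomic_t wq_requested = ATOMIC_INIT(0);
 *	static struct work_struct setup_work;
 *
 *	static void setup_fn(struct work_struct *work)
 *	{
 *		lazy_wq = create_singlethread_workqueue("lazy");
 *	}
 *
 *	static int submit(struct work_struct *job)
 *	{
 *		if (lazy_wq)
 *			return queue_work(lazy_wq, job);
 *		if (atomic_inc_return(&wq_requested) == 1)
 *			schedule_work(&setup_work);
 *		else
 *			atomic_dec(&wq_requested);
 *		return schedule_work(job);
 *	}
 */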

/*
 * Queue data for delivery to a callback.
 * If the connector workqueue has already been created, we queue the job
 * on it. Otherwise, we queue the job to kevent and also queue the
 * creation of the connector workqueue.
 */
int queue_cn_work(struct cn_callback_entry *cbq, struct work_struct *work)
{
	struct cn_queue_dev *pdev = cbq->pdev;

	if (likely(pdev->cn_queue))
		return queue_work(pdev->cn_queue, work);

	/*
	 * Don't create the connector workqueue twice: only the caller
	 * that brings wq_requested from 0 to 1 schedules the creation
	 * job, every other caller undoes its increment.
	 */
	if (atomic_inc_return(&pdev->wq_requested) == 1)
		schedule_work(&pdev->wq_creation);
	else
		atomic_dec(&pdev->wq_requested);

	return schedule_work(work);
}
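
/*
 * For illustration only: the connector core (the receive path in
 * connector.c) hands a matched callback entry's work to this function
 * roughly as sketched below; cbq and skb stand for values it already
 * holds. Like queue_work(), queue_cn_work() returns zero when the work
 * was still pending.
 *
 *	cbq->data.skb = skb;
 *	if (!queue_cn_work(cbq, &cbq->work))
 *		err = -EINVAL;
 */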

void cn_queue_wrapper(struct work_struct *work)
{
	struct cn_callback_entry *cbq =
		container_of(work, struct cn_callback_entry, work);
	struct cn_callback_data *d = &cbq->data;
	struct cn_msg *msg = NLMSG_DATA(nlmsg_hdr(d->skb));
	struct netlink_skb_parms *nsp = &NETLINK_CB(d->skb);

	d->callback(msg, nsp);

	/* The callback has consumed the message, release the skb */
	kfree_skb(d->skb);
	d->skb = NULL;

	/* Free the dynamically allocated entry copy, if one was used */
	kfree(d->free);
}
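
/*
 * For illustration only: a callback dispatched through the wrapper above
 * has the following shape. The name example_callback is hypothetical.
 *
 *	static void example_callback(struct cn_msg *msg,
 *				     struct netlink_skb_parms *nsp)
 *	{
 *		pr_info("id %x.%x: %u bytes from pid %u\n",
 *			msg->id.idx, msg->id.val, msg->len, nsp->pid);
 *	}
 */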

static struct cn_callback_entry *
cn_queue_alloc_callback_entry(char *name, struct cb_id *id,
			      void (*callback)(struct cn_msg *, struct netlink_skb_parms *))
{
	struct cn_callback_entry *cbq;

	cbq = kzalloc(sizeof(*cbq), GFP_KERNEL);
	if (!cbq) {
		printk(KERN_ERR "Failed to create new callback queue.\n");
		return NULL;
	}

	snprintf(cbq->id.name, sizeof(cbq->id.name), "%s", name);
	memcpy(&cbq->id.id, id, sizeof(struct cb_id));
	cbq->data.callback = callback;

	INIT_WORK(&cbq->work, &cn_queue_wrapper);
	return cbq;
}

static void cn_queue_free_callback(struct cn_callback_entry *cbq)
{
	/* The first jobs may have been sent to kevent, flush them too */
	flush_scheduled_work();
	if (cbq->pdev->cn_queue)
		flush_workqueue(cbq->pdev->cn_queue);

	kfree(cbq);
}

int cn_cb_equal(struct cb_id *i1, struct cb_id *i2)
{
	return ((i1->idx == i2->idx) && (i1->val == i2->val));
}
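
/*
 * For illustration only: two IDs are equal when both idx and val match.
 * CN_IDX_PROC/CN_VAL_PROC are the real IDs of the process events
 * connector; the second initializer is made up for the example.
 *
 *	struct cb_id a = { .idx = CN_IDX_PROC, .val = CN_VAL_PROC };
 *	struct cb_id b = { .idx = CN_IDX_PROC, .val = CN_VAL_PROC + 1 };
 *
 *	cn_cb_equal(&a, &a) returns 1, cn_cb_equal(&a, &b) returns 0.
 */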

int cn_queue_add_callback(struct cn_queue_dev *dev, char *name, struct cb_id *id,
			  void (*callback)(struct cn_msg *, struct netlink_skb_parms *))
{
	struct cn_callback_entry *cbq, *__cbq;
	int found = 0;

	cbq = cn_queue_alloc_callback_entry(name, id, callback);
	if (!cbq)
		return -ENOMEM;

	atomic_inc(&dev->refcnt);
	cbq->pdev = dev;

	/* Refuse to register two callbacks with the same id */
	spin_lock_bh(&dev->queue_lock);
	list_for_each_entry(__cbq, &dev->queue_list, callback_entry) {
		if (cn_cb_equal(&__cbq->id.id, id)) {
			found = 1;
			break;
		}
	}
	if (!found)
		list_add_tail(&cbq->callback_entry, &dev->queue_list);
	spin_unlock_bh(&dev->queue_lock);

	if (found) {
		cn_queue_free_callback(cbq);
		atomic_dec(&dev->refcnt);
		return -EINVAL;
	}

	cbq->seq = 0;
	cbq->group = cbq->id.id.idx;

	return 0;
}
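
/*
 * For illustration only: subsystems normally register through
 * cn_add_callback() in connector.c, which forwards to the function above.
 * example_id and example_callback are the hypothetical names used in the
 * sketches earlier in this file.
 *
 *	static struct cb_id example_id = { .idx = 0xabcd, .val = 0x1 };
 *
 *	err = cn_add_callback(&example_id, "example", example_callback);
 *	if (err)
 *		return err;	(a duplicate id yields -EINVAL here)
 */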

void cn_queue_del_callback(struct cn_queue_dev *dev, struct cb_id *id)
{
	struct cn_callback_entry *cbq, *n;
	int found = 0;

	spin_lock_bh(&dev->queue_lock);
	list_for_each_entry_safe(cbq, n, &dev->queue_list, callback_entry) {
		if (cn_cb_equal(&cbq->id.id, id)) {
			list_del(&cbq->callback_entry);
			found = 1;
			break;
		}
	}
	spin_unlock_bh(&dev->queue_lock);

	if (found) {
		cn_queue_free_callback(cbq);
		atomic_dec(&dev->refcnt);
	}
}

struct cn_queue_dev *cn_queue_alloc_dev(char *name, struct sock *nls)
{
	struct cn_queue_dev *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return NULL;

	snprintf(dev->name, sizeof(dev->name), "%s", name);
	atomic_set(&dev->refcnt, 0);
	INIT_LIST_HEAD(&dev->queue_list);
	spin_lock_init(&dev->queue_lock);
	init_waitqueue_head(&dev->wq_created);

	dev->nls = nls;

	INIT_WORK(&dev->wq_creation, cn_queue_create);

	return dev;
}
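
/*
 * For illustration only: the connector core creates its single queue
 * device from its init path roughly as below; nls stands for the
 * kernel-side netlink socket it has just opened.
 *
 *	dev->cbdev = cn_queue_alloc_dev("cn_queue", dev->nls);
 *	if (!dev->cbdev)
 *		...release the netlink socket and fail...
 */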

void cn_queue_free_dev(struct cn_queue_dev *dev)
{
	struct cn_callback_entry *cbq, *n;
	long timeout;
	DEFINE_WAIT(wait);

	/* Flush the first pending jobs queued on kevent */
	flush_scheduled_work();

	/* If the connector workqueue creation is still pending, wait for it */
	prepare_to_wait(&dev->wq_created, &wait, TASK_UNINTERRUPTIBLE);
	if (atomic_read(&dev->wq_requested) && !dev->cn_queue) {
		timeout = schedule_timeout(HZ * 2);
		if (!timeout && !dev->cn_queue)
			WARN_ON(1);
	}
	finish_wait(&dev->wq_created, &wait);

	if (dev->cn_queue) {
		flush_workqueue(dev->cn_queue);
		destroy_workqueue(dev->cn_queue);
	}

	spin_lock_bh(&dev->queue_lock);
	list_for_each_entry_safe(cbq, n, &dev->queue_list, callback_entry)
		list_del(&cbq->callback_entry);
	spin_unlock_bh(&dev->queue_lock);

	/* Wait until all users have dropped their references */
	while (atomic_read(&dev->refcnt)) {
		printk(KERN_INFO "Waiting for %s to become free: refcnt=%d.\n",
		       dev->name, atomic_read(&dev->refcnt));
		msleep(1000);
	}

	kfree(dev);
}