/*
 * cn_queue.c
 *
 * 2004+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/suspend.h>
#include <linux/connector.h>
#include <linux/delay.h>

/*
 * Work handler: deliver one queued netlink message to the registered
 * callback, then drop the skb.  d->free points at a dynamically
 * allocated clone of the callback entry (used by the dispatcher when
 * the original entry was busy) and is NULL otherwise, so the final
 * kfree() is a no-op in the common case.
 */
void cn_queue_wrapper(struct work_struct *work)
{
	struct cn_callback_entry *cbq =
		container_of(work, struct cn_callback_entry, work);
	struct cn_callback_data *d = &cbq->data;
	struct cn_msg *msg = NLMSG_DATA(nlmsg_hdr(d->skb));
	struct netlink_skb_parms *nsp = &NETLINK_CB(d->skb);

	d->callback(msg, nsp);

	kfree_skb(d->skb);
	d->skb = NULL;

	kfree(d->free);
}

static struct cn_callback_entry *
cn_queue_alloc_callback_entry(const char *name, struct cb_id *id,
			      void (*callback)(struct cn_msg *,
					       struct netlink_skb_parms *))
{
	struct cn_callback_entry *cbq;

	cbq = kzalloc(sizeof(*cbq), GFP_KERNEL);
	if (!cbq) {
		printk(KERN_ERR "Failed to create new callback queue.\n");
		return NULL;
	}

	snprintf(cbq->id.name, sizeof(cbq->id.name), "%s", name);
	memcpy(&cbq->id.id, id, sizeof(struct cb_id));
	cbq->data.callback = callback;

	INIT_WORK(&cbq->work, &cn_queue_wrapper);
	return cbq;
}

/*
 * Flush the owning device's workqueue first so that a pending
 * cn_queue_wrapper() cannot run against freed memory.
 */
static void cn_queue_free_callback(struct cn_callback_entry *cbq)
{
	flush_workqueue(cbq->pdev->cn_queue);
	kfree(cbq);
}

/* Two connector ids are equal iff both idx and val match. */
int cn_cb_equal(struct cb_id *i1, struct cb_id *i2)
{
	return ((i1->idx == i2->idx) && (i1->val == i2->val));
}

int cn_queue_add_callback(struct cn_queue_dev *dev, const char *name,
			  struct cb_id *id,
			  void (*callback)(struct cn_msg *,
					   struct netlink_skb_parms *))
{
	struct cn_callback_entry *cbq, *__cbq;
	int found = 0;

	cbq = cn_queue_alloc_callback_entry(name, id, callback);
	if (!cbq)
		return -ENOMEM;

	atomic_inc(&dev->refcnt);
	cbq->pdev = dev;

	/*
	 * Reject duplicate ids; the lookup and the insertion must be
	 * one atomic step under the queue lock.
	 */
	spin_lock_bh(&dev->queue_lock);
	list_for_each_entry(__cbq, &dev->queue_list, callback_entry) {
		if (cn_cb_equal(&__cbq->id.id, id)) {
			found = 1;
			break;
		}
	}
	if (!found)
		list_add_tail(&cbq->callback_entry, &dev->queue_list);
	spin_unlock_bh(&dev->queue_lock);

	if (found) {
		cn_queue_free_callback(cbq);
		atomic_dec(&dev->refcnt);
		return -EINVAL;
	}

	cbq->seq = 0;
	cbq->group = cbq->id.id.idx;

	return 0;
}

void cn_queue_del_callback(struct cn_queue_dev *dev, struct cb_id *id)
{
	struct cn_callback_entry *cbq, *n;
	int found = 0;

	/*
	 * Unlink under the lock, but free outside it: freeing flushes
	 * the workqueue and may sleep.
	 */
	spin_lock_bh(&dev->queue_lock);
	list_for_each_entry_safe(cbq, n, &dev->queue_list, callback_entry) {
		if (cn_cb_equal(&cbq->id.id, id)) {
			list_del(&cbq->callback_entry);
			found = 1;
			break;
		}
	}
	spin_unlock_bh(&dev->queue_lock);

	if (found) {
		cn_queue_free_callback(cbq);
		atomic_dec(&dev->refcnt);
	}
}

struct cn_queue_dev *cn_queue_alloc_dev(const char *name, struct sock *nls)
{
	struct cn_queue_dev *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return NULL;

	snprintf(dev->name, sizeof(dev->name), "%s", name);
	atomic_set(&dev->refcnt, 0);
	INIT_LIST_HEAD(&dev->queue_list);
	spin_lock_init(&dev->queue_lock);

	dev->nls = nls;

	/*
	 * An ordered workqueue executes at most one work item at a
	 * time, so callbacks on this device run strictly in queueing
	 * order.
	 */
	dev->cn_queue = alloc_ordered_workqueue(dev->name, 0);
	if (!dev->cn_queue) {
		kfree(dev);
		return NULL;
	}

	return dev;
}

void cn_queue_free_dev(struct cn_queue_dev *dev)
{
	struct cn_callback_entry *cbq, *n;

	flush_workqueue(dev->cn_queue);
	destroy_workqueue(dev->cn_queue);

	spin_lock_bh(&dev->queue_lock);
	list_for_each_entry_safe(cbq, n, &dev->queue_list, callback_entry)
		list_del(&cbq->callback_entry);
	spin_unlock_bh(&dev->queue_lock);

	/*
	 * Wait for every registered callback to drop its reference
	 * before the device itself goes away.
	 */
	while (atomic_read(&dev->refcnt)) {
		printk(KERN_INFO "Waiting for %s to become free: refcnt=%d.\n",
		       dev->name, atomic_read(&dev->refcnt));
		msleep(1000);
	}

	kfree(dev);
}
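
/*
 * Example usage (a minimal sketch, kept in a comment so it is not built
 * with this file): a module registers a callback through the connector
 * core with cn_add_callback(), which lands on the per-device queue
 * managed above via cn_queue_add_callback().  The idx/val pair, the
 * "example" name and all example_* identifiers below are hypothetical;
 * real users reserve an id in include/linux/connector.h.
 *
 *	#include <linux/module.h>
 *	#include <linux/connector.h>
 *
 *	static struct cb_id example_id = { .idx = CN_NETLINK_USERS + 3,
 *					   .val = 0x1 };
 *
 *	static void example_callback(struct cn_msg *msg,
 *				     struct netlink_skb_parms *nsp)
 *	{
 *		pr_info("cn example: seq=%u ack=%u len=%u\n",
 *			msg->seq, msg->ack, msg->len);
 *	}
 *
 *	static int __init example_init(void)
 *	{
 *		return cn_add_callback(&example_id, "example",
 *				       example_callback);
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		cn_del_callback(&example_id);
 *	}
 *
 *	module_init(example_init);
 *	module_exit(example_exit);
 *	MODULE_LICENSE("GPL");
 */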