/*
 * cn_queue.c
 *
 * 2004+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/suspend.h>
#include <linux/connector.h>
#include <linux/delay.h>

/*
 * Work handler run from the per-device ordered workqueue: decode the
 * netlink message carried by the queued skb, invoke the registered
 * callback, then release the skb and any deferred allocation.
 */
void cn_queue_wrapper(struct work_struct *work)
{
	struct cn_callback_entry *cbq =
		container_of(work, struct cn_callback_entry, work);
	struct cn_callback_data *d = &cbq->data;
	struct cn_msg *msg = NLMSG_DATA(nlmsg_hdr(d->skb));
	struct netlink_skb_parms *nsp = &NETLINK_CB(d->skb);

	d->callback(msg, nsp);

	kfree_skb(d->skb);
	d->skb = NULL;

	kfree(d->free);
}

/* Allocate and initialize a callback entry for the given name/id pair. */
static struct cn_callback_entry *
cn_queue_alloc_callback_entry(char *name, struct cb_id *id,
			      void (*callback)(struct cn_msg *, struct netlink_skb_parms *))
{
	struct cn_callback_entry *cbq;

	cbq = kzalloc(sizeof(*cbq), GFP_KERNEL);
	if (!cbq) {
		printk(KERN_ERR "Failed to create new callback queue.\n");
		return NULL;
	}

	snprintf(cbq->id.name, sizeof(cbq->id.name), "%s", name);
	memcpy(&cbq->id.id, id, sizeof(struct cb_id));
	cbq->data.callback = callback;

	INIT_WORK(&cbq->work, &cn_queue_wrapper);
	return cbq;
}

/*
 * Flush the owning device's workqueue before freeing the entry so that
 * no in-flight work item can still reference it.
 */
static void cn_queue_free_callback(struct cn_callback_entry *cbq)
{
	flush_workqueue(cbq->pdev->cn_queue);
	kfree(cbq);
}

/* Two connector ids are equal when both their idx and val match. */
int cn_cb_equal(struct cb_id *i1, struct cb_id *i2)
{
	return ((i1->idx == i2->idx) && (i1->val == i2->val));
}

/*
 * Register a callback on @dev.  Returns -EINVAL if an entry with the
 * same id is already on the queue list.
 */
int cn_queue_add_callback(struct cn_queue_dev *dev, char *name, struct cb_id *id,
			  void (*callback)(struct cn_msg *, struct netlink_skb_parms *))
{
	struct cn_callback_entry *cbq, *__cbq;
	int found = 0;

	cbq = cn_queue_alloc_callback_entry(name, id, callback);
	if (!cbq)
		return -ENOMEM;

	atomic_inc(&dev->refcnt);
	cbq->pdev = dev;

	spin_lock_bh(&dev->queue_lock);
	list_for_each_entry(__cbq, &dev->queue_list, callback_entry) {
		if (cn_cb_equal(&__cbq->id.id, id)) {
			found = 1;
			break;
		}
	}
	if (!found)
		list_add_tail(&cbq->callback_entry, &dev->queue_list);
	spin_unlock_bh(&dev->queue_lock);

	if (found) {
		cn_queue_free_callback(cbq);
		atomic_dec(&dev->refcnt);
		return -EINVAL;
	}

	cbq->seq = 0;
	cbq->group = cbq->id.id.idx;

	return 0;
}

/*
 * Unregister the callback matching @id: unlink it under the queue lock,
 * then free it and drop the device reference outside the lock.
 */
void cn_queue_del_callback(struct cn_queue_dev *dev, struct cb_id *id)
{
	struct cn_callback_entry *cbq, *n;
	int found = 0;

	spin_lock_bh(&dev->queue_lock);
	list_for_each_entry_safe(cbq, n, &dev->queue_list, callback_entry) {
		if (cn_cb_equal(&cbq->id.id, id)) {
			list_del(&cbq->callback_entry);
			found = 1;
			break;
		}
	}
	spin_unlock_bh(&dev->queue_lock);

	if (found) {
		cn_queue_free_callback(cbq);
		atomic_dec(&dev->refcnt);
	}
}

/* Allocate a queue device and the ordered workqueue named after @name. */
struct cn_queue_dev *cn_queue_alloc_dev(char *name, struct sock *nls)
{
	struct cn_queue_dev *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return NULL;

	snprintf(dev->name, sizeof(dev->name), "%s", name);
	atomic_set(&dev->refcnt, 0);
	INIT_LIST_HEAD(&dev->queue_list);
	spin_lock_init(&dev->queue_lock);

	dev->nls = nls;

	dev->cn_queue = alloc_ordered_workqueue(dev->name, 0);
	if (!dev->cn_queue) {
		kfree(dev);
		return NULL;
	}

	return dev;
}

/*
 * Tear down the workqueue, unlink any remaining callback entries, wait
 * for all references to be dropped and finally free the device.
 */
void cn_queue_free_dev(struct cn_queue_dev *dev)
{
	struct cn_callback_entry *cbq, *n;

	flush_workqueue(dev->cn_queue);
	destroy_workqueue(dev->cn_queue);

	spin_lock_bh(&dev->queue_lock);
	list_for_each_entry_safe(cbq, n, &dev->queue_list, callback_entry)
		list_del(&cbq->callback_entry);
	spin_unlock_bh(&dev->queue_lock);

	while (atomic_read(&dev->refcnt)) {
		printk(KERN_INFO "Waiting for %s to become free: refcnt=%d.\n",
		       dev->name, atomic_read(&dev->refcnt));
		msleep(1000);
	}

	kfree(dev);
	dev = NULL;
}
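
The file above is only the queue-management half of the connector subsystem; drivers normally reach cn_queue_add_callback()/cn_queue_del_callback() through the cn_add_callback()/cn_del_callback() wrappers exported by connector.c, which pass in the global connector queue device. The following is a minimal, illustrative module sketch of that usage, not part of cn_queue.c: the idx/val values, the "cn_example" name and the example functions are made up for illustration.

#include <linux/init.h>
#include <linux/module.h>
#include <linux/connector.h>

/* Hypothetical id -- real users pick idx/val values published for their subsystem. */
static struct cb_id example_id = { .idx = 0x123, .val = 0x456 };

/* Invoked from cn_queue_wrapper() on the connector's ordered workqueue. */
static void example_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
{
	pr_info("cn_example: seq=%u ack=%u len=%u\n", msg->seq, msg->ack, msg->len);
}

static int __init example_init(void)
{
	/* Ends up in cn_queue_add_callback() on the global connector device. */
	return cn_add_callback(&example_id, "cn_example", example_cn_callback);
}

static void __exit example_exit(void)
{
	/* Ends up in cn_queue_del_callback(), which unlinks and frees the entry. */
	cn_del_callback(&example_id);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");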