/*
 *
 * Author	Karsten Keil <kkeil@novell.com>
 *
 * Copyright 2008 by Karsten Keil <kkeil@novell.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/module.h>
#include <linux/mISDNhw.h>

static void
dchannel_bh(struct work_struct *ws)
{
	struct dchannel	*dch = container_of(ws, struct dchannel, workq);
	struct sk_buff	*skb;
	int		err;

	if (test_and_clear_bit(FLG_RECVQUEUE, &dch->Flags)) {
		while ((skb = skb_dequeue(&dch->rqueue))) {
			if (likely(dch->dev.D.peer)) {
				err = dch->dev.D.recv(dch->dev.D.peer, skb);
				if (err)
					dev_kfree_skb(skb);
			} else
				dev_kfree_skb(skb);
		}
	}
	if (test_and_clear_bit(FLG_PHCHANGE, &dch->Flags)) {
		if (dch->phfunc)
			dch->phfunc(dch);
	}
}

static void
bchannel_bh(struct work_struct *ws)
{
	struct bchannel	*bch = container_of(ws, struct bchannel, workq);
	struct sk_buff	*skb;
	int		err;

	if (test_and_clear_bit(FLG_RECVQUEUE, &bch->Flags)) {
		while ((skb = skb_dequeue(&bch->rqueue))) {
			bch->rcount--;
			if (likely(bch->ch.peer)) {
				err = bch->ch.recv(bch->ch.peer, skb);
				if (err)
					dev_kfree_skb(skb);
			} else
				dev_kfree_skb(skb);
		}
	}
}

int
mISDN_initdchannel(struct dchannel *ch, int maxlen, void *phf)
{
	test_and_set_bit(FLG_HDLC, &ch->Flags);
	ch->maxlen = maxlen;
	ch->hw = NULL;
	ch->rx_skb = NULL;
	ch->tx_skb = NULL;
	ch->tx_idx = 0;
	ch->phfunc = phf;
	skb_queue_head_init(&ch->squeue);
	skb_queue_head_init(&ch->rqueue);
	INIT_LIST_HEAD(&ch->dev.bchannels);
	INIT_WORK(&ch->workq, dchannel_bh);
	return 0;
}
EXPORT_SYMBOL(mISDN_initdchannel);

int
mISDN_initbchannel(struct bchannel *ch, int maxlen)
{
	ch->Flags = 0;
	ch->maxlen = maxlen;
	ch->hw = NULL;
	ch->rx_skb = NULL;
	ch->tx_skb = NULL;
	ch->tx_idx = 0;
	skb_queue_head_init(&ch->rqueue);
	ch->rcount = 0;
	ch->next_skb = NULL;
	INIT_WORK(&ch->workq, bchannel_bh);
	return 0;
}
EXPORT_SYMBOL(mISDN_initbchannel);

int
mISDN_freedchannel(struct dchannel *ch)
{
	if (ch->tx_skb) {
		dev_kfree_skb(ch->tx_skb);
		ch->tx_skb = NULL;
	}
	if (ch->rx_skb) {
		dev_kfree_skb(ch->rx_skb);
		ch->rx_skb = NULL;
	}
	skb_queue_purge(&ch->squeue);
	skb_queue_purge(&ch->rqueue);
	flush_scheduled_work();
	return 0;
}
EXPORT_SYMBOL(mISDN_freedchannel);

int
mISDN_freebchannel(struct bchannel *ch)
{
	if (ch->tx_skb) {
		dev_kfree_skb(ch->tx_skb);
		ch->tx_skb = NULL;
	}
	if (ch->rx_skb) {
		dev_kfree_skb(ch->rx_skb);
		ch->rx_skb = NULL;
	}
	if (ch->next_skb) {
		dev_kfree_skb(ch->next_skb);
		ch->next_skb = NULL;
	}
	skb_queue_purge(&ch->rqueue);
	ch->rcount = 0;
	flush_scheduled_work();
	return 0;
}
EXPORT_SYMBOL(mISDN_freebchannel);
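
/*
 * Usage sketch (not part of this file): a hardware driver typically
 * initialises its channels once in its probe/setup path and tears them
 * down again after the hardware has been stopped.  "xyz_card" and
 * "xyz_ph_state" below are hypothetical names chosen for illustration
 * only; the maximum frame sizes come from the mISDN headers.
 *
 *	static int xyz_setup_channels(struct xyz_card *card)
 *	{
 *		mISDN_initdchannel(&card->dch, MAX_DFRAME_LEN_L1,
 *				   xyz_ph_state);
 *		mISDN_initbchannel(&card->bch[0], MAX_DATA_MEM);
 *		mISDN_initbchannel(&card->bch[1], MAX_DATA_MEM);
 *		return 0;
 *	}
 *
 *	static void xyz_release_channels(struct xyz_card *card)
 *	{
 *		mISDN_freebchannel(&card->bch[1]);
 *		mISDN_freebchannel(&card->bch[0]);
 *		mISDN_freedchannel(&card->dch);
 *	}
 */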
/* pack SAPI (low byte) and TEI (next byte) of a D-channel frame into one id */
static inline u_int
get_sapi_tei(u_char *p)
{
	u_int	sapi, tei;

	sapi = *p >> 2;
	tei = p[1] >> 1;
	return sapi | (tei << 8);
}

void
recv_Dchannel(struct dchannel *dch)
{
	struct mISDNhead *hh;

	if (dch->rx_skb->len < 2) { /* at least 2 for sapi / tei */
		dev_kfree_skb(dch->rx_skb);
		dch->rx_skb = NULL;
		return;
	}
	hh = mISDN_HEAD_P(dch->rx_skb);
	hh->prim = PH_DATA_IND;
	hh->id = get_sapi_tei(dch->rx_skb->data);
	skb_queue_tail(&dch->rqueue, dch->rx_skb);
	dch->rx_skb = NULL;
	schedule_event(dch, FLG_RECVQUEUE);
}
EXPORT_SYMBOL(recv_Dchannel);

void
recv_Echannel(struct dchannel *ech, struct dchannel *dch)
{
	struct mISDNhead *hh;

	if (ech->rx_skb->len < 2) { /* at least 2 for sapi / tei */
		dev_kfree_skb(ech->rx_skb);
		ech->rx_skb = NULL;
		return;
	}
	hh = mISDN_HEAD_P(ech->rx_skb);
	hh->prim = PH_DATA_E_IND;
	hh->id = get_sapi_tei(ech->rx_skb->data);
	skb_queue_tail(&dch->rqueue, ech->rx_skb);
	ech->rx_skb = NULL;
	schedule_event(dch, FLG_RECVQUEUE);
}
EXPORT_SYMBOL(recv_Echannel);

void
recv_Bchannel(struct bchannel *bch, unsigned int id)
{
	struct mISDNhead *hh;

	hh = mISDN_HEAD_P(bch->rx_skb);
	hh->prim = PH_DATA_IND;
	hh->id = id;
	if (bch->rcount >= 64) {
		printk(KERN_WARNING "B-channel %p receive queue overflow, "
			"flushing!\n", bch);
		skb_queue_purge(&bch->rqueue);
		bch->rcount = 0;
		return;
	}
	bch->rcount++;
	skb_queue_tail(&bch->rqueue, bch->rx_skb);
	bch->rx_skb = NULL;
	schedule_event(bch, FLG_RECVQUEUE);
}
EXPORT_SYMBOL(recv_Bchannel);

void
recv_Dchannel_skb(struct dchannel *dch, struct sk_buff *skb)
{
	skb_queue_tail(&dch->rqueue, skb);
	schedule_event(dch, FLG_RECVQUEUE);
}
EXPORT_SYMBOL(recv_Dchannel_skb);

void
recv_Bchannel_skb(struct bchannel *bch, struct sk_buff *skb)
{
	if (bch->rcount >= 64) {
		printk(KERN_WARNING "B-channel %p receive queue overflow, "
			"flushing!\n", bch);
		skb_queue_purge(&bch->rqueue);
		bch->rcount = 0;
	}
	bch->rcount++;
	skb_queue_tail(&bch->rqueue, skb);
	schedule_event(bch, FLG_RECVQUEUE);
}
EXPORT_SYMBOL(recv_Bchannel_skb);

/* queue a PH_DATA_CNF for the D-channel frame that was just dequeued */
static void
confirm_Dsend(struct dchannel *dch)
{
	struct sk_buff	*skb;

	skb = _alloc_mISDN_skb(PH_DATA_CNF, mISDN_HEAD_ID(dch->tx_skb),
	    0, NULL, GFP_ATOMIC);
	if (!skb) {
		printk(KERN_ERR "%s: no skb id %x\n", __func__,
		    mISDN_HEAD_ID(dch->tx_skb));
		return;
	}
	skb_queue_tail(&dch->rqueue, skb);
	schedule_event(dch, FLG_RECVQUEUE);
}

int
get_next_dframe(struct dchannel *dch)
{
	dch->tx_idx = 0;
	dch->tx_skb = skb_dequeue(&dch->squeue);
	if (dch->tx_skb) {
		confirm_Dsend(dch);
		return 1;
	}
	dch->tx_skb = NULL;
	test_and_clear_bit(FLG_TX_BUSY, &dch->Flags);
	return 0;
}
EXPORT_SYMBOL(get_next_dframe);

/* queue a PH_DATA_CNF for the B-channel frame that was just dequeued */
void
confirm_Bsend(struct bchannel *bch)
{
	struct sk_buff	*skb;

	if (bch->rcount >= 64) {
		printk(KERN_WARNING "B-channel %p receive queue overflow, "
			"flushing!\n", bch);
		skb_queue_purge(&bch->rqueue);
		bch->rcount = 0;
	}
	skb = _alloc_mISDN_skb(PH_DATA_CNF, mISDN_HEAD_ID(bch->tx_skb),
	    0, NULL, GFP_ATOMIC);
	if (!skb) {
		printk(KERN_ERR "%s: no skb id %x\n", __func__,
		    mISDN_HEAD_ID(bch->tx_skb));
		return;
	}
	bch->rcount++;
	skb_queue_tail(&bch->rqueue, skb);
	schedule_event(bch, FLG_RECVQUEUE);
}
EXPORT_SYMBOL(confirm_Bsend);
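
/*
 * Usage sketch (not part of this file): in its receive interrupt a
 * driver fills dch->rx_skb / bch->rx_skb while holding its hardware
 * lock and then hands the frame to the stack.  "xyz_card" and
 * "xyz_read_fifo" are hypothetical names used only for illustration.
 *
 *	static void xyz_rx_irq(struct xyz_card *card)
 *	{
 *		struct dchannel *dch = &card->dch;
 *
 *		if (!dch->rx_skb)
 *			dch->rx_skb = mI_alloc_skb(dch->maxlen, GFP_ATOMIC);
 *		if (!dch->rx_skb)
 *			return;
 *		xyz_read_fifo(card, dch->rx_skb);	// copy HW data
 *		recv_Dchannel(dch);	// queues frame, clears rx_skb
 *	}
 *
 * B-channel data is delivered the same way via recv_Bchannel(bch, id);
 * drivers without a meaningful per-packet id pass MISDN_ID_ANY.
 */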
int
get_next_bframe(struct bchannel *bch)
{
	bch->tx_idx = 0;
	if (test_bit(FLG_TX_NEXT, &bch->Flags)) {
		bch->tx_skb = bch->next_skb;
		if (bch->tx_skb) {
			bch->next_skb = NULL;
			test_and_clear_bit(FLG_TX_NEXT, &bch->Flags);
			if (!test_bit(FLG_TRANSPARENT, &bch->Flags))
				confirm_Bsend(bch); /* not for transparent */
			return 1;
		} else {
			test_and_clear_bit(FLG_TX_NEXT, &bch->Flags);
			printk(KERN_WARNING "B TX_NEXT without skb\n");
		}
	}
	bch->tx_skb = NULL;
	test_and_clear_bit(FLG_TX_BUSY, &bch->Flags);
	return 0;
}
EXPORT_SYMBOL(get_next_bframe);

/* hand a frame (or an empty indication) to the channel peer; frees skb on failure */
void
queue_ch_frame(struct mISDNchannel *ch, u_int pr, int id, struct sk_buff *skb)
{
	struct mISDNhead *hh;

	if (!skb) {
		_queue_data(ch, pr, id, 0, NULL, GFP_ATOMIC);
	} else {
		if (ch->peer) {
			hh = mISDN_HEAD_P(skb);
			hh->prim = pr;
			hh->id = id;
			if (!ch->recv(ch->peer, skb))
				return;
		}
		dev_kfree_skb(skb);
	}
}
EXPORT_SYMBOL(queue_ch_frame);

/*
 * Returns 1 if the caller should write skb to the hardware FIFO now,
 * 0 if it was queued behind a busy transmitter, or a negative errno.
 * The caller must hold the hardware lock.
 */
int
dchannel_senddata(struct dchannel *ch, struct sk_buff *skb)
{
	/* check size */
	if (skb->len <= 0) {
		printk(KERN_WARNING "%s: skb too small\n", __func__);
		return -EINVAL;
	}
	if (skb->len > ch->maxlen) {
		printk(KERN_WARNING "%s: skb too large(%d/%d)\n",
			__func__, skb->len, ch->maxlen);
		return -EINVAL;
	}
	/* HW lock must be obtained */
	if (test_and_set_bit(FLG_TX_BUSY, &ch->Flags)) {
		skb_queue_tail(&ch->squeue, skb);
		return 0;
	} else {
		/* write to fifo */
		ch->tx_skb = skb;
		ch->tx_idx = 0;
		return 1;
	}
}
EXPORT_SYMBOL(dchannel_senddata);

/*
 * Returns 1 if the caller should write skb to the hardware FIFO now,
 * 0 if it was stored as next_skb behind a busy transmitter, -EBUSY if
 * a next_skb is already pending, or -EINVAL on a bad size.
 * The caller must hold the hardware lock.
 */
int
bchannel_senddata(struct bchannel *ch, struct sk_buff *skb)
{

	/* check size */
	if (skb->len <= 0) {
		printk(KERN_WARNING "%s: skb too small\n", __func__);
		return -EINVAL;
	}
	if (skb->len > ch->maxlen) {
		printk(KERN_WARNING "%s: skb too large(%d/%d)\n",
			__func__, skb->len, ch->maxlen);
		return -EINVAL;
	}
	/* HW lock must be obtained */
	/* check for pending next_skb */
	if (ch->next_skb) {
		printk(KERN_WARNING
		    "%s: next_skb exist ERROR (skb->len=%d next_skb->len=%d)\n",
		    __func__, skb->len, ch->next_skb->len);
		return -EBUSY;
	}
	if (test_and_set_bit(FLG_TX_BUSY, &ch->Flags)) {
		test_and_set_bit(FLG_TX_NEXT, &ch->Flags);
		ch->next_skb = skb;
		return 0;
	} else {
		/* write to fifo */
		ch->tx_skb = skb;
		ch->tx_idx = 0;
		return 1;
	}
}
EXPORT_SYMBOL(bchannel_senddata);
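
/*
 * Usage sketch (not part of this file): a typical D-channel transmit
 * path in a driver's PH_DATA_REQ handler.  "xyz_card", "xyz_fill_dfifo"
 * and the lock are hypothetical names used only for illustration.
 *
 *	static int xyz_d_send(struct xyz_card *card, struct sk_buff *skb)
 *	{
 *		struct dchannel *dch = &card->dch;
 *		u_long flags;
 *		int id, ret;
 *
 *		spin_lock_irqsave(&card->lock, flags);
 *		ret = dchannel_senddata(dch, skb);
 *		if (ret > 0) {			// direct transmit
 *			id = mISDN_HEAD_ID(skb);
 *			xyz_fill_dfifo(card);	// write dch->tx_skb to HW
 *			ret = 0;
 *			spin_unlock_irqrestore(&card->lock, flags);
 *			queue_ch_frame(&dch->dev.D, PH_DATA_CNF, id, NULL);
 *		} else
 *			spin_unlock_irqrestore(&card->lock, flags);
 *		return ret;
 *	}
 *
 * On transmit completion the driver calls get_next_dframe() under the
 * same lock: a return value of 1 means a queued frame was moved into
 * dch->tx_skb (its PH_DATA_CNF is queued by confirm_Dsend()) and should
 * be written to the FIFO next; 0 means the send queue is empty and
 * FLG_TX_BUSY has been cleared.  get_next_bframe() plays the same role
 * for B-channels with the single next_skb slot.
 */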