1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 *
4 * Author Karsten Keil <kkeil@novell.com>
5 *
6 * Copyright 2008 by Karsten Keil <kkeil@novell.com>
7 */
8
9 #include <linux/mISDNif.h>
10 #include <linux/slab.h>
11 #include "core.h"
12 #include "fsm.h"
13 #include "layer2.h"
14
15 static u_int *debug;
16
17 static
18 struct Fsm l2fsm = {NULL, 0, 0, NULL, NULL};
19
20 static char *strL2State[] =
21 {
22 "ST_L2_1",
23 "ST_L2_2",
24 "ST_L2_3",
25 "ST_L2_4",
26 "ST_L2_5",
27 "ST_L2_6",
28 "ST_L2_7",
29 "ST_L2_8",
30 };
31
32 enum {
33 EV_L2_UI,
34 EV_L2_SABME,
35 EV_L2_DISC,
36 EV_L2_DM,
37 EV_L2_UA,
38 EV_L2_FRMR,
39 EV_L2_SUPER,
40 EV_L2_I,
41 EV_L2_DL_DATA,
42 EV_L2_ACK_PULL,
43 EV_L2_DL_UNITDATA,
44 EV_L2_DL_ESTABLISH_REQ,
45 EV_L2_DL_RELEASE_REQ,
46 EV_L2_MDL_ASSIGN,
47 EV_L2_MDL_REMOVE,
48 EV_L2_MDL_ERROR,
49 EV_L1_DEACTIVATE,
50 EV_L2_T200,
51 EV_L2_T203,
52 EV_L2_T200I,
53 EV_L2_T203I,
54 EV_L2_SET_OWN_BUSY,
55 EV_L2_CLEAR_OWN_BUSY,
56 EV_L2_FRAME_ERROR,
57 };
58
59 #define L2_EVENT_COUNT (EV_L2_FRAME_ERROR + 1)
60
61 static char *strL2Event[] =
62 {
63 "EV_L2_UI",
64 "EV_L2_SABME",
65 "EV_L2_DISC",
66 "EV_L2_DM",
67 "EV_L2_UA",
68 "EV_L2_FRMR",
69 "EV_L2_SUPER",
70 "EV_L2_I",
71 "EV_L2_DL_DATA",
72 "EV_L2_ACK_PULL",
73 "EV_L2_DL_UNITDATA",
74 "EV_L2_DL_ESTABLISH_REQ",
75 "EV_L2_DL_RELEASE_REQ",
76 "EV_L2_MDL_ASSIGN",
77 "EV_L2_MDL_REMOVE",
78 "EV_L2_MDL_ERROR",
79 "EV_L1_DEACTIVATE",
80 "EV_L2_T200",
81 "EV_L2_T203",
82 "EV_L2_T200I",
83 "EV_L2_T203I",
84 "EV_L2_SET_OWN_BUSY",
85 "EV_L2_CLEAR_OWN_BUSY",
86 "EV_L2_FRAME_ERROR",
87 };
88
89 static void
l2m_debug(struct FsmInst *fi, char *fmt, ...)
91 {
92 struct layer2 *l2 = fi->userdata;
93 struct va_format vaf;
94 va_list va;
95
96 if (!(*debug & DEBUG_L2_FSM))
97 return;
98
99 va_start(va, fmt);
100
101 vaf.fmt = fmt;
102 vaf.va = &va;
103
104 printk(KERN_DEBUG "%s l2 (sapi %d tei %d): %pV\n",
105 mISDNDevName4ch(&l2->ch), l2->sapi, l2->tei, &vaf);
106
107 va_end(va);
108 }
109
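/*
 * Helpers for the variable L2 header layout: the address field is two
 * octets for LAPD (SAPI + TEI) and one for LAPB; the control field is
 * two octets for modulo-128 I/S frames and one octet otherwise (UI
 * frames always use a one-octet control field).
 */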
110 inline u_int
l2headersize(struct layer2 *l2, int ui)
112 {
113 return ((test_bit(FLG_MOD128, &l2->flag) && (!ui)) ? 2 : 1) +
114 (test_bit(FLG_LAPD, &l2->flag) ? 2 : 1);
115 }
116
117 inline u_int
l2addrsize(struct layer2 *l2)
119 {
120 return test_bit(FLG_LAPD, &l2->flag) ? 2 : 1;
121 }
122
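/*
 * Generate the id used for frames sent towards layer 1: a running
 * sequence number in bits 16..30 (restarting at 1 after 0x7fff), the
 * TEI in bits 8..15 and the SAPI in bits 0..7.
 */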
123 static u_int
l2_newid(struct layer2 *l2)
125 {
126 u_int id;
127
128 id = l2->next_id++;
129 if (id == 0x7fff)
130 l2->next_id = 1;
131 id <<= 16;
132 id |= l2->tei << 8;
133 id |= l2->sapi;
134 return id;
135 }
136
137 static void
l2up(struct layer2 *l2, u_int prim, struct sk_buff *skb)
139 {
140 int err;
141
142 if (!l2->up)
143 return;
144 mISDN_HEAD_PRIM(skb) = prim;
145 mISDN_HEAD_ID(skb) = (l2->ch.nr << 16) | l2->ch.addr;
146 err = l2->up->send(l2->up, skb);
147 if (err) {
148 printk(KERN_WARNING "%s: dev %s err=%d\n", __func__,
149 mISDNDevName4ch(&l2->ch), err);
150 dev_kfree_skb(skb);
151 }
152 }
153
154 static void
l2up_create(struct layer2 *l2, u_int prim, int len, void *arg)
156 {
157 struct sk_buff *skb;
158 struct mISDNhead *hh;
159 int err;
160
161 if (!l2->up)
162 return;
163 skb = mI_alloc_skb(len, GFP_ATOMIC);
164 if (!skb)
165 return;
166 hh = mISDN_HEAD_P(skb);
167 hh->prim = prim;
168 hh->id = (l2->ch.nr << 16) | l2->ch.addr;
169 if (len)
170 skb_put_data(skb, arg, len);
171 err = l2->up->send(l2->up, skb);
172 if (err) {
173 printk(KERN_WARNING "%s: dev %s err=%d\n", __func__,
174 mISDNDevName4ch(&l2->ch), err);
175 dev_kfree_skb(skb);
176 }
177 }
178
179 static int
l2down_skb(struct layer2 *l2, struct sk_buff *skb) {
181 int ret;
182
183 ret = l2->ch.recv(l2->ch.peer, skb);
184 if (ret && (*debug & DEBUG_L2_RECV))
185 printk(KERN_DEBUG "l2down_skb: dev %s ret(%d)\n",
186 mISDNDevName4ch(&l2->ch), ret);
187 return ret;
188 }
189
190 static int
l2down_raw(struct layer2 *l2, struct sk_buff *skb)
192 {
193 struct mISDNhead *hh = mISDN_HEAD_P(skb);
194
195 if (hh->prim == PH_DATA_REQ) {
196 if (test_and_set_bit(FLG_L1_NOTREADY, &l2->flag)) {
197 skb_queue_tail(&l2->down_queue, skb);
198 return 0;
199 }
200 l2->down_id = mISDN_HEAD_ID(skb);
201 }
202 return l2down_skb(l2, skb);
203 }
204
205 static int
l2down(struct layer2 *l2, u_int prim, u_int id, struct sk_buff *skb)
207 {
208 struct mISDNhead *hh = mISDN_HEAD_P(skb);
209
210 hh->prim = prim;
211 hh->id = id;
212 return l2down_raw(l2, skb);
213 }
214
215 static int
l2down_create(struct layer2 *l2, u_int prim, u_int id, int len, void *arg)
217 {
218 struct sk_buff *skb;
219 int err;
220 struct mISDNhead *hh;
221
222 skb = mI_alloc_skb(len, GFP_ATOMIC);
223 if (!skb)
224 return -ENOMEM;
225 hh = mISDN_HEAD_P(skb);
226 hh->prim = prim;
227 hh->id = id;
228 if (len)
229 skb_put_data(skb, arg, len);
230 err = l2down_raw(l2, skb);
231 if (err)
232 dev_kfree_skb(skb);
233 return err;
234 }
235
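/*
 * PH_DATA_CNF from layer 1: if it confirms the frame we are waiting
 * for, push the next frame from down_queue (if any) down to layer 1;
 * once the queue has drained, clear FLG_L1_NOTREADY and signal
 * EV_L2_ACK_PULL so the FSM can fetch further I-frames.
 */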
236 static int
ph_data_confirm(struct layer2 *l2, struct mISDNhead *hh, struct sk_buff *skb) {
238 struct sk_buff *nskb = skb;
239 int ret = -EAGAIN;
240
241 if (test_bit(FLG_L1_NOTREADY, &l2->flag)) {
242 if (hh->id == l2->down_id) {
243 nskb = skb_dequeue(&l2->down_queue);
244 if (nskb) {
245 l2->down_id = mISDN_HEAD_ID(nskb);
246 if (l2down_skb(l2, nskb)) {
247 dev_kfree_skb(nskb);
248 l2->down_id = MISDN_ID_NONE;
249 }
250 } else
251 l2->down_id = MISDN_ID_NONE;
252 if (ret) {
253 dev_kfree_skb(skb);
254 ret = 0;
255 }
256 if (l2->down_id == MISDN_ID_NONE) {
257 test_and_clear_bit(FLG_L1_NOTREADY, &l2->flag);
258 mISDN_FsmEvent(&l2->l2m, EV_L2_ACK_PULL, NULL);
259 }
260 }
261 }
262 if (!test_and_set_bit(FLG_L1_NOTREADY, &l2->flag)) {
263 nskb = skb_dequeue(&l2->down_queue);
264 if (nskb) {
265 l2->down_id = mISDN_HEAD_ID(nskb);
266 if (l2down_skb(l2, nskb)) {
267 dev_kfree_skb(nskb);
268 l2->down_id = MISDN_ID_NONE;
269 test_and_clear_bit(FLG_L1_NOTREADY, &l2->flag);
270 }
271 } else
272 test_and_clear_bit(FLG_L1_NOTREADY, &l2->flag);
273 }
274 return ret;
275 }
276
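/*
 * T200/T203 expired: re-inject the timeout as a DL_TIMER200_IND or
 * DL_TIMER203_IND message through the stack's own receive path, so the
 * matching EV_L2_T200I/EV_L2_T203I event is handled from l2_send()
 * like any other message.
 */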
277 static void
l2_timeout(struct FsmInst *fi, int event, void *arg)
279 {
280 struct layer2 *l2 = fi->userdata;
281 struct sk_buff *skb;
282 struct mISDNhead *hh;
283
284 skb = mI_alloc_skb(0, GFP_ATOMIC);
285 if (!skb) {
286 printk(KERN_WARNING "%s: L2(%d,%d) nr:%x timer %s no skb\n",
287 mISDNDevName4ch(&l2->ch), l2->sapi, l2->tei,
288 l2->ch.nr, event == EV_L2_T200 ? "T200" : "T203");
289 return;
290 }
291 hh = mISDN_HEAD_P(skb);
292 hh->prim = event == EV_L2_T200 ? DL_TIMER200_IND : DL_TIMER203_IND;
293 hh->id = l2->ch.nr;
294 if (*debug & DEBUG_TIMER)
295 printk(KERN_DEBUG "%s: L2(%d,%d) nr:%x timer %s expired\n",
296 mISDNDevName4ch(&l2->ch), l2->sapi, l2->tei,
297 l2->ch.nr, event == EV_L2_T200 ? "T200" : "T203");
298 if (l2->ch.st)
299 l2->ch.st->own.recv(&l2->ch.st->own, skb);
300 }
301
302 static int
l2mgr(struct layer2 *l2, u_int prim, void *arg) {
304 long c = (long)arg;
305
306 printk(KERN_WARNING "l2mgr: dev %s addr:%x prim %x %c\n",
307 mISDNDevName4ch(&l2->ch), l2->id, prim, (char)c);
308 if (test_bit(FLG_LAPD, &l2->flag) &&
309 !test_bit(FLG_FIXED_TEI, &l2->flag)) {
310 switch (c) {
311 case 'C':
312 case 'D':
313 case 'G':
314 case 'H':
315 l2_tei(l2, prim, (u_long)arg);
316 break;
317 }
318 }
319 return 0;
320 }
321
322 static void
set_peer_busy(struct layer2 *l2) {
324 test_and_set_bit(FLG_PEER_BUSY, &l2->flag);
325 if (skb_queue_len(&l2->i_queue) || skb_queue_len(&l2->ui_queue))
326 test_and_set_bit(FLG_L2BLOCK, &l2->flag);
327 }
328
329 static void
clear_peer_busy(struct layer2 *l2) {
331 if (test_and_clear_bit(FLG_PEER_BUSY, &l2->flag))
332 test_and_clear_bit(FLG_L2BLOCK, &l2->flag);
333 }
334
335 static void
InitWin(struct layer2 *l2)
337 {
338 int i;
339
340 for (i = 0; i < MAX_WINDOW; i++)
341 l2->windowar[i] = NULL;
342 }
343
344 static int
freewin(struct layer2 *l2)
346 {
347 int i, cnt = 0;
348
349 for (i = 0; i < MAX_WINDOW; i++) {
350 if (l2->windowar[i]) {
351 cnt++;
352 dev_kfree_skb(l2->windowar[i]);
353 l2->windowar[i] = NULL;
354 }
355 }
356 return cnt;
357 }
358
359 static void
ReleaseWin(struct layer2 *l2)
361 {
362 int cnt = freewin(l2);
363
364 if (cnt)
365 printk(KERN_WARNING
366 "isdnl2 freed %d skbuffs in release\n", cnt);
367 }
368
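/*
 * True if another I-frame may be transmitted: the number of
 * unacknowledged frames (V(S) - V(A) modulo 8 or 128) is still below
 * the window size and the peer has not reported receiver busy.
 */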
369 inline unsigned int
cansend(struct layer2 *l2)
371 {
372 unsigned int p1;
373
374 if (test_bit(FLG_MOD128, &l2->flag))
375 p1 = (l2->vs - l2->va) % 128;
376 else
377 p1 = (l2->vs - l2->va) % 8;
378 return (p1 < l2->window) && !test_bit(FLG_PEER_BUSY, &l2->flag);
379 }
380
381 inline void
clear_exception(struct layer2 *l2)
383 {
384 test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
385 test_and_clear_bit(FLG_REJEXC, &l2->flag);
386 test_and_clear_bit(FLG_OWN_BUSY, &l2->flag);
387 clear_peer_busy(l2);
388 }
389
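/*
 * Write the address field into header[]: SAPI/C-R and TEI octets for
 * LAPD, a single A or B address octet for LAPB; the C/R sense is
 * inverted on the network side (LAPD) or the originating side (LAPB).
 * Returns the number of octets written.
 */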
390 static int
sethdraddr(struct layer2 *l2, u_char *header, int rsp)
392 {
393 u_char *ptr = header;
394 int crbit = rsp;
395
396 if (test_bit(FLG_LAPD, &l2->flag)) {
397 if (test_bit(FLG_LAPD_NET, &l2->flag))
398 crbit = !crbit;
399 *ptr++ = (l2->sapi << 2) | (crbit ? 2 : 0);
400 *ptr++ = (l2->tei << 1) | 1;
401 return 2;
402 } else {
403 if (test_bit(FLG_ORIG, &l2->flag))
404 crbit = !crbit;
405 if (crbit)
406 *ptr++ = l2->addr.B;
407 else
408 *ptr++ = l2->addr.A;
409 return 1;
410 }
411 }
412
413 static inline void
enqueue_super(struct layer2 *l2, struct sk_buff *skb)
415 {
416 if (l2down(l2, PH_DATA_REQ, l2_newid(l2), skb))
417 dev_kfree_skb(skb);
418 }
419
420 static inline void
enqueue_ui(struct layer2 *l2, struct sk_buff *skb)
422 {
423 if (l2->tm)
424 l2_tei(l2, MDL_STATUS_UI_IND, 0);
425 if (l2down(l2, PH_DATA_REQ, l2_newid(l2), skb))
426 dev_kfree_skb(skb);
427 }
428
429 inline int
IsUI(u_char *data)
431 {
432 return (data[0] & 0xef) == UI;
433 }
434
435 inline int
IsUA(u_char *data)
437 {
438 return (data[0] & 0xef) == UA;
439 }
440
441 inline int
IsDM(u_char *data)
443 {
444 return (data[0] & 0xef) == DM;
445 }
446
447 inline int
IsDISC(u_char *data)
449 {
450 return (data[0] & 0xef) == DISC;
451 }
452
453 inline int
IsRR(u_char *data, struct layer2 *l2)
455 {
456 if (test_bit(FLG_MOD128, &l2->flag))
457 return data[0] == RR;
458 else
459 return (data[0] & 0xf) == 1;
460 }
461
462 inline int
IsSFrame(u_char *data, struct layer2 *l2)
464 {
465 register u_char d = *data;
466
467 if (!test_bit(FLG_MOD128, &l2->flag))
468 d &= 0xf;
469 return ((d & 0xf3) == 1) && ((d & 0x0c) != 0x0c);
470 }
471
472 inline int
IsSABME(u_char *data, struct layer2 *l2)
474 {
475 u_char d = data[0] & ~0x10;
476
477 return test_bit(FLG_MOD128, &l2->flag) ? d == SABME : d == SABM;
478 }
479
480 inline int
IsREJ(u_char *data, struct layer2 *l2)
482 {
483 return test_bit(FLG_MOD128, &l2->flag) ?
484 data[0] == REJ : (data[0] & 0xf) == REJ;
485 }
486
487 inline int
IsFRMR(u_char *data)
489 {
490 return (data[0] & 0xef) == FRMR;
491 }
492
493 inline int
IsRNR(u_char *data, struct layer2 *l2)
495 {
496 return test_bit(FLG_MOD128, &l2->flag) ?
497 data[0] == RNR : (data[0] & 0xf) == RNR;
498 }
499
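/*
 * Frame validation helpers used by ph_data_indication(): each returns 0
 * for an acceptable frame or a Q.921 error code character ('L' wrong
 * command/response, 'N' wrong length, 'O' information field too long)
 * which is then reported via EV_L2_FRAME_ERROR.
 */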
500 static int
iframe_error(struct layer2 *l2, struct sk_buff *skb)
502 {
503 u_int i;
504 int rsp = *skb->data & 0x2;
505
506 i = l2addrsize(l2) + (test_bit(FLG_MOD128, &l2->flag) ? 2 : 1);
507 if (test_bit(FLG_ORIG, &l2->flag))
508 rsp = !rsp;
509 if (rsp)
510 return 'L';
511 if (skb->len < i)
512 return 'N';
513 if ((skb->len - i) > l2->maxlen)
514 return 'O';
515 return 0;
516 }
517
518 static int
super_error(struct layer2 *l2, struct sk_buff *skb)
520 {
521 if (skb->len != l2addrsize(l2) +
522 (test_bit(FLG_MOD128, &l2->flag) ? 2 : 1))
523 return 'N';
524 return 0;
525 }
526
527 static int
unnum_error(struct layer2 *l2, struct sk_buff *skb, int wantrsp)
529 {
530 int rsp = (*skb->data & 0x2) >> 1;
531 if (test_bit(FLG_ORIG, &l2->flag))
532 rsp = !rsp;
533 if (rsp != wantrsp)
534 return 'L';
535 if (skb->len != l2addrsize(l2) + 1)
536 return 'N';
537 return 0;
538 }
539
540 static int
UI_error(struct layer2 *l2, struct sk_buff *skb)
542 {
543 int rsp = *skb->data & 0x2;
544 if (test_bit(FLG_ORIG, &l2->flag))
545 rsp = !rsp;
546 if (rsp)
547 return 'L';
548 if (skb->len > l2->maxlen + l2addrsize(l2) + 1)
549 return 'O';
550 return 0;
551 }
552
553 static int
FRMR_error(struct layer2 *l2, struct sk_buff *skb)
555 {
556 u_int headers = l2addrsize(l2) + 1;
557 u_char *datap = skb->data + headers;
558 int rsp = *skb->data & 0x2;
559
560 if (test_bit(FLG_ORIG, &l2->flag))
561 rsp = !rsp;
562 if (!rsp)
563 return 'L';
564 if (test_bit(FLG_MOD128, &l2->flag)) {
565 if (skb->len < headers + 5)
566 return 'N';
567 else if (*debug & DEBUG_L2)
568 l2m_debug(&l2->l2m,
569 "FRMR information %2x %2x %2x %2x %2x",
570 datap[0], datap[1], datap[2], datap[3], datap[4]);
571 } else {
572 if (skb->len < headers + 3)
573 return 'N';
574 else if (*debug & DEBUG_L2)
575 l2m_debug(&l2->l2m,
576 "FRMR information %2x %2x %2x",
577 datap[0], datap[1], datap[2]);
578 }
579 return 0;
580 }
581
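/*
 * A received N(R) is legal if it acknowledges only frames between
 * V(A) and V(S), i.e. (nr - va) <= (vs - va) modulo 8 or 128.
 */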
582 static unsigned int
legalnr(struct layer2 *l2, unsigned int nr)
584 {
585 if (test_bit(FLG_MOD128, &l2->flag))
586 return ((nr - l2->va) % 128) <= ((l2->vs - l2->va) % 128);
587 else
588 return ((nr - l2->va) % 8) <= ((l2->vs - l2->va) % 8);
589 }
590
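/*
 * Acknowledge I-frames up to nr: advance V(A), release the stored
 * copies from windowar[] (collected on tmp_queue and freed afterwards)
 * and move the start-of-window index along.
 */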
591 static void
setva(struct layer2 *l2, unsigned int nr)
593 {
594 struct sk_buff *skb;
595
596 while (l2->va != nr) {
597 l2->va++;
598 if (test_bit(FLG_MOD128, &l2->flag))
599 l2->va %= 128;
600 else
601 l2->va %= 8;
602 if (l2->windowar[l2->sow]) {
603 skb_trim(l2->windowar[l2->sow], 0);
604 skb_queue_tail(&l2->tmp_queue, l2->windowar[l2->sow]);
605 l2->windowar[l2->sow] = NULL;
606 }
607 l2->sow = (l2->sow + 1) % l2->window;
608 }
609 skb = skb_dequeue(&l2->tmp_queue);
610 while (skb) {
611 dev_kfree_skb(skb);
612 skb = skb_dequeue(&l2->tmp_queue);
613 }
614 }
615
616 static void
send_uframe(struct layer2 *l2, struct sk_buff *skb, u_char cmd, u_char cr)
618 {
619 u_char tmp[MAX_L2HEADER_LEN];
620 int i;
621
622 i = sethdraddr(l2, tmp, cr);
623 tmp[i++] = cmd;
624 if (skb)
625 skb_trim(skb, 0);
626 else {
627 skb = mI_alloc_skb(i, GFP_ATOMIC);
628 if (!skb) {
629 printk(KERN_WARNING "%s: can't alloc skbuff in %s\n",
630 mISDNDevName4ch(&l2->ch), __func__);
631 return;
632 }
633 }
634 skb_put_data(skb, tmp, i);
635 enqueue_super(l2, skb);
636 }
637
638
639 inline u_char
get_PollFlag(struct layer2 *l2, struct sk_buff *skb)
641 {
642 return skb->data[l2addrsize(l2)] & 0x10;
643 }
644
645 inline u_char
get_PollFlagFree(struct layer2 *l2, struct sk_buff *skb)
647 {
648 u_char PF;
649
650 PF = get_PollFlag(l2, skb);
651 dev_kfree_skb(skb);
652 return PF;
653 }
654
655 inline void
start_t200(struct layer2 *l2, int i)
657 {
658 mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, i);
659 test_and_set_bit(FLG_T200_RUN, &l2->flag);
660 }
661
662 inline void
restart_t200(struct layer2 *l2, int i)
664 {
665 mISDN_FsmRestartTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, i);
666 test_and_set_bit(FLG_T200_RUN, &l2->flag);
667 }
668
669 inline void
stop_t200(struct layer2 *l2, int i)
671 {
672 if (test_and_clear_bit(FLG_T200_RUN, &l2->flag))
673 mISDN_FsmDelTimer(&l2->t200, i);
674 }
675
676 inline void
st5_dl_release_l2l3(struct layer2 *l2)
678 {
679 int pr;
680
681 if (test_and_clear_bit(FLG_PEND_REL, &l2->flag))
682 pr = DL_RELEASE_CNF;
683 else
684 pr = DL_RELEASE_IND;
685 l2up_create(l2, pr, 0, NULL);
686 }
687
688 inline void
lapb_dl_release_l2l3(struct layer2 *l2, int f)
690 {
691 if (test_bit(FLG_LAPB, &l2->flag))
692 l2down_create(l2, PH_DEACTIVATE_REQ, l2_newid(l2), 0, NULL);
693 l2up_create(l2, f, 0, NULL);
694 }
695
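/*
 * (Re)start link establishment: clear exception conditions, send
 * SABM/SABME with the P bit set, run T200 instead of T203, drop any
 * frames still held for retransmission and enter ST_L2_5.
 */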
696 static void
establishlink(struct FsmInst *fi)
698 {
699 struct layer2 *l2 = fi->userdata;
700 u_char cmd;
701
702 clear_exception(l2);
703 l2->rc = 0;
704 cmd = (test_bit(FLG_MOD128, &l2->flag) ? SABME : SABM) | 0x10;
705 send_uframe(l2, NULL, cmd, CMD);
706 mISDN_FsmDelTimer(&l2->t203, 1);
707 restart_t200(l2, 1);
708 test_and_clear_bit(FLG_PEND_REL, &l2->flag);
709 freewin(l2);
710 mISDN_FsmChangeState(fi, ST_L2_5);
711 }
712
713 static void
l2_mdl_error_ua(struct FsmInst *fi, int event, void *arg)
715 {
716 struct sk_buff *skb = arg;
717 struct layer2 *l2 = fi->userdata;
718
719 if (get_PollFlagFree(l2, skb))
720 l2mgr(l2, MDL_ERROR_IND, (void *) 'C');
721 else
722 l2mgr(l2, MDL_ERROR_IND, (void *) 'D');
723
724 }
725
726 static void
l2_mdl_error_dm(struct FsmInst *fi, int event, void *arg)
728 {
729 struct sk_buff *skb = arg;
730 struct layer2 *l2 = fi->userdata;
731
732 if (get_PollFlagFree(l2, skb))
733 l2mgr(l2, MDL_ERROR_IND, (void *) 'B');
734 else {
735 l2mgr(l2, MDL_ERROR_IND, (void *) 'E');
736 establishlink(fi);
737 test_and_clear_bit(FLG_L3_INIT, &l2->flag);
738 }
739 }
740
741 static void
l2_st8_mdl_error_dm(struct FsmInst *fi, int event, void *arg)
743 {
744 struct sk_buff *skb = arg;
745 struct layer2 *l2 = fi->userdata;
746
747 if (get_PollFlagFree(l2, skb))
748 l2mgr(l2, MDL_ERROR_IND, (void *) 'B');
749 else
750 l2mgr(l2, MDL_ERROR_IND, (void *) 'E');
751 establishlink(fi);
752 test_and_clear_bit(FLG_L3_INIT, &l2->flag);
753 }
754
755 static void
l2_go_st3(struct FsmInst *fi, int event, void *arg)
757 {
758 dev_kfree_skb((struct sk_buff *)arg);
759 mISDN_FsmChangeState(fi, ST_L2_3);
760 }
761
762 static void
l2_mdl_assign(struct FsmInst *fi, int event, void *arg)
764 {
765 struct layer2 *l2 = fi->userdata;
766
767 mISDN_FsmChangeState(fi, ST_L2_3);
768 dev_kfree_skb((struct sk_buff *)arg);
769 l2_tei(l2, MDL_ASSIGN_IND, 0);
770 }
771
772 static void
l2_queue_ui_assign(struct FsmInst *fi, int event, void *arg)
774 {
775 struct layer2 *l2 = fi->userdata;
776 struct sk_buff *skb = arg;
777
778 skb_queue_tail(&l2->ui_queue, skb);
779 mISDN_FsmChangeState(fi, ST_L2_2);
780 l2_tei(l2, MDL_ASSIGN_IND, 0);
781 }
782
783 static void
l2_queue_ui(struct FsmInst *fi, int event, void *arg)
785 {
786 struct layer2 *l2 = fi->userdata;
787 struct sk_buff *skb = arg;
788
789 skb_queue_tail(&l2->ui_queue, skb);
790 }
791
792 static void
tx_ui(struct layer2 *l2)
794 {
795 struct sk_buff *skb;
796 u_char header[MAX_L2HEADER_LEN];
797 int i;
798
799 i = sethdraddr(l2, header, CMD);
800 if (test_bit(FLG_LAPD_NET, &l2->flag))
801 header[1] = 0xff; /* tei 127 */
802 header[i++] = UI;
803 while ((skb = skb_dequeue(&l2->ui_queue))) {
804 memcpy(skb_push(skb, i), header, i);
805 enqueue_ui(l2, skb);
806 }
807 }
808
809 static void
l2_send_ui(struct FsmInst *fi, int event, void *arg)
811 {
812 struct layer2 *l2 = fi->userdata;
813 struct sk_buff *skb = arg;
814
815 skb_queue_tail(&l2->ui_queue, skb);
816 tx_ui(l2);
817 }
818
819 static void
l2_got_ui(struct FsmInst *fi, int event, void *arg)
821 {
822 struct layer2 *l2 = fi->userdata;
823 struct sk_buff *skb = arg;
824
825 skb_pull(skb, l2headersize(l2, 1));
826 /*
827 * in states 1-3 for broadcast
828 */
829
830 if (l2->tm)
831 l2_tei(l2, MDL_STATUS_UI_IND, 0);
832 l2up(l2, DL_UNITDATA_IND, skb);
833 }
834
835 static void
l2_establish(struct FsmInst *fi, int event, void *arg)
837 {
838 struct sk_buff *skb = arg;
839 struct layer2 *l2 = fi->userdata;
840
841 establishlink(fi);
842 test_and_set_bit(FLG_L3_INIT, &l2->flag);
843 dev_kfree_skb(skb);
844 }
845
846 static void
l2_discard_i_setl3(struct FsmInst *fi, int event, void *arg)
848 {
849 struct sk_buff *skb = arg;
850 struct layer2 *l2 = fi->userdata;
851
852 skb_queue_purge(&l2->i_queue);
853 test_and_set_bit(FLG_L3_INIT, &l2->flag);
854 test_and_clear_bit(FLG_PEND_REL, &l2->flag);
855 dev_kfree_skb(skb);
856 }
857
858 static void
l2_l3_reestablish(struct FsmInst *fi, int event, void *arg)
860 {
861 struct sk_buff *skb = arg;
862 struct layer2 *l2 = fi->userdata;
863
864 skb_queue_purge(&l2->i_queue);
865 establishlink(fi);
866 test_and_set_bit(FLG_L3_INIT, &l2->flag);
867 dev_kfree_skb(skb);
868 }
869
870 static void
l2_release(struct FsmInst *fi, int event, void *arg)
872 {
873 struct layer2 *l2 = fi->userdata;
874 struct sk_buff *skb = arg;
875
876 skb_trim(skb, 0);
877 l2up(l2, DL_RELEASE_CNF, skb);
878 }
879
880 static void
l2_pend_rel(struct FsmInst *fi, int event, void *arg)
882 {
883 struct sk_buff *skb = arg;
884 struct layer2 *l2 = fi->userdata;
885
886 test_and_set_bit(FLG_PEND_REL, &l2->flag);
887 dev_kfree_skb(skb);
888 }
889
890 static void
l2_disconnect(struct FsmInst *fi, int event, void *arg)
892 {
893 struct layer2 *l2 = fi->userdata;
894 struct sk_buff *skb = arg;
895
896 skb_queue_purge(&l2->i_queue);
897 freewin(l2);
898 mISDN_FsmChangeState(fi, ST_L2_6);
899 l2->rc = 0;
900 send_uframe(l2, NULL, DISC | 0x10, CMD);
901 mISDN_FsmDelTimer(&l2->t203, 1);
902 restart_t200(l2, 2);
903 dev_kfree_skb(skb);
904 }
905
906 static void
l2_start_multi(struct FsmInst *fi, int event, void *arg)
908 {
909 struct layer2 *l2 = fi->userdata;
910 struct sk_buff *skb = arg;
911
912 l2->vs = 0;
913 l2->va = 0;
914 l2->vr = 0;
915 l2->sow = 0;
916 clear_exception(l2);
917 send_uframe(l2, NULL, UA | get_PollFlag(l2, skb), RSP);
918 mISDN_FsmChangeState(fi, ST_L2_7);
919 mISDN_FsmAddTimer(&l2->t203, l2->T203, EV_L2_T203, NULL, 3);
920 skb_trim(skb, 0);
921 l2up(l2, DL_ESTABLISH_IND, skb);
922 if (l2->tm)
923 l2_tei(l2, MDL_STATUS_UP_IND, 0);
924 }
925
926 static void
l2_send_UA(struct FsmInst *fi, int event, void *arg)
928 {
929 struct layer2 *l2 = fi->userdata;
930 struct sk_buff *skb = arg;
931
932 send_uframe(l2, skb, UA | get_PollFlag(l2, skb), RSP);
933 }
934
935 static void
l2_send_DM(struct FsmInst *fi, int event, void *arg)
937 {
938 struct layer2 *l2 = fi->userdata;
939 struct sk_buff *skb = arg;
940
941 send_uframe(l2, skb, DM | get_PollFlag(l2, skb), RSP);
942 }
943
944 static void
l2_restart_multi(struct FsmInst *fi, int event, void *arg)
946 {
947 struct layer2 *l2 = fi->userdata;
948 struct sk_buff *skb = arg;
949 int est = 0;
950
951 send_uframe(l2, skb, UA | get_PollFlag(l2, skb), RSP);
952
953 l2mgr(l2, MDL_ERROR_IND, (void *) 'F');
954
955 if (l2->vs != l2->va) {
956 skb_queue_purge(&l2->i_queue);
957 est = 1;
958 }
959
960 clear_exception(l2);
961 l2->vs = 0;
962 l2->va = 0;
963 l2->vr = 0;
964 l2->sow = 0;
965 mISDN_FsmChangeState(fi, ST_L2_7);
966 stop_t200(l2, 3);
967 mISDN_FsmRestartTimer(&l2->t203, l2->T203, EV_L2_T203, NULL, 3);
968
969 if (est)
970 l2up_create(l2, DL_ESTABLISH_IND, 0, NULL);
971 /* mISDN_queue_data(&l2->inst, l2->inst.id | MSG_BROADCAST,
972 * MGR_SHORTSTATUS | INDICATION, SSTATUS_L2_ESTABLISHED,
973 * 0, NULL, 0);
974 */
975 if (skb_queue_len(&l2->i_queue) && cansend(l2))
976 mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL);
977 }
978
979 static void
l2_stop_multi(struct FsmInst *fi, int event, void *arg)
981 {
982 struct layer2 *l2 = fi->userdata;
983 struct sk_buff *skb = arg;
984
985 mISDN_FsmChangeState(fi, ST_L2_4);
986 mISDN_FsmDelTimer(&l2->t203, 3);
987 stop_t200(l2, 4);
988
989 send_uframe(l2, skb, UA | get_PollFlag(l2, skb), RSP);
990 skb_queue_purge(&l2->i_queue);
991 freewin(l2);
992 lapb_dl_release_l2l3(l2, DL_RELEASE_IND);
993 if (l2->tm)
994 l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
995 }
996
997 static void
l2_connected(struct FsmInst *fi, int event, void *arg)
999 {
1000 struct layer2 *l2 = fi->userdata;
1001 struct sk_buff *skb = arg;
1002 int pr = -1;
1003
1004 if (!get_PollFlag(l2, skb)) {
1005 l2_mdl_error_ua(fi, event, arg);
1006 return;
1007 }
1008 dev_kfree_skb(skb);
1009 if (test_and_clear_bit(FLG_PEND_REL, &l2->flag))
1010 l2_disconnect(fi, event, NULL);
1011 if (test_and_clear_bit(FLG_L3_INIT, &l2->flag)) {
1012 pr = DL_ESTABLISH_CNF;
1013 } else if (l2->vs != l2->va) {
1014 skb_queue_purge(&l2->i_queue);
1015 pr = DL_ESTABLISH_IND;
1016 }
1017 stop_t200(l2, 5);
1018 l2->vr = 0;
1019 l2->vs = 0;
1020 l2->va = 0;
1021 l2->sow = 0;
1022 mISDN_FsmChangeState(fi, ST_L2_7);
1023 mISDN_FsmAddTimer(&l2->t203, l2->T203, EV_L2_T203, NULL, 4);
1024 if (pr != -1)
1025 l2up_create(l2, pr, 0, NULL);
1026
1027 if (skb_queue_len(&l2->i_queue) && cansend(l2))
1028 mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL);
1029
1030 if (l2->tm)
1031 l2_tei(l2, MDL_STATUS_UP_IND, 0);
1032 }
1033
1034 static void
l2_released(struct FsmInst *fi, int event, void *arg)
1036 {
1037 struct layer2 *l2 = fi->userdata;
1038 struct sk_buff *skb = arg;
1039
1040 if (!get_PollFlag(l2, skb)) {
1041 l2_mdl_error_ua(fi, event, arg);
1042 return;
1043 }
1044 dev_kfree_skb(skb);
1045 stop_t200(l2, 6);
1046 lapb_dl_release_l2l3(l2, DL_RELEASE_CNF);
1047 mISDN_FsmChangeState(fi, ST_L2_4);
1048 if (l2->tm)
1049 l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
1050 }
1051
1052 static void
l2_reestablish(struct FsmInst *fi, int event, void *arg)
1054 {
1055 struct layer2 *l2 = fi->userdata;
1056 struct sk_buff *skb = arg;
1057
1058 if (!get_PollFlagFree(l2, skb)) {
1059 establishlink(fi);
1060 test_and_set_bit(FLG_L3_INIT, &l2->flag);
1061 }
1062 }
1063
1064 static void
l2_st5_dm_release(struct FsmInst *fi, int event, void *arg)
1066 {
1067 struct layer2 *l2 = fi->userdata;
1068 struct sk_buff *skb = arg;
1069
1070 if (get_PollFlagFree(l2, skb)) {
1071 stop_t200(l2, 7);
1072 if (!test_bit(FLG_L3_INIT, &l2->flag))
1073 skb_queue_purge(&l2->i_queue);
1074 if (test_bit(FLG_LAPB, &l2->flag))
1075 l2down_create(l2, PH_DEACTIVATE_REQ,
1076 l2_newid(l2), 0, NULL);
1077 st5_dl_release_l2l3(l2);
1078 mISDN_FsmChangeState(fi, ST_L2_4);
1079 if (l2->tm)
1080 l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
1081 }
1082 }
1083
1084 static void
l2_st6_dm_release(struct FsmInst *fi, int event, void *arg)
1086 {
1087 struct layer2 *l2 = fi->userdata;
1088 struct sk_buff *skb = arg;
1089
1090 if (get_PollFlagFree(l2, skb)) {
1091 stop_t200(l2, 8);
1092 lapb_dl_release_l2l3(l2, DL_RELEASE_CNF);
1093 mISDN_FsmChangeState(fi, ST_L2_4);
1094 if (l2->tm)
1095 l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
1096 }
1097 }
1098
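/*
 * Build and send a supervisory frame (typ is RR, RNR or REJ) as command
 * or response, carrying the current V(R) and the given P/F bit.
 */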
1099 static void
enquiry_cr(struct layer2 *l2, u_char typ, u_char cr, u_char pf)
1101 {
1102 struct sk_buff *skb;
1103 u_char tmp[MAX_L2HEADER_LEN];
1104 int i;
1105
1106 i = sethdraddr(l2, tmp, cr);
1107 if (test_bit(FLG_MOD128, &l2->flag)) {
1108 tmp[i++] = typ;
1109 tmp[i++] = (l2->vr << 1) | (pf ? 1 : 0);
1110 } else
1111 tmp[i++] = (l2->vr << 5) | typ | (pf ? 0x10 : 0);
1112 skb = mI_alloc_skb(i, GFP_ATOMIC);
1113 if (!skb) {
printk(KERN_WARNING "%s: isdnl2 can't alloc skbuff in %s\n",
1115 mISDNDevName4ch(&l2->ch), __func__);
1116 return;
1117 }
1118 skb_put_data(skb, tmp, i);
1119 enqueue_super(l2, skb);
1120 }
1121
1122 inline void
enquiry_response(struct layer2 *l2)
1124 {
1125 if (test_bit(FLG_OWN_BUSY, &l2->flag))
1126 enquiry_cr(l2, RNR, RSP, 1);
1127 else
1128 enquiry_cr(l2, RR, RSP, 1);
1129 test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
1130 }
1131
1132 inline void
transmit_enquiry(struct layer2 *l2)
1134 {
1135 if (test_bit(FLG_OWN_BUSY, &l2->flag))
1136 enquiry_cr(l2, RNR, CMD, 1);
1137 else
1138 enquiry_cr(l2, RR, CMD, 1);
1139 test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
1140 start_t200(l2, 9);
1141 }
1142
1143
1144 static void
nrerrorrecovery(struct FsmInst *fi)
1146 {
1147 struct layer2 *l2 = fi->userdata;
1148
1149 l2mgr(l2, MDL_ERROR_IND, (void *) 'J');
1150 establishlink(fi);
1151 test_and_clear_bit(FLG_L3_INIT, &l2->flag);
1152 }
1153
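/*
 * Prepare retransmission after REJ or timer recovery: step V(S) back to
 * nr and move the unacknowledged I-frames from windowar[] back onto the
 * head of i_queue, then trigger EV_L2_ACK_PULL to resend them.
 */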
1154 static void
invoke_retransmission(struct layer2 *l2, unsigned int nr)
1156 {
1157 u_int p1;
1158
1159 if (l2->vs != nr) {
1160 while (l2->vs != nr) {
1161 (l2->vs)--;
1162 if (test_bit(FLG_MOD128, &l2->flag)) {
1163 l2->vs %= 128;
1164 p1 = (l2->vs - l2->va) % 128;
1165 } else {
1166 l2->vs %= 8;
1167 p1 = (l2->vs - l2->va) % 8;
1168 }
1169 p1 = (p1 + l2->sow) % l2->window;
1170 if (l2->windowar[p1])
1171 skb_queue_head(&l2->i_queue, l2->windowar[p1]);
1172 else
1173 printk(KERN_WARNING
1174 "%s: windowar[%d] is NULL\n",
1175 mISDNDevName4ch(&l2->ch), p1);
1176 l2->windowar[p1] = NULL;
1177 }
1178 mISDN_FsmEvent(&l2->l2m, EV_L2_ACK_PULL, NULL);
1179 }
1180 }
1181
1182 static void
l2_st7_got_super(struct FsmInst *fi, int event, void *arg)
1184 {
1185 struct layer2 *l2 = fi->userdata;
1186 struct sk_buff *skb = arg;
1187 int PollFlag, rsp, typ = RR;
1188 unsigned int nr;
1189
1190 rsp = *skb->data & 0x2;
1191 if (test_bit(FLG_ORIG, &l2->flag))
1192 rsp = !rsp;
1193
1194 skb_pull(skb, l2addrsize(l2));
1195 if (IsRNR(skb->data, l2)) {
1196 set_peer_busy(l2);
1197 typ = RNR;
1198 } else
1199 clear_peer_busy(l2);
1200 if (IsREJ(skb->data, l2))
1201 typ = REJ;
1202
1203 if (test_bit(FLG_MOD128, &l2->flag)) {
1204 PollFlag = (skb->data[1] & 0x1) == 0x1;
1205 nr = skb->data[1] >> 1;
1206 } else {
1207 PollFlag = (skb->data[0] & 0x10);
1208 nr = (skb->data[0] >> 5) & 0x7;
1209 }
1210 dev_kfree_skb(skb);
1211
1212 if (PollFlag) {
1213 if (rsp)
1214 l2mgr(l2, MDL_ERROR_IND, (void *) 'A');
1215 else
1216 enquiry_response(l2);
1217 }
1218 if (legalnr(l2, nr)) {
1219 if (typ == REJ) {
1220 setva(l2, nr);
1221 invoke_retransmission(l2, nr);
1222 stop_t200(l2, 10);
1223 if (mISDN_FsmAddTimer(&l2->t203, l2->T203,
1224 EV_L2_T203, NULL, 6))
1225 l2m_debug(&l2->l2m, "Restart T203 ST7 REJ");
1226 } else if ((nr == l2->vs) && (typ == RR)) {
1227 setva(l2, nr);
1228 stop_t200(l2, 11);
1229 mISDN_FsmRestartTimer(&l2->t203, l2->T203,
1230 EV_L2_T203, NULL, 7);
1231 } else if ((l2->va != nr) || (typ == RNR)) {
1232 setva(l2, nr);
1233 if (typ != RR)
1234 mISDN_FsmDelTimer(&l2->t203, 9);
1235 restart_t200(l2, 12);
1236 }
1237 if (skb_queue_len(&l2->i_queue) && (typ == RR))
1238 mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL);
1239 } else
1240 nrerrorrecovery(fi);
1241 }
1242
1243 static void
l2_feed_i_if_reest(struct FsmInst *fi, int event, void *arg)
1245 {
1246 struct layer2 *l2 = fi->userdata;
1247 struct sk_buff *skb = arg;
1248
1249 if (!test_bit(FLG_L3_INIT, &l2->flag))
1250 skb_queue_tail(&l2->i_queue, skb);
1251 else
1252 dev_kfree_skb(skb);
1253 }
1254
1255 static void
l2_feed_i_pull(struct FsmInst *fi, int event, void *arg)
1257 {
1258 struct layer2 *l2 = fi->userdata;
1259 struct sk_buff *skb = arg;
1260
1261 skb_queue_tail(&l2->i_queue, skb);
1262 mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL);
1263 }
1264
1265 static void
l2_feed_iqueue(struct FsmInst *fi, int event, void *arg)
1267 {
1268 struct layer2 *l2 = fi->userdata;
1269 struct sk_buff *skb = arg;
1270
1271 skb_queue_tail(&l2->i_queue, skb);
1272 }
1273
1274 static void
l2_got_iframe(struct FsmInst *fi, int event, void *arg)
1276 {
1277 struct layer2 *l2 = fi->userdata;
1278 struct sk_buff *skb = arg;
1279 int PollFlag, i;
1280 u_int ns, nr;
1281
1282 i = l2addrsize(l2);
1283 if (test_bit(FLG_MOD128, &l2->flag)) {
1284 PollFlag = ((skb->data[i + 1] & 0x1) == 0x1);
1285 ns = skb->data[i] >> 1;
1286 nr = (skb->data[i + 1] >> 1) & 0x7f;
1287 } else {
1288 PollFlag = (skb->data[i] & 0x10);
1289 ns = (skb->data[i] >> 1) & 0x7;
1290 nr = (skb->data[i] >> 5) & 0x7;
1291 }
1292 if (test_bit(FLG_OWN_BUSY, &l2->flag)) {
1293 dev_kfree_skb(skb);
1294 if (PollFlag)
1295 enquiry_response(l2);
1296 } else {
1297 if (l2->vr == ns) {
1298 l2->vr++;
1299 if (test_bit(FLG_MOD128, &l2->flag))
1300 l2->vr %= 128;
1301 else
1302 l2->vr %= 8;
1303 test_and_clear_bit(FLG_REJEXC, &l2->flag);
1304 if (PollFlag)
1305 enquiry_response(l2);
1306 else
1307 test_and_set_bit(FLG_ACK_PEND, &l2->flag);
1308 skb_pull(skb, l2headersize(l2, 0));
1309 l2up(l2, DL_DATA_IND, skb);
1310 } else {
1311 /* n(s)!=v(r) */
1312 dev_kfree_skb(skb);
1313 if (test_and_set_bit(FLG_REJEXC, &l2->flag)) {
1314 if (PollFlag)
1315 enquiry_response(l2);
1316 } else {
1317 enquiry_cr(l2, REJ, RSP, PollFlag);
1318 test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
1319 }
1320 }
1321 }
1322 if (legalnr(l2, nr)) {
1323 if (!test_bit(FLG_PEER_BUSY, &l2->flag) &&
1324 (fi->state == ST_L2_7)) {
1325 if (nr == l2->vs) {
1326 stop_t200(l2, 13);
1327 mISDN_FsmRestartTimer(&l2->t203, l2->T203,
1328 EV_L2_T203, NULL, 7);
1329 } else if (nr != l2->va)
1330 restart_t200(l2, 14);
1331 }
1332 setva(l2, nr);
1333 } else {
1334 nrerrorrecovery(fi);
1335 return;
1336 }
1337 if (skb_queue_len(&l2->i_queue) && (fi->state == ST_L2_7))
1338 mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL);
1339 if (test_and_clear_bit(FLG_ACK_PEND, &l2->flag))
1340 enquiry_cr(l2, RR, RSP, 0);
1341 }
1342
1343 static void
l2_got_tei(struct FsmInst *fi, int event, void *arg)
1345 {
1346 struct layer2 *l2 = fi->userdata;
1347 u_int info;
1348
1349 l2->tei = (signed char)(long)arg;
1350 set_channel_address(&l2->ch, l2->sapi, l2->tei);
1351 info = DL_INFO_L2_CONNECT;
1352 l2up_create(l2, DL_INFORMATION_IND, sizeof(info), &info);
1353 if (fi->state == ST_L2_3) {
1354 establishlink(fi);
1355 test_and_set_bit(FLG_L3_INIT, &l2->flag);
1356 } else
1357 mISDN_FsmChangeState(fi, ST_L2_4);
1358 if (skb_queue_len(&l2->ui_queue))
1359 tx_ui(l2);
1360 }
1361
1362 static void
l2_st5_tout_200(struct FsmInst *fi, int event, void *arg)
1364 {
1365 struct layer2 *l2 = fi->userdata;
1366
1367 if (test_bit(FLG_LAPD, &l2->flag) &&
1368 test_bit(FLG_DCHAN_BUSY, &l2->flag)) {
1369 mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 9);
1370 } else if (l2->rc == l2->N200) {
1371 mISDN_FsmChangeState(fi, ST_L2_4);
1372 test_and_clear_bit(FLG_T200_RUN, &l2->flag);
1373 skb_queue_purge(&l2->i_queue);
1374 l2mgr(l2, MDL_ERROR_IND, (void *) 'G');
1375 if (test_bit(FLG_LAPB, &l2->flag))
1376 l2down_create(l2, PH_DEACTIVATE_REQ,
1377 l2_newid(l2), 0, NULL);
1378 st5_dl_release_l2l3(l2);
1379 if (l2->tm)
1380 l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
1381 } else {
1382 l2->rc++;
1383 mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 9);
1384 send_uframe(l2, NULL, (test_bit(FLG_MOD128, &l2->flag) ?
1385 SABME : SABM) | 0x10, CMD);
1386 }
1387 }
1388
1389 static void
l2_st6_tout_200(struct FsmInst *fi, int event, void *arg)
1391 {
1392 struct layer2 *l2 = fi->userdata;
1393
1394 if (test_bit(FLG_LAPD, &l2->flag) &&
1395 test_bit(FLG_DCHAN_BUSY, &l2->flag)) {
1396 mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 9);
1397 } else if (l2->rc == l2->N200) {
1398 mISDN_FsmChangeState(fi, ST_L2_4);
1399 test_and_clear_bit(FLG_T200_RUN, &l2->flag);
1400 l2mgr(l2, MDL_ERROR_IND, (void *) 'H');
1401 lapb_dl_release_l2l3(l2, DL_RELEASE_CNF);
1402 if (l2->tm)
1403 l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
1404 } else {
1405 l2->rc++;
1406 mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200,
1407 NULL, 9);
1408 send_uframe(l2, NULL, DISC | 0x10, CMD);
1409 }
1410 }
1411
1412 static void
l2_st7_tout_200(struct FsmInst *fi, int event, void *arg)
1414 {
1415 struct layer2 *l2 = fi->userdata;
1416
1417 if (test_bit(FLG_LAPD, &l2->flag) &&
1418 test_bit(FLG_DCHAN_BUSY, &l2->flag)) {
1419 mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 9);
1420 return;
1421 }
1422 test_and_clear_bit(FLG_T200_RUN, &l2->flag);
1423 l2->rc = 0;
1424 mISDN_FsmChangeState(fi, ST_L2_8);
1425 transmit_enquiry(l2);
1426 l2->rc++;
1427 }
1428
1429 static void
l2_st8_tout_200(struct FsmInst *fi, int event, void *arg)
1431 {
1432 struct layer2 *l2 = fi->userdata;
1433
1434 if (test_bit(FLG_LAPD, &l2->flag) &&
1435 test_bit(FLG_DCHAN_BUSY, &l2->flag)) {
1436 mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 9);
1437 return;
1438 }
1439 test_and_clear_bit(FLG_T200_RUN, &l2->flag);
1440 if (l2->rc == l2->N200) {
1441 l2mgr(l2, MDL_ERROR_IND, (void *) 'I');
1442 establishlink(fi);
1443 test_and_clear_bit(FLG_L3_INIT, &l2->flag);
1444 } else {
1445 transmit_enquiry(l2);
1446 l2->rc++;
1447 }
1448 }
1449
1450 static void
l2_st7_tout_203(struct FsmInst *fi, int event, void *arg)
1452 {
1453 struct layer2 *l2 = fi->userdata;
1454
1455 if (test_bit(FLG_LAPD, &l2->flag) &&
1456 test_bit(FLG_DCHAN_BUSY, &l2->flag)) {
1457 mISDN_FsmAddTimer(&l2->t203, l2->T203, EV_L2_T203, NULL, 9);
1458 return;
1459 }
1460 mISDN_FsmChangeState(fi, ST_L2_8);
1461 transmit_enquiry(l2);
1462 l2->rc = 0;
1463 }
1464
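/*
 * Send the next I-frame from i_queue if the window allows it: build the
 * address and control fields with the current V(S)/V(R), keep the
 * original skb in windowar[] for possible retransmission, send a copy
 * with enough headroom down to layer 1 and make sure T200 is running.
 */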
1465 static void
l2_pull_iqueue(struct FsmInst *fi, int event, void *arg)
1467 {
1468 struct layer2 *l2 = fi->userdata;
1469 struct sk_buff *skb, *nskb;
1470 u_char header[MAX_L2HEADER_LEN];
1471 u_int i, p1;
1472
1473 if (!cansend(l2))
1474 return;
1475
1476 skb = skb_dequeue(&l2->i_queue);
1477 if (!skb)
1478 return;
1479 i = sethdraddr(l2, header, CMD);
1480 if (test_bit(FLG_MOD128, &l2->flag)) {
1481 header[i++] = l2->vs << 1;
1482 header[i++] = l2->vr << 1;
1483 } else
1484 header[i++] = (l2->vr << 5) | (l2->vs << 1);
1485 nskb = skb_realloc_headroom(skb, i);
1486 if (!nskb) {
1487 printk(KERN_WARNING "%s: no headroom(%d) copy for IFrame\n",
1488 mISDNDevName4ch(&l2->ch), i);
1489 skb_queue_head(&l2->i_queue, skb);
1490 return;
1491 }
1492 if (test_bit(FLG_MOD128, &l2->flag)) {
1493 p1 = (l2->vs - l2->va) % 128;
1494 l2->vs = (l2->vs + 1) % 128;
1495 } else {
1496 p1 = (l2->vs - l2->va) % 8;
1497 l2->vs = (l2->vs + 1) % 8;
1498 }
1499 p1 = (p1 + l2->sow) % l2->window;
1500 if (l2->windowar[p1]) {
1501 printk(KERN_WARNING "%s: l2 try overwrite ack queue entry %d\n",
1502 mISDNDevName4ch(&l2->ch), p1);
1503 dev_kfree_skb(l2->windowar[p1]);
1504 }
1505 l2->windowar[p1] = skb;
1506 memcpy(skb_push(nskb, i), header, i);
1507 l2down(l2, PH_DATA_REQ, l2_newid(l2), nskb);
1508 test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
1509 if (!test_and_set_bit(FLG_T200_RUN, &l2->flag)) {
1510 mISDN_FsmDelTimer(&l2->t203, 13);
1511 mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 11);
1512 }
1513 }
1514
1515 static void
l2_st8_got_super(struct FsmInst *fi, int event, void *arg)
1517 {
1518 struct layer2 *l2 = fi->userdata;
1519 struct sk_buff *skb = arg;
1520 int PollFlag, rsp, rnr = 0;
1521 unsigned int nr;
1522
1523 rsp = *skb->data & 0x2;
1524 if (test_bit(FLG_ORIG, &l2->flag))
1525 rsp = !rsp;
1526
1527 skb_pull(skb, l2addrsize(l2));
1528
1529 if (IsRNR(skb->data, l2)) {
1530 set_peer_busy(l2);
1531 rnr = 1;
1532 } else
1533 clear_peer_busy(l2);
1534
1535 if (test_bit(FLG_MOD128, &l2->flag)) {
1536 PollFlag = (skb->data[1] & 0x1) == 0x1;
1537 nr = skb->data[1] >> 1;
1538 } else {
1539 PollFlag = (skb->data[0] & 0x10);
1540 nr = (skb->data[0] >> 5) & 0x7;
1541 }
1542 dev_kfree_skb(skb);
1543 if (rsp && PollFlag) {
1544 if (legalnr(l2, nr)) {
1545 if (rnr) {
1546 restart_t200(l2, 15);
1547 } else {
1548 stop_t200(l2, 16);
1549 mISDN_FsmAddTimer(&l2->t203, l2->T203,
1550 EV_L2_T203, NULL, 5);
1551 setva(l2, nr);
1552 }
1553 invoke_retransmission(l2, nr);
1554 mISDN_FsmChangeState(fi, ST_L2_7);
1555 if (skb_queue_len(&l2->i_queue) && cansend(l2))
1556 mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL);
1557 } else
1558 nrerrorrecovery(fi);
1559 } else {
1560 if (!rsp && PollFlag)
1561 enquiry_response(l2);
1562 if (legalnr(l2, nr))
1563 setva(l2, nr);
1564 else
1565 nrerrorrecovery(fi);
1566 }
1567 }
1568
1569 static void
l2_got_FRMR(struct FsmInst *fi, int event, void *arg)
1571 {
1572 struct layer2 *l2 = fi->userdata;
1573 struct sk_buff *skb = arg;
1574
1575 skb_pull(skb, l2addrsize(l2) + 1);
1576
1577 if (!(skb->data[0] & 1) || ((skb->data[0] & 3) == 1) || /* I or S */
1578 (IsUA(skb->data) && (fi->state == ST_L2_7))) {
1579 l2mgr(l2, MDL_ERROR_IND, (void *) 'K');
1580 establishlink(fi);
1581 test_and_clear_bit(FLG_L3_INIT, &l2->flag);
1582 }
1583 dev_kfree_skb(skb);
1584 }
1585
1586 static void
l2_st24_tei_remove(struct FsmInst *fi, int event, void *arg)
1588 {
1589 struct layer2 *l2 = fi->userdata;
1590
1591 skb_queue_purge(&l2->ui_queue);
1592 l2->tei = GROUP_TEI;
1593 mISDN_FsmChangeState(fi, ST_L2_1);
1594 }
1595
1596 static void
l2_st3_tei_remove(struct FsmInst *fi, int event, void *arg)
1598 {
1599 struct layer2 *l2 = fi->userdata;
1600
1601 skb_queue_purge(&l2->ui_queue);
1602 l2->tei = GROUP_TEI;
1603 l2up_create(l2, DL_RELEASE_IND, 0, NULL);
1604 mISDN_FsmChangeState(fi, ST_L2_1);
1605 }
1606
1607 static void
l2_st5_tei_remove(struct FsmInst *fi, int event, void *arg)
1609 {
1610 struct layer2 *l2 = fi->userdata;
1611
1612 skb_queue_purge(&l2->i_queue);
1613 skb_queue_purge(&l2->ui_queue);
1614 freewin(l2);
1615 l2->tei = GROUP_TEI;
1616 stop_t200(l2, 17);
1617 st5_dl_release_l2l3(l2);
1618 mISDN_FsmChangeState(fi, ST_L2_1);
1619 }
1620
1621 static void
l2_st6_tei_remove(struct FsmInst *fi, int event, void *arg)
1623 {
1624 struct layer2 *l2 = fi->userdata;
1625
1626 skb_queue_purge(&l2->ui_queue);
1627 l2->tei = GROUP_TEI;
1628 stop_t200(l2, 18);
1629 l2up_create(l2, DL_RELEASE_IND, 0, NULL);
1630 mISDN_FsmChangeState(fi, ST_L2_1);
1631 }
1632
1633 static void
l2_tei_remove(struct FsmInst *fi, int event, void *arg)
1635 {
1636 struct layer2 *l2 = fi->userdata;
1637
1638 skb_queue_purge(&l2->i_queue);
1639 skb_queue_purge(&l2->ui_queue);
1640 freewin(l2);
1641 l2->tei = GROUP_TEI;
1642 stop_t200(l2, 17);
1643 mISDN_FsmDelTimer(&l2->t203, 19);
1644 l2up_create(l2, DL_RELEASE_IND, 0, NULL);
1645 /* mISDN_queue_data(&l2->inst, l2->inst.id | MSG_BROADCAST,
1646 * MGR_SHORTSTATUS_IND, SSTATUS_L2_RELEASED,
1647 * 0, NULL, 0);
1648 */
1649 mISDN_FsmChangeState(fi, ST_L2_1);
1650 }
1651
1652 static void
l2_st14_persistent_da(struct FsmInst *fi, int event, void *arg)
1654 {
1655 struct layer2 *l2 = fi->userdata;
1656 struct sk_buff *skb = arg;
1657
1658 skb_queue_purge(&l2->i_queue);
1659 skb_queue_purge(&l2->ui_queue);
1660 if (test_and_clear_bit(FLG_ESTAB_PEND, &l2->flag))
1661 l2up(l2, DL_RELEASE_IND, skb);
1662 else
1663 dev_kfree_skb(skb);
1664 }
1665
1666 static void
l2_st5_persistent_da(struct FsmInst *fi, int event, void *arg)
1668 {
1669 struct layer2 *l2 = fi->userdata;
1670 struct sk_buff *skb = arg;
1671
1672 skb_queue_purge(&l2->i_queue);
1673 skb_queue_purge(&l2->ui_queue);
1674 freewin(l2);
1675 stop_t200(l2, 19);
1676 st5_dl_release_l2l3(l2);
1677 mISDN_FsmChangeState(fi, ST_L2_4);
1678 if (l2->tm)
1679 l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
1680 dev_kfree_skb(skb);
1681 }
1682
1683 static void
l2_st6_persistent_da(struct FsmInst *fi, int event, void *arg)
1685 {
1686 struct layer2 *l2 = fi->userdata;
1687 struct sk_buff *skb = arg;
1688
1689 skb_queue_purge(&l2->ui_queue);
1690 stop_t200(l2, 20);
1691 l2up(l2, DL_RELEASE_CNF, skb);
1692 mISDN_FsmChangeState(fi, ST_L2_4);
1693 if (l2->tm)
1694 l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
1695 }
1696
1697 static void
l2_persistent_da(struct FsmInst *fi, int event, void *arg)
1699 {
1700 struct layer2 *l2 = fi->userdata;
1701 struct sk_buff *skb = arg;
1702
1703 skb_queue_purge(&l2->i_queue);
1704 skb_queue_purge(&l2->ui_queue);
1705 freewin(l2);
1706 stop_t200(l2, 19);
1707 mISDN_FsmDelTimer(&l2->t203, 19);
1708 l2up(l2, DL_RELEASE_IND, skb);
1709 mISDN_FsmChangeState(fi, ST_L2_4);
1710 if (l2->tm)
1711 l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
1712 }
1713
1714 static void
l2_set_own_busy(struct FsmInst *fi, int event, void *arg)
1716 {
1717 struct layer2 *l2 = fi->userdata;
1718 struct sk_buff *skb = arg;
1719
1720 if (!test_and_set_bit(FLG_OWN_BUSY, &l2->flag)) {
1721 enquiry_cr(l2, RNR, RSP, 0);
1722 test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
1723 }
1724 dev_kfree_skb(skb);
1725 }
1726
1727 static void
l2_clear_own_busy(struct FsmInst *fi, int event, void *arg)
1729 {
1730 struct layer2 *l2 = fi->userdata;
1731 struct sk_buff *skb = arg;
1732
1733 if (!test_and_clear_bit(FLG_OWN_BUSY, &l2->flag)) {
1734 enquiry_cr(l2, RR, RSP, 0);
1735 test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
1736 }
1737 dev_kfree_skb(skb);
1738 }
1739
1740 static void
l2_frame_error(struct FsmInst *fi, int event, void *arg)
1742 {
1743 struct layer2 *l2 = fi->userdata;
1744
1745 l2mgr(l2, MDL_ERROR_IND, arg);
1746 }
1747
1748 static void
l2_frame_error_reest(struct FsmInst *fi, int event, void *arg)
1750 {
1751 struct layer2 *l2 = fi->userdata;
1752
1753 l2mgr(l2, MDL_ERROR_IND, arg);
1754 establishlink(fi);
1755 test_and_clear_bit(FLG_L3_INIT, &l2->flag);
1756 }
1757
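/*
 * State/event dispatch table for the layer 2 FSM: each entry maps a
 * (state, event) pair to the handler defined above.
 */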
1758 static struct FsmNode L2FnList[] =
1759 {
1760 {ST_L2_1, EV_L2_DL_ESTABLISH_REQ, l2_mdl_assign},
1761 {ST_L2_2, EV_L2_DL_ESTABLISH_REQ, l2_go_st3},
1762 {ST_L2_4, EV_L2_DL_ESTABLISH_REQ, l2_establish},
1763 {ST_L2_5, EV_L2_DL_ESTABLISH_REQ, l2_discard_i_setl3},
1764 {ST_L2_7, EV_L2_DL_ESTABLISH_REQ, l2_l3_reestablish},
1765 {ST_L2_8, EV_L2_DL_ESTABLISH_REQ, l2_l3_reestablish},
1766 {ST_L2_4, EV_L2_DL_RELEASE_REQ, l2_release},
1767 {ST_L2_5, EV_L2_DL_RELEASE_REQ, l2_pend_rel},
1768 {ST_L2_7, EV_L2_DL_RELEASE_REQ, l2_disconnect},
1769 {ST_L2_8, EV_L2_DL_RELEASE_REQ, l2_disconnect},
1770 {ST_L2_5, EV_L2_DL_DATA, l2_feed_i_if_reest},
1771 {ST_L2_7, EV_L2_DL_DATA, l2_feed_i_pull},
1772 {ST_L2_8, EV_L2_DL_DATA, l2_feed_iqueue},
1773 {ST_L2_1, EV_L2_DL_UNITDATA, l2_queue_ui_assign},
1774 {ST_L2_2, EV_L2_DL_UNITDATA, l2_queue_ui},
1775 {ST_L2_3, EV_L2_DL_UNITDATA, l2_queue_ui},
1776 {ST_L2_4, EV_L2_DL_UNITDATA, l2_send_ui},
1777 {ST_L2_5, EV_L2_DL_UNITDATA, l2_send_ui},
1778 {ST_L2_6, EV_L2_DL_UNITDATA, l2_send_ui},
1779 {ST_L2_7, EV_L2_DL_UNITDATA, l2_send_ui},
1780 {ST_L2_8, EV_L2_DL_UNITDATA, l2_send_ui},
1781 {ST_L2_1, EV_L2_MDL_ASSIGN, l2_got_tei},
1782 {ST_L2_2, EV_L2_MDL_ASSIGN, l2_got_tei},
1783 {ST_L2_3, EV_L2_MDL_ASSIGN, l2_got_tei},
1784 {ST_L2_2, EV_L2_MDL_ERROR, l2_st24_tei_remove},
1785 {ST_L2_3, EV_L2_MDL_ERROR, l2_st3_tei_remove},
1786 {ST_L2_4, EV_L2_MDL_REMOVE, l2_st24_tei_remove},
1787 {ST_L2_5, EV_L2_MDL_REMOVE, l2_st5_tei_remove},
1788 {ST_L2_6, EV_L2_MDL_REMOVE, l2_st6_tei_remove},
1789 {ST_L2_7, EV_L2_MDL_REMOVE, l2_tei_remove},
1790 {ST_L2_8, EV_L2_MDL_REMOVE, l2_tei_remove},
1791 {ST_L2_4, EV_L2_SABME, l2_start_multi},
1792 {ST_L2_5, EV_L2_SABME, l2_send_UA},
1793 {ST_L2_6, EV_L2_SABME, l2_send_DM},
1794 {ST_L2_7, EV_L2_SABME, l2_restart_multi},
1795 {ST_L2_8, EV_L2_SABME, l2_restart_multi},
1796 {ST_L2_4, EV_L2_DISC, l2_send_DM},
1797 {ST_L2_5, EV_L2_DISC, l2_send_DM},
1798 {ST_L2_6, EV_L2_DISC, l2_send_UA},
1799 {ST_L2_7, EV_L2_DISC, l2_stop_multi},
1800 {ST_L2_8, EV_L2_DISC, l2_stop_multi},
1801 {ST_L2_4, EV_L2_UA, l2_mdl_error_ua},
1802 {ST_L2_5, EV_L2_UA, l2_connected},
1803 {ST_L2_6, EV_L2_UA, l2_released},
1804 {ST_L2_7, EV_L2_UA, l2_mdl_error_ua},
1805 {ST_L2_8, EV_L2_UA, l2_mdl_error_ua},
1806 {ST_L2_4, EV_L2_DM, l2_reestablish},
1807 {ST_L2_5, EV_L2_DM, l2_st5_dm_release},
1808 {ST_L2_6, EV_L2_DM, l2_st6_dm_release},
1809 {ST_L2_7, EV_L2_DM, l2_mdl_error_dm},
1810 {ST_L2_8, EV_L2_DM, l2_st8_mdl_error_dm},
1811 {ST_L2_1, EV_L2_UI, l2_got_ui},
1812 {ST_L2_2, EV_L2_UI, l2_got_ui},
1813 {ST_L2_3, EV_L2_UI, l2_got_ui},
1814 {ST_L2_4, EV_L2_UI, l2_got_ui},
1815 {ST_L2_5, EV_L2_UI, l2_got_ui},
1816 {ST_L2_6, EV_L2_UI, l2_got_ui},
1817 {ST_L2_7, EV_L2_UI, l2_got_ui},
1818 {ST_L2_8, EV_L2_UI, l2_got_ui},
1819 {ST_L2_7, EV_L2_FRMR, l2_got_FRMR},
1820 {ST_L2_8, EV_L2_FRMR, l2_got_FRMR},
1821 {ST_L2_7, EV_L2_SUPER, l2_st7_got_super},
1822 {ST_L2_8, EV_L2_SUPER, l2_st8_got_super},
1823 {ST_L2_7, EV_L2_I, l2_got_iframe},
1824 {ST_L2_8, EV_L2_I, l2_got_iframe},
1825 {ST_L2_5, EV_L2_T200, l2_timeout},
1826 {ST_L2_6, EV_L2_T200, l2_timeout},
1827 {ST_L2_7, EV_L2_T200, l2_timeout},
1828 {ST_L2_8, EV_L2_T200, l2_timeout},
1829 {ST_L2_7, EV_L2_T203, l2_timeout},
1830 {ST_L2_5, EV_L2_T200I, l2_st5_tout_200},
1831 {ST_L2_6, EV_L2_T200I, l2_st6_tout_200},
1832 {ST_L2_7, EV_L2_T200I, l2_st7_tout_200},
1833 {ST_L2_8, EV_L2_T200I, l2_st8_tout_200},
1834 {ST_L2_7, EV_L2_T203I, l2_st7_tout_203},
1835 {ST_L2_7, EV_L2_ACK_PULL, l2_pull_iqueue},
1836 {ST_L2_7, EV_L2_SET_OWN_BUSY, l2_set_own_busy},
1837 {ST_L2_8, EV_L2_SET_OWN_BUSY, l2_set_own_busy},
1838 {ST_L2_7, EV_L2_CLEAR_OWN_BUSY, l2_clear_own_busy},
1839 {ST_L2_8, EV_L2_CLEAR_OWN_BUSY, l2_clear_own_busy},
1840 {ST_L2_4, EV_L2_FRAME_ERROR, l2_frame_error},
1841 {ST_L2_5, EV_L2_FRAME_ERROR, l2_frame_error},
1842 {ST_L2_6, EV_L2_FRAME_ERROR, l2_frame_error},
1843 {ST_L2_7, EV_L2_FRAME_ERROR, l2_frame_error_reest},
1844 {ST_L2_8, EV_L2_FRAME_ERROR, l2_frame_error_reest},
1845 {ST_L2_1, EV_L1_DEACTIVATE, l2_st14_persistent_da},
1846 {ST_L2_2, EV_L1_DEACTIVATE, l2_st24_tei_remove},
1847 {ST_L2_3, EV_L1_DEACTIVATE, l2_st3_tei_remove},
1848 {ST_L2_4, EV_L1_DEACTIVATE, l2_st14_persistent_da},
1849 {ST_L2_5, EV_L1_DEACTIVATE, l2_st5_persistent_da},
1850 {ST_L2_6, EV_L1_DEACTIVATE, l2_st6_persistent_da},
1851 {ST_L2_7, EV_L1_DEACTIVATE, l2_persistent_da},
1852 {ST_L2_8, EV_L1_DEACTIVATE, l2_persistent_da},
1853 };
1854
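/*
 * PH_DATA_IND from layer 1: check the address field (SAPI/TEI for
 * LAPD), classify the frame (I, S, UI, SABM/SABME, UA, DISC, DM, FRMR),
 * run the matching *_error() check and, if the frame is acceptable,
 * feed the corresponding event into the FSM.
 */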
1855 static int
ph_data_indication(struct layer2 *l2, struct mISDNhead *hh, struct sk_buff *skb)
1857 {
1858 u_char *datap = skb->data;
1859 int ret = -EINVAL;
1860 int psapi, ptei;
1861 u_int l;
1862 int c = 0;
1863
1864 l = l2addrsize(l2);
1865 if (skb->len <= l) {
1866 mISDN_FsmEvent(&l2->l2m, EV_L2_FRAME_ERROR, (void *) 'N');
1867 return ret;
1868 }
1869 if (test_bit(FLG_LAPD, &l2->flag)) { /* Maybe not needed */
1870 psapi = *datap++;
1871 ptei = *datap++;
1872 if ((psapi & 1) || !(ptei & 1)) {
1873 printk(KERN_WARNING
1874 "%s l2 D-channel frame wrong EA0/EA1\n",
1875 mISDNDevName4ch(&l2->ch));
1876 return ret;
1877 }
1878 psapi >>= 2;
1879 ptei >>= 1;
1880 if (psapi != l2->sapi) {
1881 /* not our business */
1882 if (*debug & DEBUG_L2)
1883 printk(KERN_DEBUG "%s: sapi %d/%d mismatch\n",
1884 mISDNDevName4ch(&l2->ch), psapi,
1885 l2->sapi);
1886 dev_kfree_skb(skb);
1887 return 0;
1888 }
1889 if ((ptei != l2->tei) && (ptei != GROUP_TEI)) {
1890 /* not our business */
1891 if (*debug & DEBUG_L2)
1892 printk(KERN_DEBUG "%s: tei %d/%d mismatch\n",
1893 mISDNDevName4ch(&l2->ch), ptei, l2->tei);
1894 dev_kfree_skb(skb);
1895 return 0;
1896 }
1897 } else
1898 datap += l;
1899 if (!(*datap & 1)) { /* I-Frame */
1900 c = iframe_error(l2, skb);
1901 if (!c)
1902 ret = mISDN_FsmEvent(&l2->l2m, EV_L2_I, skb);
1903 } else if (IsSFrame(datap, l2)) { /* S-Frame */
1904 c = super_error(l2, skb);
1905 if (!c)
1906 ret = mISDN_FsmEvent(&l2->l2m, EV_L2_SUPER, skb);
1907 } else if (IsUI(datap)) {
1908 c = UI_error(l2, skb);
1909 if (!c)
1910 ret = mISDN_FsmEvent(&l2->l2m, EV_L2_UI, skb);
1911 } else if (IsSABME(datap, l2)) {
1912 c = unnum_error(l2, skb, CMD);
1913 if (!c)
1914 ret = mISDN_FsmEvent(&l2->l2m, EV_L2_SABME, skb);
1915 } else if (IsUA(datap)) {
1916 c = unnum_error(l2, skb, RSP);
1917 if (!c)
1918 ret = mISDN_FsmEvent(&l2->l2m, EV_L2_UA, skb);
1919 } else if (IsDISC(datap)) {
1920 c = unnum_error(l2, skb, CMD);
1921 if (!c)
1922 ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DISC, skb);
1923 } else if (IsDM(datap)) {
1924 c = unnum_error(l2, skb, RSP);
1925 if (!c)
1926 ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DM, skb);
1927 } else if (IsFRMR(datap)) {
1928 c = FRMR_error(l2, skb);
1929 if (!c)
1930 ret = mISDN_FsmEvent(&l2->l2m, EV_L2_FRMR, skb);
1931 } else
1932 c = 'L';
1933 if (c) {
1934 printk(KERN_WARNING "%s:l2 D-channel frame error %c\n",
1935 mISDNDevName4ch(&l2->ch), c);
1936 mISDN_FsmEvent(&l2->l2m, EV_L2_FRAME_ERROR, (void *)(long)c);
1937 }
1938 return ret;
1939 }
1940
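/*
 * mISDNchannel send function of the layer 2 instance: translates the
 * PH_* indications from below and the DL_* requests from above into FSM
 * events (or forwards them); any message not consumed by a handler is
 * freed here.
 */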
1941 static int
l2_send(struct mISDNchannel *ch, struct sk_buff *skb)
1943 {
1944 struct layer2 *l2 = container_of(ch, struct layer2, ch);
1945 struct mISDNhead *hh = mISDN_HEAD_P(skb);
1946 int ret = -EINVAL;
1947
1948 if (*debug & DEBUG_L2_RECV)
1949 printk(KERN_DEBUG "%s: %s prim(%x) id(%x) sapi(%d) tei(%d)\n",
1950 __func__, mISDNDevName4ch(&l2->ch), hh->prim, hh->id,
1951 l2->sapi, l2->tei);
1952 if (hh->prim == DL_INTERN_MSG) {
1953 struct mISDNhead *chh = hh + 1; /* saved copy */
1954
1955 *hh = *chh;
1956 if (*debug & DEBUG_L2_RECV)
1957 printk(KERN_DEBUG "%s: prim(%x) id(%x) internal msg\n",
1958 mISDNDevName4ch(&l2->ch), hh->prim, hh->id);
1959 }
	switch (hh->prim) {
	case PH_DATA_IND:
		ret = ph_data_indication(l2, hh, skb);
		break;
	case PH_DATA_CNF:
		ret = ph_data_confirm(l2, hh, skb);
		break;
	case PH_ACTIVATE_IND:
		test_and_set_bit(FLG_L1_ACTIV, &l2->flag);
		l2up_create(l2, MPH_ACTIVATE_IND, 0, NULL);
		if (test_and_clear_bit(FLG_ESTAB_PEND, &l2->flag))
			ret = mISDN_FsmEvent(&l2->l2m,
					     EV_L2_DL_ESTABLISH_REQ, skb);
		break;
	case PH_DEACTIVATE_IND:
		test_and_clear_bit(FLG_L1_ACTIV, &l2->flag);
		l2up_create(l2, MPH_DEACTIVATE_IND, 0, NULL);
		ret = mISDN_FsmEvent(&l2->l2m, EV_L1_DEACTIVATE, skb);
		break;
	case MPH_INFORMATION_IND:
		if (!l2->up)
			break;
		ret = l2->up->send(l2->up, skb);
		break;
	case DL_DATA_REQ:
		ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DL_DATA, skb);
		break;
	case DL_UNITDATA_REQ:
		ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DL_UNITDATA, skb);
		break;
	case DL_ESTABLISH_REQ:
		if (test_bit(FLG_LAPB, &l2->flag))
			test_and_set_bit(FLG_ORIG, &l2->flag);
		if (test_bit(FLG_L1_ACTIV, &l2->flag)) {
			if (test_bit(FLG_LAPD, &l2->flag) ||
			    test_bit(FLG_ORIG, &l2->flag))
				ret = mISDN_FsmEvent(&l2->l2m,
						     EV_L2_DL_ESTABLISH_REQ, skb);
		} else {
			if (test_bit(FLG_LAPD, &l2->flag) ||
			    test_bit(FLG_ORIG, &l2->flag)) {
				test_and_set_bit(FLG_ESTAB_PEND,
						 &l2->flag);
			}
			ret = l2down(l2, PH_ACTIVATE_REQ, l2_newid(l2),
				     skb);
		}
		break;
	case DL_RELEASE_REQ:
		if (test_bit(FLG_LAPB, &l2->flag))
			l2down_create(l2, PH_DEACTIVATE_REQ,
				      l2_newid(l2), 0, NULL);
		ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DL_RELEASE_REQ,
				     skb);
		break;
	case DL_TIMER200_IND:
		mISDN_FsmEvent(&l2->l2m, EV_L2_T200I, NULL);
		break;
	case DL_TIMER203_IND:
		mISDN_FsmEvent(&l2->l2m, EV_L2_T203I, NULL);
		break;
	default:
		if (*debug & DEBUG_L2)
			l2m_debug(&l2->l2m, "l2 unknown pr %04x",
				  hh->prim);
	}
	if (ret) {
		dev_kfree_skb(skb);
		ret = 0;
	}
	return ret;
}

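/*
 * Entry point for MDL_* primitives (TEI management): translate each
 * command into an event for the layer 2 state machine.
 */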
int
tei_l2(struct layer2 *l2, u_int cmd, u_long arg)
{
	int ret = -EINVAL;

	if (*debug & DEBUG_L2_TEI)
		printk(KERN_DEBUG "%s: cmd(%x) in %s\n",
		       mISDNDevName4ch(&l2->ch), cmd, __func__);
	switch (cmd) {
	case (MDL_ASSIGN_REQ):
		ret = mISDN_FsmEvent(&l2->l2m, EV_L2_MDL_ASSIGN, (void *)arg);
		break;
	case (MDL_REMOVE_REQ):
		ret = mISDN_FsmEvent(&l2->l2m, EV_L2_MDL_REMOVE, NULL);
		break;
	case (MDL_ERROR_IND):
		ret = mISDN_FsmEvent(&l2->l2m, EV_L2_MDL_ERROR, NULL);
		break;
	case (MDL_ERROR_RSP):
		/* ETS 300-125 5.3.2.1 Test: TC13010 */
		printk(KERN_NOTICE "%s: MDL_ERROR|REQ (tei_l2)\n",
		       mISDNDevName4ch(&l2->ch));
		ret = mISDN_FsmEvent(&l2->l2m, EV_L2_MDL_ERROR, NULL);
		break;
	}
	return ret;
}

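/*
 * Tear down a layer2 instance: stop both timers, drop all queued
 * frames, release the transmit window and, for LAPD, the TEI and the
 * D-channel, then free the structure.
 */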
static void
release_l2(struct layer2 *l2)
{
	mISDN_FsmDelTimer(&l2->t200, 21);
	mISDN_FsmDelTimer(&l2->t203, 16);
	skb_queue_purge(&l2->i_queue);
	skb_queue_purge(&l2->ui_queue);
	skb_queue_purge(&l2->down_queue);
	ReleaseWin(l2);
	if (test_bit(FLG_LAPD, &l2->flag)) {
		TEIrelease(l2);
		if (l2->ch.st)
			l2->ch.st->dev->D.ctrl(&l2->ch.st->dev->D,
					       CLOSE_CHANNEL, NULL);
	}
	kfree(l2);
}

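/*
 * ch->ctrl handler: OPEN_CHANNEL announces the L2 address upward for
 * LAPD, CLOSE_CHANNEL propagates the close to the peer and releases
 * the instance.
 */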
static int
l2_ctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
{
	struct layer2 *l2 = container_of(ch, struct layer2, ch);
	u_int info;

	if (*debug & DEBUG_L2_CTRL)
		printk(KERN_DEBUG "%s: %s cmd(%x)\n",
		       mISDNDevName4ch(ch), __func__, cmd);

	switch (cmd) {
	case OPEN_CHANNEL:
		if (test_bit(FLG_LAPD, &l2->flag)) {
			set_channel_address(&l2->ch, l2->sapi, l2->tei);
			info = DL_INFO_L2_CONNECT;
			l2up_create(l2, DL_INFORMATION_IND,
				    sizeof(info), &info);
		}
		break;
	case CLOSE_CHANNEL:
		if (l2->ch.peer)
			l2->ch.peer->ctrl(l2->ch.peer, CLOSE_CHANNEL, NULL);
		release_l2(l2);
		break;
	}
	return 0;
}

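/*
 * Allocate and initialise a layer2 instance for LAPD (NT or TE mode)
 * or X.75 SLP; for LAPD the underlying D-channel is opened here as
 * well.
 */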
struct layer2 *
create_l2(struct mISDNchannel *ch, u_int protocol, u_long options, int tei,
	  int sapi)
{
	struct layer2 *l2;
	struct channel_req rq;

	l2 = kzalloc(sizeof(struct layer2), GFP_KERNEL);
	if (!l2) {
		printk(KERN_ERR "kzalloc layer2 failed\n");
		return NULL;
	}
	l2->next_id = 1;
	l2->down_id = MISDN_ID_NONE;
	l2->up = ch;
	l2->ch.st = ch->st;
	l2->ch.send = l2_send;
	l2->ch.ctrl = l2_ctrl;
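	/*
	 * Protocol specific setup: flags, window size, maximum frame
	 * length and the T200/N200/T203 parameters.
	 */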
	switch (protocol) {
	case ISDN_P_LAPD_NT:
		test_and_set_bit(FLG_LAPD, &l2->flag);
		test_and_set_bit(FLG_LAPD_NET, &l2->flag);
		test_and_set_bit(FLG_MOD128, &l2->flag);
		l2->sapi = sapi;
		l2->maxlen = MAX_DFRAME_LEN;
		if (test_bit(OPTION_L2_PMX, &options))
			l2->window = 7;
		else
			l2->window = 1;
		if (test_bit(OPTION_L2_PTP, &options))
			test_and_set_bit(FLG_PTP, &l2->flag);
		if (test_bit(OPTION_L2_FIXEDTEI, &options))
			test_and_set_bit(FLG_FIXED_TEI, &l2->flag);
		l2->tei = tei;
		l2->T200 = 1000;
		l2->N200 = 3;
		l2->T203 = 10000;
		if (test_bit(OPTION_L2_PMX, &options))
			rq.protocol = ISDN_P_NT_E1;
		else
			rq.protocol = ISDN_P_NT_S0;
		rq.adr.channel = 0;
		l2->ch.st->dev->D.ctrl(&l2->ch.st->dev->D, OPEN_CHANNEL, &rq);
		break;
	case ISDN_P_LAPD_TE:
		test_and_set_bit(FLG_LAPD, &l2->flag);
		test_and_set_bit(FLG_MOD128, &l2->flag);
		test_and_set_bit(FLG_ORIG, &l2->flag);
		l2->sapi = sapi;
		l2->maxlen = MAX_DFRAME_LEN;
		if (test_bit(OPTION_L2_PMX, &options))
			l2->window = 7;
		else
			l2->window = 1;
		if (test_bit(OPTION_L2_PTP, &options))
			test_and_set_bit(FLG_PTP, &l2->flag);
		if (test_bit(OPTION_L2_FIXEDTEI, &options))
			test_and_set_bit(FLG_FIXED_TEI, &l2->flag);
		l2->tei = tei;
		l2->T200 = 1000;
		l2->N200 = 3;
		l2->T203 = 10000;
		if (test_bit(OPTION_L2_PMX, &options))
			rq.protocol = ISDN_P_TE_E1;
		else
			rq.protocol = ISDN_P_TE_S0;
		rq.adr.channel = 0;
		l2->ch.st->dev->D.ctrl(&l2->ch.st->dev->D, OPEN_CHANNEL, &rq);
		break;
	case ISDN_P_B_X75SLP:
		test_and_set_bit(FLG_LAPB, &l2->flag);
		l2->window = 7;
		l2->maxlen = MAX_DATA_SIZE;
		l2->T200 = 1000;
		l2->N200 = 4;
		l2->T203 = 5000;
		l2->addr.A = 3;
		l2->addr.B = 1;
		break;
	default:
		printk(KERN_ERR "layer2 create failed prt %x\n",
		       protocol);
		kfree(l2);
		return NULL;
	}
	skb_queue_head_init(&l2->i_queue);
	skb_queue_head_init(&l2->ui_queue);
	skb_queue_head_init(&l2->down_queue);
	skb_queue_head_init(&l2->tmp_queue);
	InitWin(l2);
	l2->l2m.fsm = &l2fsm;
	if (test_bit(FLG_LAPB, &l2->flag) ||
	    test_bit(FLG_FIXED_TEI, &l2->flag) ||
	    test_bit(FLG_LAPD_NET, &l2->flag))
		l2->l2m.state = ST_L2_4;
	else
		l2->l2m.state = ST_L2_1;
	l2->l2m.debug = *debug;
	l2->l2m.userdata = l2;
	l2->l2m.userint = 0;
	l2->l2m.printdebug = l2m_debug;

	mISDN_FsmInitTimer(&l2->l2m, &l2->t200);
	mISDN_FsmInitTimer(&l2->l2m, &l2->t203);
	return l2;
}

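/*
 * Bprotocol create hook for X.75 SLP: wrap the B-channel in a layer2
 * instance and request plain HDLC from the layer below.
 */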
static int
x75create(struct channel_req *crq)
{
	struct layer2 *l2;

	if (crq->protocol != ISDN_P_B_X75SLP)
		return -EPROTONOSUPPORT;
	l2 = create_l2(crq->ch, crq->protocol, 0, 0, 0);
	if (!l2)
		return -ENOMEM;
	crq->ch = &l2->ch;
	crq->protocol = ISDN_P_B_HDLC;
	return 0;
}

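/* Registration record for the X.75 SLP B-protocol */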
static struct Bprotocol X75SLP = {
	.Bprotocols = (1 << (ISDN_P_B_X75SLP & ISDN_P_B_MASK)),
	.name = "X75SLP",
	.create = x75create
};

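/*
 * Module setup: register the X.75 SLP B-protocol, create the shared
 * layer 2 FSM and initialise the TEI manager; undo everything on error.
 */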
int
Isdnl2_Init(u_int *deb)
{
	int res;

	debug = deb;
	mISDN_register_Bprotocol(&X75SLP);
	l2fsm.state_count = L2_STATE_COUNT;
	l2fsm.event_count = L2_EVENT_COUNT;
	l2fsm.strEvent = strL2Event;
	l2fsm.strState = strL2State;
	res = mISDN_FsmNew(&l2fsm, L2FnList, ARRAY_SIZE(L2FnList));
	if (res)
		goto error;
	res = TEIInit(deb);
	if (res)
		goto error_fsm;
	return 0;

error_fsm:
	mISDN_FsmFree(&l2fsm);
error:
	mISDN_unregister_Bprotocol(&X75SLP);
	return res;
}

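/*
 * Module teardown: reverse of Isdnl2_Init().
 */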
void
Isdnl2_cleanup(void)
{
	mISDN_unregister_Bprotocol(&X75SLP);
	TEIFree();
	mISDN_FsmFree(&l2fsm);
}