1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * dvb_net.c
4  *
5  * Copyright (C) 2001 Convergence integrated media GmbH
6  *                    Ralph Metzler <ralph@convergence.de>
7  * Copyright (C) 2002 Ralph Metzler <rjkm@metzlerbros.de>
8  *
9  * ULE Decapsulation code:
10  * Copyright (C) 2003, 2004 gcs - Global Communication & Services GmbH.
11  *                      and Department of Scientific Computing
12  *                          Paris Lodron University of Salzburg.
13  *                          Hilmar Linder <hlinder@cosy.sbg.ac.at>
14  *                      and Wolfram Stering <wstering@cosy.sbg.ac.at>
15  *
16  * ULE Decaps according to RFC 4326.
17  */
18 
19 /*
20  * ULE ChangeLog:
21  * Feb 2004: hl/ws v1: Implementing draft-fair-ipdvb-ule-01.txt
22  *
23  * Dec 2004: hl/ws v2: Implementing draft-ietf-ipdvb-ule-03.txt:
24  *                       ULE Extension header handling.
25  *                     Bugreports by Moritz Vieth and Hanno Tersteegen,
26  *                       Fraunhofer Institute for Open Communication Systems
27  *                       Competence Center for Advanced Satellite Communications.
28  *                     Bugfixes and robustness improvements.
29  *                     Filtering on dest MAC addresses, if present (D-Bit = 0)
30  *                     DVB_ULE_DEBUG compile-time option.
31  * Apr 2006: cp v3:    Bugfixes and compliance with RFC 4326 (ULE) by
32  *                       Christian Praehauser <cpraehaus@cosy.sbg.ac.at>,
33  *                       Paris Lodron University of Salzburg.
34  */
35 
36 /*
37  * FIXME / TODO (dvb_net.c):
38  *
39  * Unloading does not work for 2.6.9 kernels: a refcount doesn't go to zero.
40  *
41  */
42 
43 #define pr_fmt(fmt) "dvb_net: " fmt
44 
45 #include <linux/module.h>
46 #include <linux/kernel.h>
47 #include <linux/netdevice.h>
48 #include <linux/etherdevice.h>
49 #include <linux/dvb/net.h>
50 #include <linux/uio.h>
51 #include <linux/uaccess.h>
52 #include <linux/crc32.h>
53 #include <linux/mutex.h>
54 #include <linux/sched.h>
55 
56 #include <media/dvb_demux.h>
57 #include <media/dvb_net.h>
58 
59 static inline __u32 iov_crc32( __u32 c, struct kvec *iov, unsigned int cnt )
60 {
61 	unsigned int j;
62 	for (j = 0; j < cnt; j++)
63 		c = crc32_be( c, iov[j].iov_base, iov[j].iov_len );
64 	return c;
65 }
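
/*
 * Scatter-gather CRC helper for the ULE decapsulation below: the SNDU CRC-32
 * is computed big-endian (crc32_be) over buffers that are no longer
 * contiguous when we verify it (the Length/Type fields plus the reassembled
 * payload), hence the walk over a kvec array.
 */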
66 
67 
68 #define DVB_NET_MULTICAST_MAX 10
69 
70 #ifdef DVB_ULE_DEBUG
71 /*
72  * The code inside DVB_ULE_DEBUG keeps a history of the
73  * last 100 TS cells processed.
74  */
75 static unsigned char ule_hist[100*TS_SZ] = { 0 };
76 static unsigned char *ule_where = ule_hist, ule_dump;
77 
78 static void hexdump(const unsigned char *buf, unsigned short len)
79 {
80 	print_hex_dump_debug("", DUMP_PREFIX_OFFSET, 16, 1, buf, len, true);
81 }
82 #endif
83 
84 struct dvb_net_priv {
85 	int in_use;
86 	u16 pid;
87 	struct net_device *net;
88 	struct dvb_net *host;
89 	struct dmx_demux *demux;
90 	struct dmx_section_feed *secfeed;
91 	struct dmx_section_filter *secfilter;
92 	struct dmx_ts_feed *tsfeed;
93 	int multi_num;
94 	struct dmx_section_filter *multi_secfilter[DVB_NET_MULTICAST_MAX];
95 	unsigned char multi_macs[DVB_NET_MULTICAST_MAX][6];
96 	int rx_mode;
97 #define RX_MODE_UNI 0
98 #define RX_MODE_MULTI 1
99 #define RX_MODE_ALL_MULTI 2
100 #define RX_MODE_PROMISC 3
101 	struct work_struct set_multicast_list_wq;
102 	struct work_struct restart_net_feed_wq;
103 	unsigned char feedtype;			/* Either DVB_NET_FEEDTYPE_MPE or DVB_NET_FEEDTYPE_ULE. */
104 	int need_pusi;				/* Set to 1, if synchronization on PUSI required. */
105 	unsigned char tscc;			/* TS continuity counter after sync on PUSI. */
106 	struct sk_buff *ule_skb;		/* ULE SNDU decodes into this buffer. */
107 	unsigned char *ule_next_hdr;		/* Pointer into skb to next ULE extension header. */
108 	unsigned short ule_sndu_len;		/* ULE SNDU length in bytes, w/o D-Bit. */
109 	unsigned short ule_sndu_type;		/* ULE SNDU type field, complete. */
110 	unsigned char ule_sndu_type_1;		/* ULE SNDU type field, if split across 2 TS cells. */
111 	unsigned char ule_dbit;			/* D-Bit from the SNDU header: set if the
112 						 * destination MAC address is absent. */
113 	unsigned char ule_bridged;		/* Whether the ULE_BRIDGED extension header was found. */
114 	int ule_sndu_remain;			/* Nr. of bytes still required for current ULE SNDU. */
115 	unsigned long ts_count;			/* Current ts cell counter. */
116 	struct mutex mutex;
117 };
118 
119 
120 /*
121  *	Determine the packet's protocol ID. The rule here is that we
122  *	assume 802.3 if the type field is short enough to be a length.
123  *	This is normal practice and works for any 'now in use' protocol.
124  *
125  *  stolen from eth.c out of the linux kernel, hacked for dvb-device
126  *  by Michael Holzt <kju@debian.org>
127  */
128 static __be16 dvb_net_eth_type_trans(struct sk_buff *skb,
129 				      struct net_device *dev)
130 {
131 	struct ethhdr *eth;
132 	unsigned char *rawp;
133 
134 	skb_reset_mac_header(skb);
135 	skb_pull(skb,dev->hard_header_len);
136 	eth = eth_hdr(skb);
137 
138 	if (*eth->h_dest & 1) {
139 		if(ether_addr_equal(eth->h_dest,dev->broadcast))
140 			skb->pkt_type=PACKET_BROADCAST;
141 		else
142 			skb->pkt_type=PACKET_MULTICAST;
143 	}
144 
145 	if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN)
146 		return eth->h_proto;
147 
148 	rawp = skb->data;
149 
150 	/*
151 	 *	This is a magic hack to spot IPX packets. Older Novell breaks
152 	 *	the protocol design and runs IPX over 802.3 without an 802.2 LLC
153 	 *	layer. We look for FFFF which isn't a used 802.2 SSAP/DSAP. This
154 	 *	won't work for fault tolerant netware but does for the rest.
155 	 */
156 	if (*(unsigned short *)rawp == 0xFFFF)
157 		return htons(ETH_P_802_3);
158 
159 	/*
160 	 *	Real 802.2 LLC
161 	 */
162 	return htons(ETH_P_802_2);
163 }
164 
165 #define TS_SZ	188
166 #define TS_SYNC	0x47
167 #define TS_TEI	0x80
168 #define TS_SC	0xC0
169 #define TS_PUSI	0x40
170 #define TS_AF_A	0x20
171 #define TS_AF_D	0x10
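
/*
 * The masks above index the 4-byte TS packet header:
 *   ts[0]  sync byte, must equal TS_SYNC (0x47);
 *   ts[1]  TS_TEI (transport error indicator) and TS_PUSI (payload unit
 *          start indicator), plus the upper bits of the PID;
 *   ts[3]  TS_SC (scrambling control), TS_AF_A/TS_AF_D (the two
 *          adaptation_field_control bits) and the 4-bit continuity counter.
 */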
172 
173 /* ULE Extension Header handlers. */
174 
175 #define ULE_TEST	0
176 #define ULE_BRIDGED	1
177 
178 #define ULE_OPTEXTHDR_PADDING 0
179 
180 static int ule_test_sndu( struct dvb_net_priv *p )
181 {
182 	return -1;
183 }
184 
185 static int ule_bridged_sndu( struct dvb_net_priv *p )
186 {
187 	struct ethhdr *hdr = (struct ethhdr*) p->ule_next_hdr;
188 	if(ntohs(hdr->h_proto) < ETH_P_802_3_MIN) {
189 		int framelen = p->ule_sndu_len - ((p->ule_next_hdr+sizeof(struct ethhdr)) - p->ule_skb->data);
190 		/* A frame Type < ETH_P_802_3_MIN for a bridged frame introduces an LLC Length field. */
191 		if(framelen != ntohs(hdr->h_proto)) {
192 			return -1;
193 		}
194 	}
195 	/* Note:
196 	 * From RFC4326:
197 	 *  "A bridged SNDU is a Mandatory Extension Header of Type 1.
198 	 *   It must be the final (or only) extension header specified in the header chain of a SNDU."
199 	 * The 'ule_bridged' flag will cause the extension header processing loop to terminate.
200 	 */
201 	p->ule_bridged = 1;
202 	return 0;
203 }
204 
205 static int ule_exthdr_padding(struct dvb_net_priv *p)
206 {
207 	return 0;
208 }
209 
210 /*
211  * Handle ULE extension headers.
212  *  Function is called after a successful CRC32 verification of an ULE SNDU to complete its decoding.
213  *  Returns: >= 0: nr. of bytes consumed by next extension header
214  *	     -1:   Mandatory extension header that is not recognized or TEST SNDU; discard.
215  */
216 static int handle_one_ule_extension( struct dvb_net_priv *p )
217 {
218 	/* Table of mandatory extension header handlers.  The header type (a full octet, 0..255) is the index. */
219 	static int (*ule_mandatory_ext_handlers[256])( struct dvb_net_priv *p ) =
220 		{ [0] = ule_test_sndu, [1] = ule_bridged_sndu, [2] = NULL,  };
221 
222 	/* Table of optional extension header handlers.  The header type (a full octet, 0..255) is the index. */
223 	static int (*ule_optional_ext_handlers[256])( struct dvb_net_priv *p ) =
224 		{ [0] = ule_exthdr_padding, [1] = NULL, };
225 
226 	int ext_len = 0;
227 	unsigned char hlen = (p->ule_sndu_type & 0x0700) >> 8;
228 	unsigned char htype = p->ule_sndu_type & 0x00FF;
229 
230 	/* Discriminate mandatory and optional extension headers. */
231 	if (hlen == 0) {
232 		/* Mandatory extension header */
233 		if (ule_mandatory_ext_handlers[htype]) {
234 			ext_len = ule_mandatory_ext_handlers[htype]( p );
235 			if(ext_len >= 0) {
236 				p->ule_next_hdr += ext_len;
237 				if (!p->ule_bridged) {
238 					p->ule_sndu_type = ntohs(*(__be16 *)p->ule_next_hdr);
239 					p->ule_next_hdr += 2;
240 				} else {
241 					p->ule_sndu_type = ntohs(*(__be16 *)(p->ule_next_hdr + ((p->ule_dbit ? 2 : 3) * ETH_ALEN)));
242 					/* This assures the extension handling loop will terminate. */
243 				}
244 			}
245 			// else: extension handler failed or SNDU should be discarded
246 		} else
247 			ext_len = -1;	/* SNDU has to be discarded. */
248 	} else {
249 		/* Optional extension header.  Calculate the length. */
250 		ext_len = hlen << 1;
251 		/* Process the optional extension header according to its type. */
252 		if (ule_optional_ext_handlers[htype])
253 			(void)ule_optional_ext_handlers[htype]( p );
254 		p->ule_next_hdr += ext_len;
255 		p->ule_sndu_type = ntohs( *(__be16 *)(p->ule_next_hdr-2) );
256 		/*
257 		 * note: the length of the next header type is included in the
258 		 * length of THIS optional extension header
259 		 */
260 	}
261 
262 	return ext_len;
263 }
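
/*
 * Encoding recap for the Type field examined above (RFC 4326): values of at
 * least ETH_P_802_3_MIN (0x0600) are final payload types, smaller values
 * denote an extension header with H-LEN in bits 10..8 and H-Type in bits
 * 7..0.  H-LEN == 0 selects a mandatory extension from the first table;
 * H-LEN 1..5 selects an optional extension of 2*H-LEN bytes whose last two
 * bytes carry the following Type field.
 */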
264 
265 static int handle_ule_extensions( struct dvb_net_priv *p )
266 {
267 	int total_ext_len = 0, l;
268 
269 	p->ule_next_hdr = p->ule_skb->data;
270 	do {
271 		l = handle_one_ule_extension( p );
272 		if (l < 0)
273 			return l;	/* Stop extension header processing and discard SNDU. */
274 		total_ext_len += l;
275 		pr_debug("ule_next_hdr=%p, ule_sndu_type=%i, l=%i, total_ext_len=%i\n",
276 			 p->ule_next_hdr, (int)p->ule_sndu_type,
277 			 l, total_ext_len);
278 
279 	} while (p->ule_sndu_type < ETH_P_802_3_MIN);
280 
281 	return total_ext_len;
282 }
283 
284 
285 /* Prepare for a new ULE SNDU: reset the decoder state. */
286 static inline void reset_ule( struct dvb_net_priv *p )
287 {
288 	p->ule_skb = NULL;
289 	p->ule_next_hdr = NULL;
290 	p->ule_sndu_len = 0;
291 	p->ule_sndu_type = 0;
292 	p->ule_sndu_type_1 = 0;
293 	p->ule_sndu_remain = 0;
294 	p->ule_dbit = 0xFF;
295 	p->ule_bridged = 0;
296 }
297 
298 /*
299  * Decode ULE SNDUs according to RFC 4326 (formerly draft-ietf-ipdvb-ule-03.txt) from a sequence of
300  * TS cells of a single PID.
301  */
302 
303 struct dvb_net_ule_handle {
304 	struct net_device *dev;
305 	struct dvb_net_priv *priv;
306 	struct ethhdr *ethh;
307 	const u8 *buf;
308 	size_t buf_len;
309 	unsigned long skipped;
310 	const u8 *ts, *ts_end, *from_where;
311 	u8 ts_remain, how_much, new_ts;
312 	bool error;
313 };
314 
315 static int dvb_net_ule_new_ts_cell(struct dvb_net_ule_handle *h)
316 {
317 	/* We are about to process a new TS cell. */
318 
319 #ifdef DVB_ULE_DEBUG
320 	if (ule_where >= &ule_hist[100*TS_SZ])
321 		ule_where = ule_hist;
322 	memcpy(ule_where, h->ts, TS_SZ);
323 	if (ule_dump) {
324 		hexdump(ule_where, TS_SZ);
325 		ule_dump = 0;
326 	}
327 	ule_where += TS_SZ;
328 #endif
329 
330 	/*
331 	 * Check TS h->error conditions: sync_byte, transport_error_indicator,
332 	 * scrambling_control .
333 	 */
334 	if ((h->ts[0] != TS_SYNC) || (h->ts[1] & TS_TEI) ||
335 	    ((h->ts[3] & TS_SC) != 0)) {
336 		pr_warn("%lu: Invalid TS cell: SYNC %#x, TEI %u, SC %#x.\n",
337 			h->priv->ts_count, h->ts[0],
338 			(h->ts[1] & TS_TEI) >> 7,
339 			(h->ts[3] & TS_SC) >> 6);
340 
341 		/* Drop partly decoded SNDU, reset state, resync on PUSI. */
342 		if (h->priv->ule_skb) {
343 			dev_kfree_skb(h->priv->ule_skb);
344 			/* Prepare for next SNDU. */
345 			h->dev->stats.rx_errors++;
346 			h->dev->stats.rx_frame_errors++;
347 		}
348 		reset_ule(h->priv);
349 		h->priv->need_pusi = 1;
350 
351 		/* Continue with next TS cell. */
352 		h->ts += TS_SZ;
353 		h->priv->ts_count++;
354 		return 1;
355 	}
356 
357 	h->ts_remain = 184;
358 	h->from_where = h->ts + 4;
359 
360 	return 0;
361 }
362 
363 static int dvb_net_ule_ts_pusi(struct dvb_net_ule_handle *h)
364 {
365 	if (h->ts[1] & TS_PUSI) {
366 		/* Find beginning of first ULE SNDU in current TS cell. */
367 		/* Synchronize continuity counter. */
368 		h->priv->tscc = h->ts[3] & 0x0F;
369 		/* PUSI is set, so ts[4] is the payload pointer: the offset from ts[5] to the first SNDU in this cell. */
370 		if (h->ts[4] > h->ts_remain) {
371 			pr_err("%lu: Invalid ULE packet (pointer field %d)\n",
372 				h->priv->ts_count, h->ts[4]);
373 			h->ts += TS_SZ;
374 			h->priv->ts_count++;
375 			return 1;
376 		}
377 		/* Skip to destination of pointer field. */
378 		h->from_where = &h->ts[5] + h->ts[4];
379 		h->ts_remain -= 1 + h->ts[4];
380 		h->skipped = 0;
381 	} else {
382 		h->skipped++;
383 		h->ts += TS_SZ;
384 		h->priv->ts_count++;
385 		return 1;
386 	}
387 
388 	return 0;
389 }
390 
391 static int dvb_net_ule_new_ts(struct dvb_net_ule_handle *h)
392 {
393 	/* Check continuity counter. */
394 	if ((h->ts[3] & 0x0F) == h->priv->tscc)
395 		h->priv->tscc = (h->priv->tscc + 1) & 0x0F;
396 	else {
397 		/* TS discontinuity handling: */
398 		pr_warn("%lu: TS discontinuity: got %#x, expected %#x.\n",
399 			h->priv->ts_count, h->ts[3] & 0x0F,
400 			h->priv->tscc);
401 		/* Drop partly decoded SNDU, reset state, resync on PUSI. */
402 		if (h->priv->ule_skb) {
403 			dev_kfree_skb(h->priv->ule_skb);
404 			/* Prepare for next SNDU. */
405 			// reset_ule(h->priv);  moved to below.
406 			h->dev->stats.rx_errors++;
407 			h->dev->stats.rx_frame_errors++;
408 		}
409 		reset_ule(h->priv);
410 		/* skip to next PUSI. */
411 		h->priv->need_pusi = 1;
412 		return 1;
413 	}
414 	/*
415 	 * If we still have an incomplete payload but PUSI is
416 	 * set, some TS cells are missing.
417 	 * This can only happen here if we missed exactly 16 TS
418 	 * cells (continuity counter wrap).
419 	 */
420 	if (h->ts[1] & TS_PUSI) {
421 		if (!h->priv->need_pusi) {
422 			if (!(*h->from_where < (h->ts_remain-1)) ||
423 			    *h->from_where != h->priv->ule_sndu_remain) {
424 				/*
425 				 * Pointer field is invalid.
426 				 * Drop this TS cell and any started ULE SNDU.
427 				 */
428 				pr_warn("%lu: Invalid pointer field: %u.\n",
429 					h->priv->ts_count,
430 					*h->from_where);
431 
432 				/*
433 				 * Drop partly decoded SNDU, reset state,
434 				 * resync on PUSI.
435 				 */
436 				if (h->priv->ule_skb) {
437 					h->error = true;
438 					dev_kfree_skb(h->priv->ule_skb);
439 				}
440 
441 				if (h->error || h->priv->ule_sndu_remain) {
442 					h->dev->stats.rx_errors++;
443 					h->dev->stats.rx_frame_errors++;
444 					h->error = false;
445 				}
446 
447 				reset_ule(h->priv);
448 				h->priv->need_pusi = 1;
449 				return 1;
450 			}
451 			/*
452 			 * Skip pointer field (we're processing a
453 			 * packed payload).
454 			 */
455 			h->from_where += 1;
456 			h->ts_remain -= 1;
457 		} else
458 			h->priv->need_pusi = 0;
459 
460 		if (h->priv->ule_sndu_remain > 183) {
461 			/*
462 			 * Current SNDU lacks more data than there
463 			 * could be available in the current TS cell.
464 			 */
465 			h->dev->stats.rx_errors++;
466 			h->dev->stats.rx_length_errors++;
467 			pr_warn("%lu: Expected %d more SNDU bytes, but got PUSI (pf %d, h->ts_remain %d).  Flushing incomplete payload.\n",
468 				h->priv->ts_count,
469 				h->priv->ule_sndu_remain,
470 				h->ts[4], h->ts_remain);
471 			dev_kfree_skb(h->priv->ule_skb);
472 			/* Prepare for next SNDU. */
473 			reset_ule(h->priv);
474 			/*
475 			 * Resync: go to where pointer field points to:
476 			 * start of next ULE SNDU.
477 			 */
478 			h->from_where += h->ts[4];
479 			h->ts_remain -= h->ts[4];
480 		}
481 	}
482 	return 0;
483 }
484 
485 
486 /*
487  * Start a new payload with skb.
488  * Find ULE header.  It is only guaranteed that the
489  * length field (2 bytes) is contained in the current
490  * TS.
491  * h->ts_remain must therefore be >= 2 here; this is checked below.
492  */
493 static int dvb_net_ule_new_payload(struct dvb_net_ule_handle *h)
494 {
495 	if (h->ts_remain < 2) {
496 		pr_warn("Invalid payload packing: only %d bytes left in TS.  Resyncing.\n",
497 			h->ts_remain);
498 		h->priv->ule_sndu_len = 0;
499 		h->priv->need_pusi = 1;
500 		h->ts += TS_SZ;
501 		return 1;
502 	}
503 
504 	if (!h->priv->ule_sndu_len) {
505 		/* Got at least two bytes, thus extract the SNDU length. */
506 		h->priv->ule_sndu_len = h->from_where[0] << 8 |
507 					h->from_where[1];
508 		if (h->priv->ule_sndu_len & 0x8000) {
509 			/* D-Bit is set: no dest mac present. */
510 			h->priv->ule_sndu_len &= 0x7FFF;
511 			h->priv->ule_dbit = 1;
512 		} else
513 			h->priv->ule_dbit = 0;
514 
515 		if (h->priv->ule_sndu_len < 5) {
516 			pr_warn("%lu: Invalid ULE SNDU length %u. Resyncing.\n",
517 				h->priv->ts_count,
518 				h->priv->ule_sndu_len);
519 			h->dev->stats.rx_errors++;
520 			h->dev->stats.rx_length_errors++;
521 			h->priv->ule_sndu_len = 0;
522 			h->priv->need_pusi = 1;
523 			h->new_ts = 1;
524 			h->ts += TS_SZ;
525 			h->priv->ts_count++;
526 			return 1;
527 		}
528 		h->ts_remain -= 2;	/* consume the 2 bytes SNDU length. */
529 		h->from_where += 2;
530 	}
531 
532 	h->priv->ule_sndu_remain = h->priv->ule_sndu_len + 2;
533 	/*
534 	 * State of current TS:
535 	 *   h->ts_remain (remaining bytes in the current TS cell)
536 	 *   0	ule_type is not available now, we need the next TS cell
537 	 *   1	the first byte of the ule_type is present
538 	 * >=2	full ULE header present, maybe some payload data as well.
539 	 */
540 	switch (h->ts_remain) {
541 	case 1:
542 		h->priv->ule_sndu_remain--;
543 		h->priv->ule_sndu_type = h->from_where[0] << 8;
544 
545 		/* first byte of ule_type is set. */
546 		h->priv->ule_sndu_type_1 = 1;
547 		h->ts_remain -= 1;
548 		h->from_where += 1;
549 		/* fallthrough */
550 	case 0:
551 		h->new_ts = 1;
552 		h->ts += TS_SZ;
553 		h->priv->ts_count++;
554 		return 1;
555 
556 	default: /* complete ULE header is present in current TS. */
557 		/* Extract ULE type field. */
558 		if (h->priv->ule_sndu_type_1) {
559 			h->priv->ule_sndu_type_1 = 0;
560 			h->priv->ule_sndu_type |= h->from_where[0];
561 			h->from_where += 1; /* points to payload start. */
562 			h->ts_remain -= 1;
563 		} else {
564 			/* Complete type is present in new TS. */
565 			h->priv->ule_sndu_type = h->from_where[0] << 8 |
566 						 h->from_where[1];
567 			h->from_where += 2; /* points to payload start. */
568 			h->ts_remain -= 2;
569 		}
570 		break;
571 	}
572 
573 	/*
574 	 * Allocate the skb (decoder target buffer) with the correct size,
575 	 * as follows:
576 	 *
577 	 * prepare for the largest case: bridged SNDU with MAC address
578 	 * (dbit = 0).
579 	 */
580 	h->priv->ule_skb = dev_alloc_skb(h->priv->ule_sndu_len +
581 					 ETH_HLEN + ETH_ALEN);
582 	if (!h->priv->ule_skb) {
583 		pr_notice("%s: Memory squeeze, dropping packet.\n",
584 			  h->dev->name);
585 		h->dev->stats.rx_dropped++;
586 		return -1;
587 	}
588 
589 	/* This includes the CRC32 _and_ dest mac, if !dbit. */
590 	h->priv->ule_sndu_remain = h->priv->ule_sndu_len;
591 	h->priv->ule_skb->dev = h->dev;
592 	/*
593 	 * Leave space for Ethernet or bridged SNDU header
594 	 * (eth hdr plus one MAC addr).
595 	 */
596 	skb_reserve(h->priv->ule_skb, ETH_HLEN + ETH_ALEN);
597 
598 	return 0;
599 }
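
/*
 * For reference, the SNDU layout parsed above (RFC 4326): a 1-bit D flag
 * plus a 15-bit Length, a 16-bit Type, an optional 6-byte destination MAC
 * address (present only when D == 0), the PDU itself and a trailing CRC-32.
 * Length counts every byte after the Type field, so it includes the optional
 * MAC address and the CRC.
 */
#if 0
/*
 * Illustrative sketch only (kept out of the build): how the fixed part of an
 * SNDU header decodes from a contiguous buffer, mirroring the stateful
 * parsing above.
 */
static u16 example_parse_sndu_header(const u8 *b, bool *dbit, u16 *length)
{
	u16 len_field = b[0] << 8 | b[1];

	*dbit   = len_field & 0x8000;	/* D-bit set: no destination MAC */
	*length = len_field & 0x7fff;	/* bytes after Type, incl. CRC-32 */
	return b[2] << 8 | b[3];	/* Type / next-header field */
}
#endif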
600 
601 
602 static int dvb_net_ule_should_drop(struct dvb_net_ule_handle *h)
603 {
604 	static const u8 bc_addr[ETH_ALEN] = { [0 ... ETH_ALEN - 1] = 0xff };
605 
606 	/*
607 	 * The destination MAC address is the next data in the skb.  It comes
608 	 * before any extension headers.
609 	 *
610 	 * Check if the payload of this SNDU should be passed up the stack.
611 	 */
612 	if (h->priv->rx_mode == RX_MODE_PROMISC)
613 		return 0;
614 
615 	if (h->priv->ule_skb->data[0] & 0x01) {
616 		/* multicast or broadcast */
617 		if (!ether_addr_equal(h->priv->ule_skb->data, bc_addr)) {
618 			/* multicast */
619 			if (h->priv->rx_mode == RX_MODE_MULTI) {
620 				int i;
621 
622 				for (i = 0; i < h->priv->multi_num &&
623 				     !ether_addr_equal(h->priv->ule_skb->data,
624 						       h->priv->multi_macs[i]);
625 				     i++)
626 					;
627 				if (i == h->priv->multi_num)
628 					return 1;
629 			} else if (h->priv->rx_mode != RX_MODE_ALL_MULTI)
630 				return 1; /* unicast-only mode: drop multicast */
631 			/*
632 			 * else:
633 			 * all multicast mode: accept all multicast packets
634 			 */
635 		}
636 		/* else: broadcast */
637 	} else if (!ether_addr_equal(h->priv->ule_skb->data, h->dev->dev_addr))
638 		return 1;
639 
640 	return 0;
641 }
642 
643 
644 static void dvb_net_ule_check_crc(struct dvb_net_ule_handle *h,
645 				  struct kvec iov[3],
646 				  u32 ule_crc, u32 expected_crc)
647 {
648 	u8 dest_addr[ETH_ALEN];
649 
650 	if (ule_crc != expected_crc) {
651 		pr_warn("%lu: CRC32 check FAILED: %08x / %08x, SNDU len %d type %#x, ts_remain %d, next 2: %x.\n",
652 			h->priv->ts_count, ule_crc, expected_crc,
653 			h->priv->ule_sndu_len, h->priv->ule_sndu_type,
654 			h->ts_remain,
655 			h->ts_remain > 2 ?
656 				*(unsigned short *)h->from_where : 0);
657 
658 	#ifdef DVB_ULE_DEBUG
659 		hexdump(iov[0].iov_base, iov[0].iov_len);
660 		hexdump(iov[1].iov_base, iov[1].iov_len);
661 		hexdump(iov[2].iov_base, iov[2].iov_len);
662 
663 		if (ule_where == ule_hist) {
664 			hexdump(&ule_hist[98*TS_SZ], TS_SZ);
665 			hexdump(&ule_hist[99*TS_SZ], TS_SZ);
666 		} else if (ule_where == &ule_hist[TS_SZ]) {
667 			hexdump(&ule_hist[99*TS_SZ], TS_SZ);
668 			hexdump(ule_hist, TS_SZ);
669 		} else {
670 			hexdump(ule_where - TS_SZ - TS_SZ, TS_SZ);
671 			hexdump(ule_where - TS_SZ, TS_SZ);
672 		}
673 		ule_dump = 1;
674 	#endif
675 
676 		h->dev->stats.rx_errors++;
677 		h->dev->stats.rx_crc_errors++;
678 		dev_kfree_skb(h->priv->ule_skb);
679 
680 		return;
681 	}
682 
683 	/* CRC32 verified OK. */
684 
685 	/* CRC32 was OK, so remove it from skb. */
686 	h->priv->ule_skb->tail -= 4;
687 	h->priv->ule_skb->len -= 4;
688 
689 	if (!h->priv->ule_dbit) {
690 		if (dvb_net_ule_should_drop(h)) {
691 			netdev_dbg(h->dev,
692 				   "Dropping SNDU: MAC destination address does not match: dest addr: %pM, h->dev addr: %pM\n",
693 				   h->priv->ule_skb->data, h->dev->dev_addr);
694 			dev_kfree_skb(h->priv->ule_skb);
695 			return;
696 		}
697 
698 		skb_copy_from_linear_data(h->priv->ule_skb, dest_addr,
699 					  ETH_ALEN);
700 		skb_pull(h->priv->ule_skb, ETH_ALEN);
701 	} else {
702 		/* dest_addr buffer is only valid if h->priv->ule_dbit == 0 */
703 		eth_zero_addr(dest_addr);
704 	}
705 
706 	/* Handle ULE Extension Headers. */
707 	if (h->priv->ule_sndu_type < ETH_P_802_3_MIN) {
708 		/* There is an extension header.  Handle it accordingly. */
709 		int l = handle_ule_extensions(h->priv);
710 
711 		if (l < 0) {
712 			/*
713 			 * Mandatory extension header unknown or TEST SNDU.
714 			 * Drop it.
715 			 */
716 
717 			// pr_warn("Dropping SNDU, extension headers.\n" );
718 			dev_kfree_skb(h->priv->ule_skb);
719 			return;
720 		}
721 		skb_pull(h->priv->ule_skb, l);
722 	}
723 
724 	/*
725 	 * Construct/assure correct ethernet header.
726 	 * Note: in bridged mode (h->priv->ule_bridged != 0)
727 	 * we already have the (original) ethernet
728 	 * header at the start of the payload (after
729 	 * optional dest. address and any extension
730 	 * headers).
731 	 */
732 	if (!h->priv->ule_bridged) {
733 		skb_push(h->priv->ule_skb, ETH_HLEN);
734 		h->ethh = (struct ethhdr *)h->priv->ule_skb->data;
735 		memcpy(h->ethh->h_dest, dest_addr, ETH_ALEN);
736 		eth_zero_addr(h->ethh->h_source);
737 		h->ethh->h_proto = htons(h->priv->ule_sndu_type);
738 	}
739 	/* else:  skb is in correct state; nothing to do. */
740 	h->priv->ule_bridged = 0;
741 
742 	/* Stuff into kernel's protocol stack. */
743 	h->priv->ule_skb->protocol = dvb_net_eth_type_trans(h->priv->ule_skb,
744 							   h->dev);
745 	/*
746 	 * If D-bit is set (i.e. destination MAC address not present),
747 	 * receive the packet anyhow.
748 	 */
749 #if 0
750 	if (h->priv->ule_dbit && skb->pkt_type == PACKET_OTHERHOST)
751 		h->priv->ule_skb->pkt_type = PACKET_HOST;
752 #endif
753 	h->dev->stats.rx_packets++;
754 	h->dev->stats.rx_bytes += h->priv->ule_skb->len;
755 	netif_rx(h->priv->ule_skb);
756 }
757 
758 static void dvb_net_ule(struct net_device *dev, const u8 *buf, size_t buf_len)
759 {
760 	int ret;
761 	struct dvb_net_ule_handle h = {
762 		.dev = dev,
763 		.priv = netdev_priv(dev),
764 		.ethh = NULL,
765 		.buf = buf,
766 		.buf_len = buf_len,
767 		.skipped = 0L,
768 		.ts = NULL,
769 		.ts_end = NULL,
770 		.from_where = NULL,
771 		.ts_remain = 0,
772 		.how_much = 0,
773 		.new_ts = 1,
774 		.error = false,
775 	};
776 
777 	/*
778 	 * For all TS cells in current buffer.
779 	 * Apparently, we are called for every single TS cell.
780 	 */
781 	for (h.ts = h.buf, h.ts_end = h.buf + h.buf_len;
782 	     h.ts < h.ts_end; /* no incr. */) {
783 		if (h.new_ts) {
784 			/* We are about to process a new TS cell. */
785 			if (dvb_net_ule_new_ts_cell(&h))
786 				continue;
787 		}
788 
789 		/* Synchronize on PUSI, if required. */
790 		if (h.priv->need_pusi) {
791 			if (dvb_net_ule_ts_pusi(&h))
792 				continue;
793 		}
794 
795 		if (h.new_ts) {
796 			if (dvb_net_ule_new_ts(&h))
797 				continue;
798 		}
799 
800 		/* Check if new payload needs to be started. */
801 		if (h.priv->ule_skb == NULL) {
802 			ret = dvb_net_ule_new_payload(&h);
803 			if (ret < 0)
804 				return;
805 			if (ret)
806 				continue;
807 		}
808 
809 		/* Copy data into our current skb. */
810 		h.how_much = min(h.priv->ule_sndu_remain, (int)h.ts_remain);
811 		skb_put_data(h.priv->ule_skb, h.from_where, h.how_much);
812 		h.priv->ule_sndu_remain -= h.how_much;
813 		h.ts_remain -= h.how_much;
814 		h.from_where += h.how_much;
815 
816 		/* Check for complete payload. */
817 		if (h.priv->ule_sndu_remain <= 0) {
818 			/* Check CRC32, we've got it in our skb already. */
819 			__be16 ulen = htons(h.priv->ule_sndu_len);
820 			__be16 utype = htons(h.priv->ule_sndu_type);
821 			const u8 *tail;
822 			struct kvec iov[3] = {
823 				{ &ulen, sizeof ulen },
824 				{ &utype, sizeof utype },
825 				{ h.priv->ule_skb->data,
826 				  h.priv->ule_skb->len - 4 }
827 			};
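			/*
			 * The SNDU CRC-32 covers the Length field (with its
			 * original D-bit, restored just below), the Type
			 * field and the reassembled payload minus the
			 * trailing CRC itself, hence the three buffers.
			 */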
828 			u32 ule_crc = ~0L, expected_crc;
829 			if (h.priv->ule_dbit) {
830 				/* Set D-bit for CRC32 verification,
831 				 * if it was set originally. */
832 				ulen |= htons(0x8000);
833 			}
834 
835 			ule_crc = iov_crc32(ule_crc, iov, 3);
836 			tail = skb_tail_pointer(h.priv->ule_skb);
837 			expected_crc = *(tail - 4) << 24 |
838 				       *(tail - 3) << 16 |
839 				       *(tail - 2) << 8 |
840 				       *(tail - 1);
841 
842 			dvb_net_ule_check_crc(&h, iov, ule_crc, expected_crc);
843 
844 			/* Prepare for next SNDU. */
845 			reset_ule(h.priv);
846 		}
847 
848 		/* More data in current TS (look at the bytes following the CRC32)? */
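		/* A value of 0xFFFF here is the ULE end indicator: the rest of the cell is padding. */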
849 		if (h.ts_remain >= 2 && *((unsigned short *)h.from_where) != 0xFFFF) {
850 			/* Next ULE SNDU starts right there. */
851 			h.new_ts = 0;
852 			h.priv->ule_skb = NULL;
853 			h.priv->ule_sndu_type_1 = 0;
854 			h.priv->ule_sndu_len = 0;
855 			// pr_warn("More data in current TS: [%#x %#x %#x %#x]\n",
856 			//	*(h.from_where + 0), *(h.from_where + 1),
857 			//	*(h.from_where + 2), *(h.from_where + 3));
858 			// pr_warn("h.ts @ %p, stopped @ %p:\n", h.ts, h.from_where + 0);
859 			// hexdump(h.ts, 188);
860 		} else {
861 			h.new_ts = 1;
862 			h.ts += TS_SZ;
863 			h.priv->ts_count++;
864 			if (h.priv->ule_skb == NULL) {
865 				h.priv->need_pusi = 1;
866 				h.priv->ule_sndu_type_1 = 0;
867 				h.priv->ule_sndu_len = 0;
868 			}
869 		}
870 	}	/* for all available TS cells */
871 }
872 
873 static int dvb_net_ts_callback(const u8 *buffer1, size_t buffer1_len,
874 			       const u8 *buffer2, size_t buffer2_len,
875 			       struct dmx_ts_feed *feed,
876 			       u32 *buffer_flags)
877 {
878 	struct net_device *dev = feed->priv;
879 
880 	if (buffer2)
881 		pr_warn("buffer2 not NULL: %p.\n", buffer2);
882 	if (buffer1_len > 32768)
883 		pr_warn("length > 32k: %zu.\n", buffer1_len);
884 	/* pr_info("TS callback: %u bytes, %u TS cells @ %p.\n",
885 		  buffer1_len, buffer1_len / TS_SZ, buffer1); */
886 	dvb_net_ule(dev, buffer1, buffer1_len);
887 	return 0;
888 }
889 
890 
891 static void dvb_net_sec(struct net_device *dev,
892 			const u8 *pkt, int pkt_len)
893 {
894 	u8 *eth;
895 	struct sk_buff *skb;
896 	struct net_device_stats *stats = &dev->stats;
897 	int snap = 0;
898 
899 	/* note: pkt_len includes a 32bit checksum */
900 	if (pkt_len < 16) {
901 		pr_warn("%s: IP/MPE packet length = %d too small.\n",
902 			dev->name, pkt_len);
903 		stats->rx_errors++;
904 		stats->rx_length_errors++;
905 		return;
906 	}
907 /* it seems some ISPs manage to screw up here, so we have to
908  * relax the error checks... */
909 #if 0
910 	if ((pkt[5] & 0xfd) != 0xc1) {
911 		/* drop scrambled or broken packets */
912 #else
913 	if ((pkt[5] & 0x3c) != 0x00) {
914 		/* drop scrambled */
915 #endif
916 		stats->rx_errors++;
917 		stats->rx_crc_errors++;
918 		return;
919 	}
920 	if (pkt[5] & 0x02) {
921 		/* handle LLC/SNAP, see rfc-1042 */
922 		if (pkt_len < 24 || memcmp(&pkt[12], "\xaa\xaa\x03\0\0\0", 6)) {
923 			stats->rx_dropped++;
924 			return;
925 		}
926 		snap = 8;
927 	}
928 	if (pkt[7]) {
929 		/* FIXME: assemble datagram from multiple sections */
930 		stats->rx_errors++;
931 		stats->rx_frame_errors++;
932 		return;
933 	}
934 
935 	/* Make room for the datagram: pkt_len minus the 12-byte MPE header and
936 	 * the 4-byte checksum (both stripped), plus a 14-byte ethernet header and
937 	 * 2 bytes of alignment, minus the 8-byte LLC/SNAP header if present. */
938 	if (!(skb = dev_alloc_skb(pkt_len - 4 - 12 + 14 + 2 - snap))) {
939 		//pr_notice("%s: Memory squeeze, dropping packet.\n", dev->name);
940 		stats->rx_dropped++;
941 		return;
942 	}
943 	skb_reserve(skb, 2);    /* longword align L3 header */
944 	skb->dev = dev;
945 
946 	/* copy L3 payload */
947 	eth = skb_put(skb, pkt_len - 12 - 4 + 14 - snap);
948 	memcpy(eth + 14, pkt + 12 + snap, pkt_len - 12 - 4 - snap);
949 
950 	/* create ethernet header: */
951 	eth[0]=pkt[0x0b];
952 	eth[1]=pkt[0x0a];
953 	eth[2]=pkt[0x09];
954 	eth[3]=pkt[0x08];
955 	eth[4]=pkt[0x04];
956 	eth[5]=pkt[0x03];
957 
958 	eth[6]=eth[7]=eth[8]=eth[9]=eth[10]=eth[11]=0;
959 
960 	if (snap) {
961 		eth[12] = pkt[18];
962 		eth[13] = pkt[19];
963 	} else {
964 		/* protocol numbers are from rfc-1700 or
965 		 * http://www.iana.org/assignments/ethernet-numbers
966 		 */
967 		if (pkt[12] >> 4 == 6) { /* version field from IP header */
968 			eth[12] = 0x86;	/* IPv6 */
969 			eth[13] = 0xdd;
970 		} else {
971 			eth[12] = 0x08;	/* IPv4 */
972 			eth[13] = 0x00;
973 		}
974 	}
975 
976 	skb->protocol = dvb_net_eth_type_trans(skb, dev);
977 
978 	stats->rx_packets++;
979 	stats->rx_bytes+=skb->len;
980 	netif_rx(skb);
981 }
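
/*
 * Layout of the MPE datagram_section consumed above (EN 301 192): byte 0 is
 * the table_id (0x3e), bytes 3-4 carry MAC_address_6/5 (the low end of the
 * destination MAC), byte 5 holds the scrambling bits and the LLC_SNAP_flag,
 * bytes 6-7 the section numbers, bytes 8-11 MAC_address_4..1, the datagram
 * starts at byte 12 and a 4-byte checksum/CRC-32 ends the section.
 */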
982 
983 static int dvb_net_sec_callback(const u8 *buffer1, size_t buffer1_len,
984 		 const u8 *buffer2, size_t buffer2_len,
985 		 struct dmx_section_filter *filter, u32 *buffer_flags)
986 {
987 	struct net_device *dev = filter->priv;
988 
989 	/*
990 	 * we rely on the DVB API definition where exactly one complete
991 	 * section is delivered in buffer1
992 	 */
993 	dvb_net_sec (dev, buffer1, buffer1_len);
994 	return 0;
995 }
996 
997 static netdev_tx_t dvb_net_tx(struct sk_buff *skb, struct net_device *dev)
998 {
999 	dev_kfree_skb(skb);
1000 	return NETDEV_TX_OK;
1001 }
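
/* The DVB network device is receive-only; frames queued for transmission are silently dropped. */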
1002 
1003 static u8 mask_normal[6]={0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
1004 static u8 mask_allmulti[6]={0xff, 0xff, 0xff, 0x00, 0x00, 0x00};
1005 static u8 mac_allmulti[6]={0x01, 0x00, 0x5e, 0x00, 0x00, 0x00};
1006 static u8 mask_promisc[6]={0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
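
/*
 * mask_normal matches the full unicast station address, mac_allmulti
 * together with mask_allmulti matches any 01:00:5e:xx:xx:xx (IPv4 multicast)
 * address, and mask_promisc masks the address out completely so every
 * section is accepted.
 */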
1007 
1008 static int dvb_net_filter_sec_set(struct net_device *dev,
1009 		   struct dmx_section_filter **secfilter,
1010 		   u8 *mac, u8 *mac_mask)
1011 {
1012 	struct dvb_net_priv *priv = netdev_priv(dev);
1013 	int ret;
1014 
1015 	*secfilter=NULL;
1016 	ret = priv->secfeed->allocate_filter(priv->secfeed, secfilter);
1017 	if (ret<0) {
1018 		pr_err("%s: could not get filter\n", dev->name);
1019 		return ret;
1020 	}
1021 
1022 	(*secfilter)->priv=(void *) dev;
1023 
1024 	memset((*secfilter)->filter_value, 0x00, DMX_MAX_FILTER_SIZE);
1025 	memset((*secfilter)->filter_mask,  0x00, DMX_MAX_FILTER_SIZE);
1026 	memset((*secfilter)->filter_mode,  0xff, DMX_MAX_FILTER_SIZE);
1027 
1028 	(*secfilter)->filter_value[0]=0x3e;
1029 	(*secfilter)->filter_value[3]=mac[5];
1030 	(*secfilter)->filter_value[4]=mac[4];
1031 	(*secfilter)->filter_value[8]=mac[3];
1032 	(*secfilter)->filter_value[9]=mac[2];
1033 	(*secfilter)->filter_value[10]=mac[1];
1034 	(*secfilter)->filter_value[11]=mac[0];
1035 
1036 	(*secfilter)->filter_mask[0] = 0xff;
1037 	(*secfilter)->filter_mask[3] = mac_mask[5];
1038 	(*secfilter)->filter_mask[4] = mac_mask[4];
1039 	(*secfilter)->filter_mask[8] = mac_mask[3];
1040 	(*secfilter)->filter_mask[9] = mac_mask[2];
1041 	(*secfilter)->filter_mask[10] = mac_mask[1];
1042 	(*secfilter)->filter_mask[11]=mac_mask[0];
1043 
1044 	netdev_dbg(dev, "filter mac=%pM mask=%pM\n", mac, mac_mask);
1045 
1046 	return 0;
1047 }
1048 
1049 static int dvb_net_feed_start(struct net_device *dev)
1050 {
1051 	int ret = 0, i;
1052 	struct dvb_net_priv *priv = netdev_priv(dev);
1053 	struct dmx_demux *demux = priv->demux;
1054 	unsigned char *mac = (unsigned char *) dev->dev_addr;
1055 
1056 	netdev_dbg(dev, "rx_mode %i\n", priv->rx_mode);
1057 	mutex_lock(&priv->mutex);
1058 	if (priv->tsfeed || priv->secfeed || priv->secfilter || priv->multi_secfilter[0])
1059 		pr_err("%s: BUG %d\n", __func__, __LINE__);
1060 
1061 	priv->secfeed=NULL;
1062 	priv->secfilter=NULL;
1063 	priv->tsfeed = NULL;
1064 
1065 	if (priv->feedtype == DVB_NET_FEEDTYPE_MPE) {
1066 		netdev_dbg(dev, "alloc secfeed\n");
1067 		ret=demux->allocate_section_feed(demux, &priv->secfeed,
1068 					 dvb_net_sec_callback);
1069 		if (ret<0) {
1070 			pr_err("%s: could not allocate section feed\n",
1071 			       dev->name);
1072 			goto error;
1073 		}
1074 
1075 		ret = priv->secfeed->set(priv->secfeed, priv->pid, 1);
1076 
1077 		if (ret<0) {
1078 			pr_err("%s: could not set section feed\n", dev->name);
1079 			priv->demux->release_section_feed(priv->demux, priv->secfeed);
1080 			priv->secfeed=NULL;
1081 			goto error;
1082 		}
1083 
1084 		if (priv->rx_mode != RX_MODE_PROMISC) {
1085 			netdev_dbg(dev, "set secfilter\n");
1086 			dvb_net_filter_sec_set(dev, &priv->secfilter, mac, mask_normal);
1087 		}
1088 
1089 		switch (priv->rx_mode) {
1090 		case RX_MODE_MULTI:
1091 			for (i = 0; i < priv->multi_num; i++) {
1092 				netdev_dbg(dev, "set multi_secfilter[%d]\n", i);
1093 				dvb_net_filter_sec_set(dev, &priv->multi_secfilter[i],
1094 						       priv->multi_macs[i], mask_normal);
1095 			}
1096 			break;
1097 		case RX_MODE_ALL_MULTI:
1098 			priv->multi_num=1;
1099 			netdev_dbg(dev, "set multi_secfilter[0]\n");
1100 			dvb_net_filter_sec_set(dev, &priv->multi_secfilter[0],
1101 					       mac_allmulti, mask_allmulti);
1102 			break;
1103 		case RX_MODE_PROMISC:
1104 			priv->multi_num=0;
1105 			netdev_dbg(dev, "set secfilter\n");
1106 			dvb_net_filter_sec_set(dev, &priv->secfilter, mac, mask_promisc);
1107 			break;
1108 		}
1109 
1110 		netdev_dbg(dev, "start filtering\n");
1111 		priv->secfeed->start_filtering(priv->secfeed);
1112 	} else if (priv->feedtype == DVB_NET_FEEDTYPE_ULE) {
1113 		ktime_t timeout = ns_to_ktime(10 * NSEC_PER_MSEC);
1114 
1115 		/* we have payloads encapsulated in TS */
1116 		netdev_dbg(dev, "alloc tsfeed\n");
1117 		ret = demux->allocate_ts_feed(demux, &priv->tsfeed, dvb_net_ts_callback);
1118 		if (ret < 0) {
1119 			pr_err("%s: could not allocate ts feed\n", dev->name);
1120 			goto error;
1121 		}
1122 
1123 		/* Set netdevice pointer for ts decaps callback. */
1124 		priv->tsfeed->priv = (void *)dev;
1125 		ret = priv->tsfeed->set(priv->tsfeed,
1126 					priv->pid, /* pid */
1127 					TS_PACKET, /* type */
1128 					DMX_PES_OTHER, /* pes type */
1129 					timeout    /* timeout */
1130 					);
1131 
1132 		if (ret < 0) {
1133 			pr_err("%s: could not set ts feed\n", dev->name);
1134 			priv->demux->release_ts_feed(priv->demux, priv->tsfeed);
1135 			priv->tsfeed = NULL;
1136 			goto error;
1137 		}
1138 
1139 		netdev_dbg(dev, "start filtering\n");
1140 		priv->tsfeed->start_filtering(priv->tsfeed);
1141 	} else
1142 		ret = -EINVAL;
1143 
1144 error:
1145 	mutex_unlock(&priv->mutex);
1146 	return ret;
1147 }
1148 
1149 static int dvb_net_feed_stop(struct net_device *dev)
1150 {
1151 	struct dvb_net_priv *priv = netdev_priv(dev);
1152 	int i, ret = 0;
1153 
1154 	mutex_lock(&priv->mutex);
1155 	if (priv->feedtype == DVB_NET_FEEDTYPE_MPE) {
1156 		if (priv->secfeed) {
1157 			if (priv->secfeed->is_filtering) {
1158 				netdev_dbg(dev, "stop secfeed\n");
1159 				priv->secfeed->stop_filtering(priv->secfeed);
1160 			}
1161 
1162 			if (priv->secfilter) {
1163 				netdev_dbg(dev, "release secfilter\n");
1164 				priv->secfeed->release_filter(priv->secfeed,
1165 							      priv->secfilter);
1166 				priv->secfilter=NULL;
1167 			}
1168 
1169 			for (i=0; i<priv->multi_num; i++) {
1170 				if (priv->multi_secfilter[i]) {
1171 					netdev_dbg(dev, "release multi_filter[%d]\n",
1172 						   i);
1173 					priv->secfeed->release_filter(priv->secfeed,
1174 								      priv->multi_secfilter[i]);
1175 					priv->multi_secfilter[i] = NULL;
1176 				}
1177 			}
1178 
1179 			priv->demux->release_section_feed(priv->demux, priv->secfeed);
1180 			priv->secfeed = NULL;
1181 		} else
1182 			pr_err("%s: no feed to stop\n", dev->name);
1183 	} else if (priv->feedtype == DVB_NET_FEEDTYPE_ULE) {
1184 		if (priv->tsfeed) {
1185 			if (priv->tsfeed->is_filtering) {
1186 				netdev_dbg(dev, "stop tsfeed\n");
1187 				priv->tsfeed->stop_filtering(priv->tsfeed);
1188 			}
1189 			priv->demux->release_ts_feed(priv->demux, priv->tsfeed);
1190 			priv->tsfeed = NULL;
1191 		}
1192 		else
1193 			pr_err("%s: no ts feed to stop\n", dev->name);
1194 	} else
1195 		ret = -EINVAL;
1196 	mutex_unlock(&priv->mutex);
1197 	return ret;
1198 }
1199 
1200 
1201 static int dvb_set_mc_filter(struct net_device *dev, unsigned char *addr)
1202 {
1203 	struct dvb_net_priv *priv = netdev_priv(dev);
1204 
1205 	if (priv->multi_num == DVB_NET_MULTICAST_MAX)
1206 		return -ENOMEM;
1207 
1208 	memcpy(priv->multi_macs[priv->multi_num], addr, ETH_ALEN);
1209 
1210 	priv->multi_num++;
1211 	return 0;
1212 }
1213 
1214 
1215 static void wq_set_multicast_list (struct work_struct *work)
1216 {
1217 	struct dvb_net_priv *priv =
1218 		container_of(work, struct dvb_net_priv, set_multicast_list_wq);
1219 	struct net_device *dev = priv->net;
1220 
1221 	dvb_net_feed_stop(dev);
1222 	priv->rx_mode = RX_MODE_UNI;
1223 	netif_addr_lock_bh(dev);
1224 
1225 	if (dev->flags & IFF_PROMISC) {
1226 		netdev_dbg(dev, "promiscuous mode\n");
1227 		priv->rx_mode = RX_MODE_PROMISC;
1228 	} else if ((dev->flags & IFF_ALLMULTI)) {
1229 		netdev_dbg(dev, "allmulti mode\n");
1230 		priv->rx_mode = RX_MODE_ALL_MULTI;
1231 	} else if (!netdev_mc_empty(dev)) {
1232 		struct netdev_hw_addr *ha;
1233 
1234 		netdev_dbg(dev, "set_mc_list, %d entries\n",
1235 			   netdev_mc_count(dev));
1236 
1237 		priv->rx_mode = RX_MODE_MULTI;
1238 		priv->multi_num = 0;
1239 
1240 		netdev_for_each_mc_addr(ha, dev)
1241 			dvb_set_mc_filter(dev, ha->addr);
1242 	}
1243 
1244 	netif_addr_unlock_bh(dev);
1245 	dvb_net_feed_start(dev);
1246 }
1247 
1248 
1249 static void dvb_net_set_multicast_list (struct net_device *dev)
1250 {
1251 	struct dvb_net_priv *priv = netdev_priv(dev);
1252 	schedule_work(&priv->set_multicast_list_wq);
1253 }
1254 
1255 
1256 static void wq_restart_net_feed (struct work_struct *work)
1257 {
1258 	struct dvb_net_priv *priv =
1259 		container_of(work, struct dvb_net_priv, restart_net_feed_wq);
1260 	struct net_device *dev = priv->net;
1261 
1262 	if (netif_running(dev)) {
1263 		dvb_net_feed_stop(dev);
1264 		dvb_net_feed_start(dev);
1265 	}
1266 }
1267 
1268 
1269 static int dvb_net_set_mac (struct net_device *dev, void *p)
1270 {
1271 	struct dvb_net_priv *priv = netdev_priv(dev);
1272 	struct sockaddr *addr=p;
1273 
1274 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1275 
1276 	if (netif_running(dev))
1277 		schedule_work(&priv->restart_net_feed_wq);
1278 
1279 	return 0;
1280 }
1281 
1282 
1283 static int dvb_net_open(struct net_device *dev)
1284 {
1285 	struct dvb_net_priv *priv = netdev_priv(dev);
1286 
1287 	priv->in_use++;
1288 	dvb_net_feed_start(dev);
1289 	return 0;
1290 }
1291 
1292 
1293 static int dvb_net_stop(struct net_device *dev)
1294 {
1295 	struct dvb_net_priv *priv = netdev_priv(dev);
1296 
1297 	priv->in_use--;
1298 	return dvb_net_feed_stop(dev);
1299 }
1300 
1301 static const struct header_ops dvb_header_ops = {
1302 	.create		= eth_header,
1303 	.parse		= eth_header_parse,
1304 };
1305 
1306 
1307 static const struct net_device_ops dvb_netdev_ops = {
1308 	.ndo_open		= dvb_net_open,
1309 	.ndo_stop		= dvb_net_stop,
1310 	.ndo_start_xmit		= dvb_net_tx,
1311 	.ndo_set_rx_mode	= dvb_net_set_multicast_list,
1312 	.ndo_set_mac_address    = dvb_net_set_mac,
1313 	.ndo_validate_addr	= eth_validate_addr,
1314 };
1315 
1316 static void dvb_net_setup(struct net_device *dev)
1317 {
1318 	ether_setup(dev);
1319 
1320 	dev->header_ops		= &dvb_header_ops;
1321 	dev->netdev_ops		= &dvb_netdev_ops;
1322 	dev->mtu		= 4096;
1323 	dev->max_mtu		= 4096;
1324 
1325 	dev->flags |= IFF_NOARP;
1326 }
1327 
1328 static int get_if(struct dvb_net *dvbnet)
1329 {
1330 	int i;
1331 
1332 	for (i=0; i<DVB_NET_DEVICES_MAX; i++)
1333 		if (!dvbnet->state[i])
1334 			break;
1335 
1336 	if (i == DVB_NET_DEVICES_MAX)
1337 		return -1;
1338 
1339 	dvbnet->state[i]=1;
1340 	return i;
1341 }
1342 
1343 static int dvb_net_add_if(struct dvb_net *dvbnet, u16 pid, u8 feedtype)
1344 {
1345 	struct net_device *net;
1346 	struct dvb_net_priv *priv;
1347 	int result;
1348 	int if_num;
1349 
1350 	if (feedtype != DVB_NET_FEEDTYPE_MPE && feedtype != DVB_NET_FEEDTYPE_ULE)
1351 		return -EINVAL;
1352 	if ((if_num = get_if(dvbnet)) < 0)
1353 		return -EINVAL;
1354 
1355 	net = alloc_netdev(sizeof(struct dvb_net_priv), "dvb",
1356 			   NET_NAME_UNKNOWN, dvb_net_setup);
1357 	if (!net)
1358 		return -ENOMEM;
1359 
1360 	if (dvbnet->dvbdev->id)
1361 		snprintf(net->name, IFNAMSIZ, "dvb%d%u%d",
1362 			 dvbnet->dvbdev->adapter->num, dvbnet->dvbdev->id, if_num);
1363 	else
1364 		/* compatibility fix to keep dvb0_0 format */
1365 		snprintf(net->name, IFNAMSIZ, "dvb%d_%d",
1366 			 dvbnet->dvbdev->adapter->num, if_num);
1367 
1368 	net->addr_len = 6;
1369 	memcpy(net->dev_addr, dvbnet->dvbdev->adapter->proposed_mac, 6);
1370 
1371 	dvbnet->device[if_num] = net;
1372 
1373 	priv = netdev_priv(net);
1374 	priv->net = net;
1375 	priv->demux = dvbnet->demux;
1376 	priv->pid = pid;
1377 	priv->rx_mode = RX_MODE_UNI;
1378 	priv->need_pusi = 1;
1379 	priv->tscc = 0;
1380 	priv->feedtype = feedtype;
1381 	reset_ule(priv);
1382 
1383 	INIT_WORK(&priv->set_multicast_list_wq, wq_set_multicast_list);
1384 	INIT_WORK(&priv->restart_net_feed_wq, wq_restart_net_feed);
1385 	mutex_init(&priv->mutex);
1386 
1387 	net->base_addr = pid;
1388 
1389 	if ((result = register_netdev(net)) < 0) {
1390 		dvbnet->device[if_num] = NULL;
1391 		free_netdev(net);
1392 		return result;
1393 	}
1394 	pr_info("created network interface %s\n", net->name);
1395 
1396 	return if_num;
1397 }
1398 
1399 static int dvb_net_remove_if(struct dvb_net *dvbnet, unsigned long num)
1400 {
1401 	struct net_device *net = dvbnet->device[num];
1402 	struct dvb_net_priv *priv;
1403 
1404 	if (!dvbnet->state[num])
1405 		return -EINVAL;
1406 	priv = netdev_priv(net);
1407 	if (priv->in_use)
1408 		return -EBUSY;
1409 
1410 	dvb_net_stop(net);
1411 	flush_work(&priv->set_multicast_list_wq);
1412 	flush_work(&priv->restart_net_feed_wq);
1413 	pr_info("removed network interface %s\n", net->name);
1414 	unregister_netdev(net);
1415 	dvbnet->state[num]=0;
1416 	dvbnet->device[num] = NULL;
1417 	free_netdev(net);
1418 
1419 	return 0;
1420 }
1421 
1422 static int dvb_net_do_ioctl(struct file *file,
1423 		  unsigned int cmd, void *parg)
1424 {
1425 	struct dvb_device *dvbdev = file->private_data;
1426 	struct dvb_net *dvbnet = dvbdev->priv;
1427 	int ret = 0;
1428 
1429 	if (((file->f_flags&O_ACCMODE)==O_RDONLY))
1430 		return -EPERM;
1431 
1432 	if (mutex_lock_interruptible(&dvbnet->ioctl_mutex))
1433 		return -ERESTARTSYS;
1434 
1435 	switch (cmd) {
1436 	case NET_ADD_IF:
1437 	{
1438 		struct dvb_net_if *dvbnetif = parg;
1439 		int result;
1440 
1441 		if (!capable(CAP_SYS_ADMIN)) {
1442 			ret = -EPERM;
1443 			goto ioctl_error;
1444 		}
1445 
1446 		if (!try_module_get(dvbdev->adapter->module)) {
1447 			ret = -EPERM;
1448 			goto ioctl_error;
1449 		}
1450 
1451 		result=dvb_net_add_if(dvbnet, dvbnetif->pid, dvbnetif->feedtype);
1452 		if (result<0) {
1453 			module_put(dvbdev->adapter->module);
1454 			ret = result;
1455 			goto ioctl_error;
1456 		}
1457 		dvbnetif->if_num=result;
1458 		break;
1459 	}
1460 	case NET_GET_IF:
1461 	{
1462 		struct net_device *netdev;
1463 		struct dvb_net_priv *priv_data;
1464 		struct dvb_net_if *dvbnetif = parg;
1465 
1466 		if (dvbnetif->if_num >= DVB_NET_DEVICES_MAX ||
1467 		    !dvbnet->state[dvbnetif->if_num]) {
1468 			ret = -EINVAL;
1469 			goto ioctl_error;
1470 		}
1471 
1472 		netdev = dvbnet->device[dvbnetif->if_num];
1473 
1474 		priv_data = netdev_priv(netdev);
1475 		dvbnetif->pid=priv_data->pid;
1476 		dvbnetif->feedtype=priv_data->feedtype;
1477 		break;
1478 	}
1479 	case NET_REMOVE_IF:
1480 	{
1481 		if (!capable(CAP_SYS_ADMIN)) {
1482 			ret = -EPERM;
1483 			goto ioctl_error;
1484 		}
1485 		if ((unsigned long) parg >= DVB_NET_DEVICES_MAX) {
1486 			ret = -EINVAL;
1487 			goto ioctl_error;
1488 		}
1489 		ret = dvb_net_remove_if(dvbnet, (unsigned long) parg);
1490 		if (!ret)
1491 			module_put(dvbdev->adapter->module);
1492 		break;
1493 	}
1494 
1495 	/* binary compatibility cruft */
1496 	case __NET_ADD_IF_OLD:
1497 	{
1498 		struct __dvb_net_if_old *dvbnetif = parg;
1499 		int result;
1500 
1501 		if (!capable(CAP_SYS_ADMIN)) {
1502 			ret = -EPERM;
1503 			goto ioctl_error;
1504 		}
1505 
1506 		if (!try_module_get(dvbdev->adapter->module)) {
1507 			ret = -EPERM;
1508 			goto ioctl_error;
1509 		}
1510 
1511 		result=dvb_net_add_if(dvbnet, dvbnetif->pid, DVB_NET_FEEDTYPE_MPE);
1512 		if (result<0) {
1513 			module_put(dvbdev->adapter->module);
1514 			ret = result;
1515 			goto ioctl_error;
1516 		}
1517 		dvbnetif->if_num=result;
1518 		break;
1519 	}
1520 	case __NET_GET_IF_OLD:
1521 	{
1522 		struct net_device *netdev;
1523 		struct dvb_net_priv *priv_data;
1524 		struct __dvb_net_if_old *dvbnetif = parg;
1525 
1526 		if (dvbnetif->if_num >= DVB_NET_DEVICES_MAX ||
1527 		    !dvbnet->state[dvbnetif->if_num]) {
1528 			ret = -EINVAL;
1529 			goto ioctl_error;
1530 		}
1531 
1532 		netdev = dvbnet->device[dvbnetif->if_num];
1533 
1534 		priv_data = netdev_priv(netdev);
1535 		dvbnetif->pid=priv_data->pid;
1536 		break;
1537 	}
1538 	default:
1539 		ret = -ENOTTY;
1540 		break;
1541 	}
1542 
1543 ioctl_error:
1544 	mutex_unlock(&dvbnet->ioctl_mutex);
1545 	return ret;
1546 }
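
/*
 * A minimal userspace sketch of the ioctl interface handled above.  Purely
 * illustrative and not part of the driver; the device path and PID are
 * example values, and NET_ADD_IF/NET_REMOVE_IF require CAP_SYS_ADMIN:
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/dvb/net.h>
 *
 *	int fd = open("/dev/dvb/adapter0/net0", O_RDWR);
 *	struct dvb_net_if params = {
 *		.pid      = 0x03e8,
 *		.feedtype = DVB_NET_FEEDTYPE_ULE,
 *	};
 *	ioctl(fd, NET_ADD_IF, &params);          // creates e.g. "dvb0_0"
 *	// ... bring the new interface up with ip(8), use it, then later:
 *	ioctl(fd, NET_REMOVE_IF, params.if_num);
 */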
1547 
1548 static long dvb_net_ioctl(struct file *file,
1549 	      unsigned int cmd, unsigned long arg)
1550 {
1551 	return dvb_usercopy(file, cmd, arg, dvb_net_do_ioctl);
1552 }
1553 
1554 static int dvb_net_close(struct inode *inode, struct file *file)
1555 {
1556 	struct dvb_device *dvbdev = file->private_data;
1557 	struct dvb_net *dvbnet = dvbdev->priv;
1558 
1559 	dvb_generic_release(inode, file);
1560 
1561 	if(dvbdev->users == 1 && dvbnet->exit == 1)
1562 		wake_up(&dvbdev->wait_queue);
1563 	return 0;
1564 }
1565 
1566 
1567 static const struct file_operations dvb_net_fops = {
1568 	.owner = THIS_MODULE,
1569 	.unlocked_ioctl = dvb_net_ioctl,
1570 	.open =	dvb_generic_open,
1571 	.release = dvb_net_close,
1572 	.llseek = noop_llseek,
1573 };
1574 
1575 static const struct dvb_device dvbdev_net = {
1576 	.priv = NULL,
1577 	.users = 1,
1578 	.writers = 1,
1579 #if defined(CONFIG_MEDIA_CONTROLLER_DVB)
1580 	.name = "dvb-net",
1581 #endif
1582 	.fops = &dvb_net_fops,
1583 };
1584 
1585 void dvb_net_release (struct dvb_net *dvbnet)
1586 {
1587 	int i;
1588 
1589 	dvbnet->exit = 1;
1590 	if (dvbnet->dvbdev->users < 1)
1591 		wait_event(dvbnet->dvbdev->wait_queue,
1592 				dvbnet->dvbdev->users==1);
1593 
1594 	dvb_unregister_device(dvbnet->dvbdev);
1595 
1596 	for (i=0; i<DVB_NET_DEVICES_MAX; i++) {
1597 		if (!dvbnet->state[i])
1598 			continue;
1599 		dvb_net_remove_if(dvbnet, i);
1600 	}
1601 }
1602 EXPORT_SYMBOL(dvb_net_release);
1603 
1604 
1605 int dvb_net_init (struct dvb_adapter *adap, struct dvb_net *dvbnet,
1606 		  struct dmx_demux *dmx)
1607 {
1608 	int i;
1609 
1610 	mutex_init(&dvbnet->ioctl_mutex);
1611 	dvbnet->demux = dmx;
1612 
1613 	for (i=0; i<DVB_NET_DEVICES_MAX; i++)
1614 		dvbnet->state[i] = 0;
1615 
1616 	return dvb_register_device(adap, &dvbnet->dvbdev, &dvbdev_net,
1617 			     dvbnet, DVB_DEVICE_NET, 0);
1618 }
1619 EXPORT_SYMBOL(dvb_net_init);
1620