1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * dvb_net.c
4  *
5  * Copyright (C) 2001 Convergence integrated media GmbH
6  *                    Ralph Metzler <ralph@convergence.de>
7  * Copyright (C) 2002 Ralph Metzler <rjkm@metzlerbros.de>
8  *
9  * ULE Decapsulation code:
10  * Copyright (C) 2003, 2004 gcs - Global Communication & Services GmbH.
11  *                      and Department of Scientific Computing
12  *                          Paris Lodron University of Salzburg.
13  *                          Hilmar Linder <hlinder@cosy.sbg.ac.at>
14  *                      and Wolfram Stering <wstering@cosy.sbg.ac.at>
15  *
16  * ULE Decaps according to RFC 4326.
17  */
18 
19 /*
20  * ULE ChangeLog:
21  * Feb 2004: hl/ws v1: Implementing draft-fair-ipdvb-ule-01.txt
22  *
23  * Dec 2004: hl/ws v2: Implementing draft-ietf-ipdvb-ule-03.txt:
24  *                       ULE Extension header handling.
25  *                     Bugreports by Moritz Vieth and Hanno Tersteegen,
26  *                       Fraunhofer Institute for Open Communication Systems
27  *                       Competence Center for Advanced Satellite Communications.
28  *                     Bugfixes and robustness improvements.
29  *                     Filtering on dest MAC addresses, if present (D-Bit = 0)
30  *                     DVB_ULE_DEBUG compile-time option.
31  * Apr 2006: cp v3:    Bugfixes and compliance with RFC 4326 (ULE) by
32  *                       Christian Praehauser <cpraehaus@cosy.sbg.ac.at>,
33  *                       Paris Lodron University of Salzburg.
34  */
35 
36 /*
37  * FIXME / TODO (dvb_net.c):
38  *
39  * Unloading does not work for 2.6.9 kernels: a refcount doesn't go to zero.
40  *
41  */
42 
43 #define pr_fmt(fmt) "dvb_net: " fmt
44 
45 #include <linux/module.h>
46 #include <linux/kernel.h>
47 #include <linux/netdevice.h>
48 #include <linux/nospec.h>
49 #include <linux/etherdevice.h>
50 #include <linux/dvb/net.h>
51 #include <linux/uio.h>
52 #include <linux/uaccess.h>
53 #include <linux/crc32.h>
54 #include <linux/mutex.h>
55 #include <linux/sched.h>
56 
57 #include <media/dvb_demux.h>
58 #include <media/dvb_net.h>
59 
60 static inline __u32 iov_crc32( __u32 c, struct kvec *iov, unsigned int cnt )
61 {
62 	unsigned int j;
63 	for (j = 0; j < cnt; j++)
64 		c = crc32_be( c, iov[j].iov_base, iov[j].iov_len );
65 	return c;
66 }
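/*
 * Illustrative sketch (not part of the driver): how iov_crc32() is used by
 * the ULE receive path further down.  The CRC is seeded with ~0 and run
 * big-endian over the SNDU header fields and payload, which live in
 * separate kvec segments.  Buffer contents below are made up.
 */
#if 0
static void iov_crc32_example(void)
{
	u8 hdr[2]  = { 0x00, 0x2a };		/* e.g. the SNDU length field */
	u8 body[4] = { 0xde, 0xad, 0xbe, 0xef };
	struct kvec iov[2] = {
		{ hdr,  sizeof(hdr)  },
		{ body, sizeof(body) },
	};
	u32 crc = iov_crc32(~0U, iov, 2);	/* CRC-32 over both segments */

	pr_debug("example crc=%08x\n", crc);
}
#endif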
67 
68 
69 #define DVB_NET_MULTICAST_MAX 10
70 
71 #ifdef DVB_ULE_DEBUG
72 /*
73  * The code inside DVB_ULE_DEBUG keeps a history of the
74  * last 100 TS cells processed.
75  */
76 static unsigned char ule_hist[100*TS_SZ] = { 0 };
77 static unsigned char *ule_where = ule_hist, ule_dump;
78 
79 static void hexdump(const unsigned char *buf, unsigned short len)
80 {
81 	print_hex_dump_debug("", DUMP_PREFIX_OFFSET, 16, 1, buf, len, true);
82 }
83 #endif
84 
85 struct dvb_net_priv {
86 	int in_use;
87 	u16 pid;
88 	struct net_device *net;
89 	struct dvb_net *host;
90 	struct dmx_demux *demux;
91 	struct dmx_section_feed *secfeed;
92 	struct dmx_section_filter *secfilter;
93 	struct dmx_ts_feed *tsfeed;
94 	int multi_num;
95 	struct dmx_section_filter *multi_secfilter[DVB_NET_MULTICAST_MAX];
96 	unsigned char multi_macs[DVB_NET_MULTICAST_MAX][6];
97 	int rx_mode;
98 #define RX_MODE_UNI 0
99 #define RX_MODE_MULTI 1
100 #define RX_MODE_ALL_MULTI 2
101 #define RX_MODE_PROMISC 3
102 	struct work_struct set_multicast_list_wq;
103 	struct work_struct restart_net_feed_wq;
104 	unsigned char feedtype;			/* Either DVB_NET_FEEDTYPE_MPE or DVB_NET_FEEDTYPE_ULE */
105 	int need_pusi;				/* Set to 1, if synchronization on PUSI required. */
106 	unsigned char tscc;			/* TS continuity counter after sync on PUSI. */
107 	struct sk_buff *ule_skb;		/* ULE SNDU decodes into this buffer. */
108 	unsigned char *ule_next_hdr;		/* Pointer into skb to next ULE extension header. */
109 	unsigned short ule_sndu_len;		/* ULE SNDU length in bytes, w/o D-Bit. */
110 	unsigned short ule_sndu_type;		/* ULE SNDU type field, complete. */
111 	unsigned char ule_sndu_type_1;		/* ULE SNDU type field, if split across 2 TS cells. */
112 	unsigned char ule_dbit;			/* D-bit: set if the destination MAC
113 						 * address is not present in the SNDU. */
114 	unsigned char ule_bridged;		/* Whether the ULE_BRIDGED extension header was found. */
115 	int ule_sndu_remain;			/* Nr. of bytes still required for current ULE SNDU. */
116 	unsigned long ts_count;			/* Current ts cell counter. */
117 	struct mutex mutex;
118 };
119 
120 
121 /*
122  *	Determine the packet's protocol ID. The rule here is that we
123  *	assume 802.3 if the type field is short enough to be a length.
124  *	This is normal practice and works for any 'now in use' protocol.
125  *
126  *  stolen from eth.c out of the linux kernel, hacked for dvb-device
127  *  by Michael Holzt <kju@debian.org>
128  */
129 static __be16 dvb_net_eth_type_trans(struct sk_buff *skb,
130 				      struct net_device *dev)
131 {
132 	struct ethhdr *eth;
133 	unsigned char *rawp;
134 
135 	skb_reset_mac_header(skb);
136 	skb_pull(skb,dev->hard_header_len);
137 	eth = eth_hdr(skb);
138 
139 	if (*eth->h_dest & 1) {
140 		if(ether_addr_equal(eth->h_dest,dev->broadcast))
141 			skb->pkt_type=PACKET_BROADCAST;
142 		else
143 			skb->pkt_type=PACKET_MULTICAST;
144 	}
145 
146 	if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN)
147 		return eth->h_proto;
148 
149 	rawp = skb->data;
150 
151 	/*
152 	 *	This is a magic hack to spot IPX packets. Older Novell breaks
153 	 *	the protocol design and runs IPX over 802.3 without an 802.2 LLC
154 	 *	layer. We look for FFFF which isn't a used 802.2 SSAP/DSAP. This
155 	 *	won't work for fault tolerant netware but does for the rest.
156 	 */
157 	if (*(unsigned short *)rawp == 0xFFFF)
158 		return htons(ETH_P_802_3);
159 
160 	/*
161 	 *	Real 802.2 LLC
162 	 */
163 	return htons(ETH_P_802_2);
164 }
165 
166 #define TS_SZ	188
167 #define TS_SYNC	0x47
168 #define TS_TEI	0x80
169 #define TS_SC	0xC0
170 #define TS_PUSI	0x40
171 #define TS_AF_A	0x20
172 #define TS_AF_D	0x10
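/*
 * Orientation sketch (illustration only, not part of the driver): how the
 * masks above apply to the first four header bytes of a 188-byte TS cell.
 * The same checks are performed by dvb_net_ule_new_ts_cell() below.
 */
#if 0
static bool ts_cell_looks_usable(const u8 *ts)
{
	if (ts[0] != TS_SYNC)		/* sync byte must be 0x47 */
		return false;
	if (ts[1] & TS_TEI)		/* transport error indicator set */
		return false;
	if ((ts[3] & TS_SC) != 0)	/* payload is scrambled */
		return false;
	return true;			/* PUSI, if any, is ts[1] & TS_PUSI */
}
#endif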
173 
174 /* ULE Extension Header handlers. */
175 
176 #define ULE_TEST	0
177 #define ULE_BRIDGED	1
178 
179 #define ULE_OPTEXTHDR_PADDING 0
180 
181 static int ule_test_sndu( struct dvb_net_priv *p )
182 {
183 	return -1;
184 }
185 
186 static int ule_bridged_sndu( struct dvb_net_priv *p )
187 {
188 	struct ethhdr *hdr = (struct ethhdr*) p->ule_next_hdr;
189 	if(ntohs(hdr->h_proto) < ETH_P_802_3_MIN) {
190 		int framelen = p->ule_sndu_len - ((p->ule_next_hdr+sizeof(struct ethhdr)) - p->ule_skb->data);
191 		/* A frame Type < ETH_P_802_3_MIN for a bridged frame introduces an LLC Length field. */
192 		if(framelen != ntohs(hdr->h_proto)) {
193 			return -1;
194 		}
195 	}
196 	/* Note:
197 	 * From RFC4326:
198 	 *  "A bridged SNDU is a Mandatory Extension Header of Type 1.
199 	 *   It must be the final (or only) extension header specified in the header chain of a SNDU."
200 	 * The 'ule_bridged' flag will cause the extension header processing loop to terminate.
201 	 */
202 	p->ule_bridged = 1;
203 	return 0;
204 }
205 
206 static int ule_exthdr_padding(struct dvb_net_priv *p)
207 {
208 	return 0;
209 }
210 
211 /*
212  * Handle ULE extension headers.
213  *  Function is called after a successful CRC32 verification of an ULE SNDU to complete its decoding.
214  *  Returns: >= 0: nr. of bytes consumed by next extension header
215  *	     -1:   Mandatory extension header that is not recognized or TEST SNDU; discard.
216  */
217 static int handle_one_ule_extension( struct dvb_net_priv *p )
218 {
219 	/* Table of mandatory extension header handlers, indexed by the 8-bit header type (0..255). */
220 	static int (*ule_mandatory_ext_handlers[256])( struct dvb_net_priv *p ) =
221 		{ [0] = ule_test_sndu, [1] = ule_bridged_sndu, [2] = NULL,  };
222 
223 	/* Table of optional extension header handlers, indexed by the 8-bit header type (0..255). */
224 	static int (*ule_optional_ext_handlers[256])( struct dvb_net_priv *p ) =
225 		{ [0] = ule_exthdr_padding, [1] = NULL, };
226 
227 	int ext_len = 0;
228 	unsigned char hlen = (p->ule_sndu_type & 0x0700) >> 8;
229 	unsigned char htype = p->ule_sndu_type & 0x00FF;
230 
231 	/* Discriminate mandatory and optional extension headers. */
232 	if (hlen == 0) {
233 		/* Mandatory extension header */
234 		if (ule_mandatory_ext_handlers[htype]) {
235 			ext_len = ule_mandatory_ext_handlers[htype]( p );
236 			if(ext_len >= 0) {
237 				p->ule_next_hdr += ext_len;
238 				if (!p->ule_bridged) {
239 					p->ule_sndu_type = ntohs(*(__be16 *)p->ule_next_hdr);
240 					p->ule_next_hdr += 2;
241 				} else {
242 					p->ule_sndu_type = ntohs(*(__be16 *)(p->ule_next_hdr + ((p->ule_dbit ? 2 : 3) * ETH_ALEN)));
243 					/* This assures the extension handling loop will terminate. */
244 				}
245 			}
246 			// else: extension handler failed or SNDU should be discarded
247 		} else
248 			ext_len = -1;	/* SNDU has to be discarded. */
249 	} else {
250 		/* Optional extension header.  Calculate the length. */
251 		ext_len = hlen << 1;
252 		/* Process the optional extension header according to its type. */
253 		if (ule_optional_ext_handlers[htype])
254 			(void)ule_optional_ext_handlers[htype]( p );
255 		p->ule_next_hdr += ext_len;
256 		p->ule_sndu_type = ntohs( *(__be16 *)(p->ule_next_hdr-2) );
257 		/*
258 		 * note: the length of the next header type is included in the
259 		 * length of THIS optional extension header
260 		 */
261 	}
262 
263 	return ext_len;
264 }
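/*
 * Worked example (illustration only): how the 16-bit ULE Type field splits
 * into the H-LEN/H-Type pair used by handle_one_ule_extension() above.
 * H-LEN 0 selects a mandatory handler (ULE_TEST, ULE_BRIDGED); a non-zero
 * H-LEN means an optional header of 2 * H-LEN bytes.
 */
#if 0
static void ule_type_field_example(void)
{
	u16 type  = 0x0100;			/* optional PADDING header      */
	u8  hlen  = (type & 0x0700) >> 8;	/* -> 1, i.e. 2 bytes long      */
	u8  htype = type & 0x00FF;		/* -> ULE_OPTEXTHDR_PADDING (0) */

	pr_debug("hlen=%d htype=%d ext_len=%d\n", hlen, htype, hlen << 1);
}
#endif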
265 
266 static int handle_ule_extensions( struct dvb_net_priv *p )
267 {
268 	int total_ext_len = 0, l;
269 
270 	p->ule_next_hdr = p->ule_skb->data;
271 	do {
272 		l = handle_one_ule_extension( p );
273 		if (l < 0)
274 			return l;	/* Stop extension header processing and discard SNDU. */
275 		total_ext_len += l;
276 		pr_debug("ule_next_hdr=%p, ule_sndu_type=%i, l=%i, total_ext_len=%i\n",
277 			 p->ule_next_hdr, (int)p->ule_sndu_type,
278 			 l, total_ext_len);
279 
280 	} while (p->ule_sndu_type < ETH_P_802_3_MIN);
281 
282 	return total_ext_len;
283 }
284 
285 
286 /* Prepare for a new ULE SNDU: reset the decoder state. */
287 static inline void reset_ule( struct dvb_net_priv *p )
288 {
289 	p->ule_skb = NULL;
290 	p->ule_next_hdr = NULL;
291 	p->ule_sndu_len = 0;
292 	p->ule_sndu_type = 0;
293 	p->ule_sndu_type_1 = 0;
294 	p->ule_sndu_remain = 0;
295 	p->ule_dbit = 0xFF;
296 	p->ule_bridged = 0;
297 }
298 
299 /*
300  * Decode ULE SNDUs according to draft-ietf-ipdvb-ule-03.txt from a sequence of
301  * TS cells of a single PID.
302  */
303 
304 struct dvb_net_ule_handle {
305 	struct net_device *dev;
306 	struct dvb_net_priv *priv;
307 	struct ethhdr *ethh;
308 	const u8 *buf;
309 	size_t buf_len;
310 	unsigned long skipped;
311 	const u8 *ts, *ts_end, *from_where;
312 	u8 ts_remain, how_much, new_ts;
313 	bool error;
314 };
315 
316 static int dvb_net_ule_new_ts_cell(struct dvb_net_ule_handle *h)
317 {
318 	/* We are about to process a new TS cell. */
319 
320 #ifdef DVB_ULE_DEBUG
321 	if (ule_where >= &ule_hist[100*TS_SZ])
322 		ule_where = ule_hist;
323 	memcpy(ule_where, h->ts, TS_SZ);
324 	if (ule_dump) {
325 		hexdump(ule_where, TS_SZ);
326 		ule_dump = 0;
327 	}
328 	ule_where += TS_SZ;
329 #endif
330 
331 	/*
332 	 * Check TS error conditions: sync_byte, transport_error_indicator,
333 	 * scrambling_control.
334 	 */
335 	if ((h->ts[0] != TS_SYNC) || (h->ts[1] & TS_TEI) ||
336 	    ((h->ts[3] & TS_SC) != 0)) {
337 		pr_warn("%lu: Invalid TS cell: SYNC %#x, TEI %u, SC %#x.\n",
338 			h->priv->ts_count, h->ts[0],
339 			(h->ts[1] & TS_TEI) >> 7,
340 			(h->ts[3] & TS_SC) >> 6);
341 
342 		/* Drop partly decoded SNDU, reset state, resync on PUSI. */
343 		if (h->priv->ule_skb) {
344 			dev_kfree_skb(h->priv->ule_skb);
345 			/* Prepare for next SNDU. */
346 			h->dev->stats.rx_errors++;
347 			h->dev->stats.rx_frame_errors++;
348 		}
349 		reset_ule(h->priv);
350 		h->priv->need_pusi = 1;
351 
352 		/* Continue with next TS cell. */
353 		h->ts += TS_SZ;
354 		h->priv->ts_count++;
355 		return 1;
356 	}
357 
358 	h->ts_remain = 184;
359 	h->from_where = h->ts + 4;
360 
361 	return 0;
362 }
363 
364 static int dvb_net_ule_ts_pusi(struct dvb_net_ule_handle *h)
365 {
366 	if (h->ts[1] & TS_PUSI) {
367 		/* Find beginning of first ULE SNDU in current TS cell. */
368 		/* Synchronize continuity counter. */
369 		h->priv->tscc = h->ts[3] & 0x0F;
370 		/* There is a pointer field here. */
371 		if (h->ts[4] > h->ts_remain) {
372 			pr_err("%lu: Invalid ULE packet (pointer field %d)\n",
373 				h->priv->ts_count, h->ts[4]);
374 			h->ts += TS_SZ;
375 			h->priv->ts_count++;
376 			return 1;
377 		}
378 		/* Skip to destination of pointer field. */
379 		h->from_where = &h->ts[5] + h->ts[4];
380 		h->ts_remain -= 1 + h->ts[4];
381 		h->skipped = 0;
382 	} else {
383 		h->skipped++;
384 		h->ts += TS_SZ;
385 		h->priv->ts_count++;
386 		return 1;
387 	}
388 
389 	return 0;
390 }
391 
392 static int dvb_net_ule_new_ts(struct dvb_net_ule_handle *h)
393 {
394 	/* Check continuity counter. */
395 	if ((h->ts[3] & 0x0F) == h->priv->tscc)
396 		h->priv->tscc = (h->priv->tscc + 1) & 0x0F;
397 	else {
398 		/* TS discontinuity handling: */
399 		pr_warn("%lu: TS discontinuity: got %#x, expected %#x.\n",
400 			h->priv->ts_count, h->ts[3] & 0x0F,
401 			h->priv->tscc);
402 		/* Drop partly decoded SNDU, reset state, resync on PUSI. */
403 		if (h->priv->ule_skb) {
404 			dev_kfree_skb(h->priv->ule_skb);
405 			/* Prepare for next SNDU. */
406 			// reset_ule(h->priv);  moved to below.
407 			h->dev->stats.rx_errors++;
408 			h->dev->stats.rx_frame_errors++;
409 		}
410 		reset_ule(h->priv);
411 		/* skip to next PUSI. */
412 		h->priv->need_pusi = 1;
413 		return 1;
414 	}
415 	/*
416 	 * If we still have an incomplete payload but PUSI is
417 	 * set, some TS cells are missing.
418 	 * This is only possible here if we missed exactly 16 TS
419 	 * cells (continuity counter wrap).
420 	 */
421 	if (h->ts[1] & TS_PUSI) {
422 		if (!h->priv->need_pusi) {
423 			if (!(*h->from_where < (h->ts_remain-1)) ||
424 			    *h->from_where != h->priv->ule_sndu_remain) {
425 				/*
426 				 * Pointer field is invalid.
427 				 * Drop this TS cell and any started ULE SNDU.
428 				 */
429 				pr_warn("%lu: Invalid pointer field: %u.\n",
430 					h->priv->ts_count,
431 					*h->from_where);
432 
433 				/*
434 				 * Drop partly decoded SNDU, reset state,
435 				 * resync on PUSI.
436 				 */
437 				if (h->priv->ule_skb) {
438 					h->error = true;
439 					dev_kfree_skb(h->priv->ule_skb);
440 				}
441 
442 				if (h->error || h->priv->ule_sndu_remain) {
443 					h->dev->stats.rx_errors++;
444 					h->dev->stats.rx_frame_errors++;
445 					h->error = false;
446 				}
447 
448 				reset_ule(h->priv);
449 				h->priv->need_pusi = 1;
450 				return 1;
451 			}
452 			/*
453 			 * Skip pointer field (we're processing a
454 			 * packed payload).
455 			 */
456 			h->from_where += 1;
457 			h->ts_remain -= 1;
458 		} else
459 			h->priv->need_pusi = 0;
460 
461 		if (h->priv->ule_sndu_remain > 183) {
462 			/*
463 			 * Current SNDU lacks more data than there
464 			 * could be available in the current TS cell.
465 			 */
466 			h->dev->stats.rx_errors++;
467 			h->dev->stats.rx_length_errors++;
468 			pr_warn("%lu: Expected %d more SNDU bytes, but got PUSI (pf %d, h->ts_remain %d).  Flushing incomplete payload.\n",
469 				h->priv->ts_count,
470 				h->priv->ule_sndu_remain,
471 				h->ts[4], h->ts_remain);
472 			dev_kfree_skb(h->priv->ule_skb);
473 			/* Prepare for next SNDU. */
474 			reset_ule(h->priv);
475 			/*
476 			 * Resync: go to where pointer field points to:
477 			 * start of next ULE SNDU.
478 			 */
479 			h->from_where += h->ts[4];
480 			h->ts_remain -= h->ts[4];
481 		}
482 	}
483 	return 0;
484 }
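/*
 * Sketch (illustration only) of the continuity check done above: the 4-bit
 * continuity_counter in ts[3] increments once per cell on a PID and wraps
 * from 15 back to 0, so the expected value is computed modulo 16.
 */
#if 0
static u8 expected_next_cc(u8 tscc)
{
	return (tscc + 1) & 0x0F;	/* 15 wraps around to 0 */
}
#endif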
485 
486 
487 /*
488  * Start a new payload with skb.
489  * Find ULE header.  It is only guaranteed that the
490  * length field (2 bytes) is contained in the current
491  * TS.
492  * Note: h->ts_remain must be >= 2 before the length field can be read.
493  */
494 static int dvb_net_ule_new_payload(struct dvb_net_ule_handle *h)
495 {
496 	if (h->ts_remain < 2) {
497 		pr_warn("Invalid payload packing: only %d bytes left in TS.  Resyncing.\n",
498 			h->ts_remain);
499 		h->priv->ule_sndu_len = 0;
500 		h->priv->need_pusi = 1;
501 		h->ts += TS_SZ;
502 		return 1;
503 	}
504 
505 	if (!h->priv->ule_sndu_len) {
506 		/* Got at least two bytes, thus extract the SNDU length. */
507 		h->priv->ule_sndu_len = h->from_where[0] << 8 |
508 					h->from_where[1];
509 		if (h->priv->ule_sndu_len & 0x8000) {
510 			/* D-Bit is set: no dest mac present. */
511 			h->priv->ule_sndu_len &= 0x7FFF;
512 			h->priv->ule_dbit = 1;
513 		} else
514 			h->priv->ule_dbit = 0;
515 
516 		if (h->priv->ule_sndu_len < 5) {
517 			pr_warn("%lu: Invalid ULE SNDU length %u. Resyncing.\n",
518 				h->priv->ts_count,
519 				h->priv->ule_sndu_len);
520 			h->dev->stats.rx_errors++;
521 			h->dev->stats.rx_length_errors++;
522 			h->priv->ule_sndu_len = 0;
523 			h->priv->need_pusi = 1;
524 			h->new_ts = 1;
525 			h->ts += TS_SZ;
526 			h->priv->ts_count++;
527 			return 1;
528 		}
529 		h->ts_remain -= 2;	/* consume the 2 bytes SNDU length. */
530 		h->from_where += 2;
531 	}
532 
533 	h->priv->ule_sndu_remain = h->priv->ule_sndu_len + 2;
534 	/*
535 	 * State of current TS:
536 	 *   h->ts_remain (remaining bytes in the current TS cell)
537 	 *   0	ule_type is not available now, we need the next TS cell
538 	 *   1	the first byte of the ule_type is present
539 	 * >=2	full ULE header present, maybe some payload data as well.
540 	 */
541 	switch (h->ts_remain) {
542 	case 1:
543 		h->priv->ule_sndu_remain--;
544 		h->priv->ule_sndu_type = h->from_where[0] << 8;
545 
546 		/* first byte of ule_type is set. */
547 		h->priv->ule_sndu_type_1 = 1;
548 		h->ts_remain -= 1;
549 		h->from_where += 1;
550 		fallthrough;
551 	case 0:
552 		h->new_ts = 1;
553 		h->ts += TS_SZ;
554 		h->priv->ts_count++;
555 		return 1;
556 
557 	default: /* complete ULE header is present in current TS. */
558 		/* Extract ULE type field. */
559 		if (h->priv->ule_sndu_type_1) {
560 			h->priv->ule_sndu_type_1 = 0;
561 			h->priv->ule_sndu_type |= h->from_where[0];
562 			h->from_where += 1; /* points to payload start. */
563 			h->ts_remain -= 1;
564 		} else {
565 			/* Complete type is present in new TS. */
566 			h->priv->ule_sndu_type = h->from_where[0] << 8 |
567 						 h->from_where[1];
568 			h->from_where += 2; /* points to payload start. */
569 			h->ts_remain -= 2;
570 		}
571 		break;
572 	}
573 
574 	/*
575 	 * Allocate the skb (decoder target buffer) with the correct size,
576 	 * as follows:
577 	 *
578 	 * prepare for the largest case: bridged SNDU with MAC address
579 	 * (dbit = 0).
580 	 */
581 	h->priv->ule_skb = dev_alloc_skb(h->priv->ule_sndu_len +
582 					 ETH_HLEN + ETH_ALEN);
583 	if (!h->priv->ule_skb) {
584 		pr_notice("%s: Memory squeeze, dropping packet.\n",
585 			  h->dev->name);
586 		h->dev->stats.rx_dropped++;
587 		return -1;
588 	}
589 
590 	/* This includes the CRC32 _and_ dest mac, if !dbit. */
591 	h->priv->ule_sndu_remain = h->priv->ule_sndu_len;
592 	h->priv->ule_skb->dev = h->dev;
593 	/*
594 	 * Leave space for Ethernet or bridged SNDU header
595 	 * (eth hdr plus one MAC addr).
596 	 */
597 	skb_reserve(h->priv->ule_skb, ETH_HLEN + ETH_ALEN);
598 
599 	return 0;
600 }
601 
602 
603 static int dvb_net_ule_should_drop(struct dvb_net_ule_handle *h)
604 {
605 	static const u8 bc_addr[ETH_ALEN] = { [0 ... ETH_ALEN - 1] = 0xff };
606 
607 	/*
608 	 * The destination MAC address is the next data in the skb.  It comes
609 	 * before any extension headers.
610 	 *
611 	 * Check if the payload of this SNDU should be passed up the stack.
612 	 */
613 	if (h->priv->rx_mode == RX_MODE_PROMISC)
614 		return 0;
615 
616 	if (h->priv->ule_skb->data[0] & 0x01) {
617 		/* multicast or broadcast */
618 		if (!ether_addr_equal(h->priv->ule_skb->data, bc_addr)) {
619 			/* multicast */
620 			if (h->priv->rx_mode == RX_MODE_MULTI) {
621 				int i;
622 
623 				for (i = 0; i < h->priv->multi_num &&
624 				     !ether_addr_equal(h->priv->ule_skb->data,
625 						       h->priv->multi_macs[i]);
626 				     i++)
627 					;
628 				if (i == h->priv->multi_num)
629 					return 1;
630 			} else if (h->priv->rx_mode != RX_MODE_ALL_MULTI)
631 				return 1; /* not in all-multi mode: drop this multicast frame */
632 			/*
633 			 * else:
634 			 * all multicast mode: accept all multicast packets
635 			 */
636 		}
637 		/* else: broadcast */
638 	} else if (!ether_addr_equal(h->priv->ule_skb->data, h->dev->dev_addr))
639 		return 1;
640 
641 	return 0;
642 }
643 
644 
645 static void dvb_net_ule_check_crc(struct dvb_net_ule_handle *h,
646 				  struct kvec iov[3],
647 				  u32 ule_crc, u32 expected_crc)
648 {
649 	u8 dest_addr[ETH_ALEN];
650 
651 	if (ule_crc != expected_crc) {
652 		pr_warn("%lu: CRC32 check FAILED: %08x / %08x, SNDU len %d type %#x, ts_remain %d, next 2: %x.\n",
653 			h->priv->ts_count, ule_crc, expected_crc,
654 			h->priv->ule_sndu_len, h->priv->ule_sndu_type,
655 			h->ts_remain,
656 			h->ts_remain > 2 ?
657 				*(unsigned short *)h->from_where : 0);
658 
659 	#ifdef DVB_ULE_DEBUG
660 		hexdump(iov[0].iov_base, iov[0].iov_len);
661 		hexdump(iov[1].iov_base, iov[1].iov_len);
662 		hexdump(iov[2].iov_base, iov[2].iov_len);
663 
664 		if (ule_where == ule_hist) {
665 			hexdump(&ule_hist[98*TS_SZ], TS_SZ);
666 			hexdump(&ule_hist[99*TS_SZ], TS_SZ);
667 		} else if (ule_where == &ule_hist[TS_SZ]) {
668 			hexdump(&ule_hist[99*TS_SZ], TS_SZ);
669 			hexdump(ule_hist, TS_SZ);
670 		} else {
671 			hexdump(ule_where - TS_SZ - TS_SZ, TS_SZ);
672 			hexdump(ule_where - TS_SZ, TS_SZ);
673 		}
674 		ule_dump = 1;
675 	#endif
676 
677 		h->dev->stats.rx_errors++;
678 		h->dev->stats.rx_crc_errors++;
679 		dev_kfree_skb(h->priv->ule_skb);
680 
681 		return;
682 	}
683 
684 	/* CRC32 verified OK. */
685 
686 	/* CRC32 was OK, so remove it from skb. */
687 	h->priv->ule_skb->tail -= 4;
688 	h->priv->ule_skb->len -= 4;
689 
690 	if (!h->priv->ule_dbit) {
691 		if (dvb_net_ule_should_drop(h)) {
692 			netdev_dbg(h->dev,
693 				   "Dropping SNDU: MAC destination address does not match: dest addr: %pM, h->dev addr: %pM\n",
694 				   h->priv->ule_skb->data, h->dev->dev_addr);
695 			dev_kfree_skb(h->priv->ule_skb);
696 			return;
697 		}
698 
699 		skb_copy_from_linear_data(h->priv->ule_skb, dest_addr,
700 					  ETH_ALEN);
701 		skb_pull(h->priv->ule_skb, ETH_ALEN);
702 	} else {
703 		/* dest_addr buffer is only valid if h->priv->ule_dbit == 0 */
704 		eth_zero_addr(dest_addr);
705 	}
706 
707 	/* Handle ULE Extension Headers. */
708 	if (h->priv->ule_sndu_type < ETH_P_802_3_MIN) {
709 		/* There is an extension header.  Handle it accordingly. */
710 		int l = handle_ule_extensions(h->priv);
711 
712 		if (l < 0) {
713 			/*
714 			 * Mandatory extension header unknown or TEST SNDU.
715 			 * Drop it.
716 			 */
717 
718 			// pr_warn("Dropping SNDU, extension headers.\n" );
719 			dev_kfree_skb(h->priv->ule_skb);
720 			return;
721 		}
722 		skb_pull(h->priv->ule_skb, l);
723 	}
724 
725 	/*
726 	 * Construct/assure correct ethernet header.
727 	 * Note: in bridged mode (h->priv->ule_bridged != 0)
728 	 * we already have the (original) ethernet
729 	 * header at the start of the payload (after
730 	 * optional dest. address and any extension
731 	 * headers).
732 	 */
733 	if (!h->priv->ule_bridged) {
734 		skb_push(h->priv->ule_skb, ETH_HLEN);
735 		h->ethh = (struct ethhdr *)h->priv->ule_skb->data;
736 		memcpy(h->ethh->h_dest, dest_addr, ETH_ALEN);
737 		eth_zero_addr(h->ethh->h_source);
738 		h->ethh->h_proto = htons(h->priv->ule_sndu_type);
739 	}
740 	/* else:  skb is in correct state; nothing to do. */
741 	h->priv->ule_bridged = 0;
742 
743 	/* Stuff into kernel's protocol stack. */
744 	h->priv->ule_skb->protocol = dvb_net_eth_type_trans(h->priv->ule_skb,
745 							   h->dev);
746 	/*
747 	 * If D-bit is set (i.e. destination MAC address not present),
748 	 * receive the packet anyhow.
749 	 */
750 #if 0
751 	if (h->priv->ule_dbit && skb->pkt_type == PACKET_OTHERHOST)
752 		h->priv->ule_skb->pkt_type = PACKET_HOST;
753 #endif
754 	h->dev->stats.rx_packets++;
755 	h->dev->stats.rx_bytes += h->priv->ule_skb->len;
756 	netif_rx(h->priv->ule_skb);
757 }
758 
759 static void dvb_net_ule(struct net_device *dev, const u8 *buf, size_t buf_len)
760 {
761 	int ret;
762 	struct dvb_net_ule_handle h = {
763 		.dev = dev,
764 		.priv = netdev_priv(dev),
765 		.ethh = NULL,
766 		.buf = buf,
767 		.buf_len = buf_len,
768 		.skipped = 0L,
769 		.ts = NULL,
770 		.ts_end = NULL,
771 		.from_where = NULL,
772 		.ts_remain = 0,
773 		.how_much = 0,
774 		.new_ts = 1,
775 		.error = false,
776 	};
777 
778 	/*
779 	 * For all TS cells in current buffer.
780 	 * Apparently, we are called for every single TS cell.
781 	 */
782 	for (h.ts = h.buf, h.ts_end = h.buf + h.buf_len;
783 	     h.ts < h.ts_end; /* no incr. */) {
784 		if (h.new_ts) {
785 			/* We are about to process a new TS cell. */
786 			if (dvb_net_ule_new_ts_cell(&h))
787 				continue;
788 		}
789 
790 		/* Synchronize on PUSI, if required. */
791 		if (h.priv->need_pusi) {
792 			if (dvb_net_ule_ts_pusi(&h))
793 				continue;
794 		}
795 
796 		if (h.new_ts) {
797 			if (dvb_net_ule_new_ts(&h))
798 				continue;
799 		}
800 
801 		/* Check if new payload needs to be started. */
802 		if (h.priv->ule_skb == NULL) {
803 			ret = dvb_net_ule_new_payload(&h);
804 			if (ret < 0)
805 				return;
806 			if (ret)
807 				continue;
808 		}
809 
810 		/* Copy data into our current skb. */
811 		h.how_much = min(h.priv->ule_sndu_remain, (int)h.ts_remain);
812 		skb_put_data(h.priv->ule_skb, h.from_where, h.how_much);
813 		h.priv->ule_sndu_remain -= h.how_much;
814 		h.ts_remain -= h.how_much;
815 		h.from_where += h.how_much;
816 
817 		/* Check for complete payload. */
818 		if (h.priv->ule_sndu_remain <= 0) {
819 			/* Check CRC32, we've got it in our skb already. */
820 			__be16 ulen = htons(h.priv->ule_sndu_len);
821 			__be16 utype = htons(h.priv->ule_sndu_type);
822 			const u8 *tail;
823 			struct kvec iov[3] = {
824 				{ &ulen, sizeof ulen },
825 				{ &utype, sizeof utype },
826 				{ h.priv->ule_skb->data,
827 				  h.priv->ule_skb->len - 4 }
828 			};
829 			u32 ule_crc = ~0L, expected_crc;
830 			if (h.priv->ule_dbit) {
831 				/* Set D-bit for CRC32 verification,
832 				 * if it was set originally. */
833 				ulen |= htons(0x8000);
834 			}
835 
836 			ule_crc = iov_crc32(ule_crc, iov, 3);
837 			tail = skb_tail_pointer(h.priv->ule_skb);
838 			expected_crc = *(tail - 4) << 24 |
839 				       *(tail - 3) << 16 |
840 				       *(tail - 2) << 8 |
841 				       *(tail - 1);
842 
843 			dvb_net_ule_check_crc(&h, iov, ule_crc, expected_crc);
844 
845 			/* Prepare for next SNDU. */
846 			reset_ule(h.priv);
847 		}
848 
849 		/* More data in current TS (look at the bytes following the CRC32)? */
850 		if (h.ts_remain >= 2 && *((unsigned short *)h.from_where) != 0xFFFF) {
851 			/* Next ULE SNDU starts right there. */
852 			h.new_ts = 0;
853 			h.priv->ule_skb = NULL;
854 			h.priv->ule_sndu_type_1 = 0;
855 			h.priv->ule_sndu_len = 0;
856 			// pr_warn("More data in current TS: [%#x %#x %#x %#x]\n",
857 			//	*(h.from_where + 0), *(h.from_where + 1),
858 			//	*(h.from_where + 2), *(h.from_where + 3));
859 			// pr_warn("h.ts @ %p, stopped @ %p:\n", h.ts, h.from_where + 0);
860 			// hexdump(h.ts, 188);
861 		} else {
862 			h.new_ts = 1;
863 			h.ts += TS_SZ;
864 			h.priv->ts_count++;
865 			if (h.priv->ule_skb == NULL) {
866 				h.priv->need_pusi = 1;
867 				h.priv->ule_sndu_type_1 = 0;
868 				h.priv->ule_sndu_len = 0;
869 			}
870 		}
871 	}	/* for all available TS cells */
872 }
873 
874 static int dvb_net_ts_callback(const u8 *buffer1, size_t buffer1_len,
875 			       const u8 *buffer2, size_t buffer2_len,
876 			       struct dmx_ts_feed *feed,
877 			       u32 *buffer_flags)
878 {
879 	struct net_device *dev = feed->priv;
880 
881 	if (buffer2)
882 		pr_warn("buffer2 not NULL: %p.\n", buffer2);
883 	if (buffer1_len > 32768)
884 		pr_warn("length > 32k: %zu.\n", buffer1_len);
885 	/* pr_info("TS callback: %u bytes, %u TS cells @ %p.\n",
886 		  buffer1_len, buffer1_len / TS_SZ, buffer1); */
887 	dvb_net_ule(dev, buffer1, buffer1_len);
888 	return 0;
889 }
890 
891 
892 static void dvb_net_sec(struct net_device *dev,
893 			const u8 *pkt, int pkt_len)
894 {
895 	u8 *eth;
896 	struct sk_buff *skb;
897 	struct net_device_stats *stats = &dev->stats;
898 	int snap = 0;
899 
900 	/* note: pkt_len includes a 32bit checksum */
901 	if (pkt_len < 16) {
902 		pr_warn("%s: IP/MPE packet length = %d too small.\n",
903 			dev->name, pkt_len);
904 		stats->rx_errors++;
905 		stats->rx_length_errors++;
906 		return;
907 	}
908 /* it seems some ISPs manage to screw up here, so we have to
909  * relax the error checks... */
910 #if 0
911 	if ((pkt[5] & 0xfd) != 0xc1) {
912 		/* drop scrambled or broken packets */
913 #else
914 	if ((pkt[5] & 0x3c) != 0x00) {
915 		/* drop scrambled */
916 #endif
917 		stats->rx_errors++;
918 		stats->rx_crc_errors++;
919 		return;
920 	}
921 	if (pkt[5] & 0x02) {
922 		/* handle LLC/SNAP, see rfc-1042 */
923 		if (pkt_len < 24 || memcmp(&pkt[12], "\xaa\xaa\x03\0\0\0", 6)) {
924 			stats->rx_dropped++;
925 			return;
926 		}
927 		snap = 8;
928 	}
929 	if (pkt[7]) {
930 		/* FIXME: assemble datagram from multiple sections */
931 		stats->rx_errors++;
932 		stats->rx_frame_errors++;
933 		return;
934 	}
935 
936 	/* we have 14 byte ethernet header (ip header follows);
937 	 * 12 byte MPE header; 4 byte checksum; + 2 byte alignment, 8 byte LLC/SNAP
938 	 */
939 	if (!(skb = dev_alloc_skb(pkt_len - 4 - 12 + 14 + 2 - snap))) {
940 		//pr_notice("%s: Memory squeeze, dropping packet.\n", dev->name);
941 		stats->rx_dropped++;
942 		return;
943 	}
944 	skb_reserve(skb, 2);    /* longword align L3 header */
945 	skb->dev = dev;
946 
947 	/* copy L3 payload */
948 	eth = skb_put(skb, pkt_len - 12 - 4 + 14 - snap);
949 	memcpy(eth + 14, pkt + 12 + snap, pkt_len - 12 - 4 - snap);
950 
951 	/* create ethernet header: */
952 	eth[0]=pkt[0x0b];
953 	eth[1]=pkt[0x0a];
954 	eth[2]=pkt[0x09];
955 	eth[3]=pkt[0x08];
956 	eth[4]=pkt[0x04];
957 	eth[5]=pkt[0x03];
958 
959 	eth[6]=eth[7]=eth[8]=eth[9]=eth[10]=eth[11]=0;
960 
961 	if (snap) {
962 		eth[12] = pkt[18];
963 		eth[13] = pkt[19];
964 	} else {
965 		/* protocol numbers are from rfc-1700 or
966 		 * http://www.iana.org/assignments/ethernet-numbers
967 		 */
968 		if (pkt[12] >> 4 == 6) { /* version field from IP header */
969 			eth[12] = 0x86;	/* IPv6 */
970 			eth[13] = 0xdd;
971 		} else {
972 			eth[12] = 0x08;	/* IPv4 */
973 			eth[13] = 0x00;
974 		}
975 	}
976 
977 	skb->protocol = dvb_net_eth_type_trans(skb, dev);
978 
979 	stats->rx_packets++;
980 	stats->rx_bytes+=skb->len;
981 	netif_rx(skb);
982 }
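/*
 * Orientation sketch (summarized from EN 301 192, not code from this
 * driver): the MPE section header offsets dvb_net_sec() above relies on.
 * The destination MAC is split across the header, most significant byte
 * last:
 *
 *   pkt[0]      table_id (0x3e)
 *   pkt[3..4]   MAC_address_6, MAC_address_5 (lowest two MAC bytes)
 *   pkt[5]      scrambling bits and LLC_SNAP_flag (0x02)
 *   pkt[6..7]   section_number, last_section_number
 *   pkt[8..11]  MAC_address_4 .. MAC_address_1 (highest four MAC bytes)
 *   pkt[12..]   datagram (or LLC/SNAP header first), CRC-32 at the end
 */
#if 0
static void mpe_dest_mac_example(const u8 *pkt, u8 mac[ETH_ALEN])
{
	/* Reassemble the destination MAC the same way dvb_net_sec() does. */
	mac[0] = pkt[0x0b];
	mac[1] = pkt[0x0a];
	mac[2] = pkt[0x09];
	mac[3] = pkt[0x08];
	mac[4] = pkt[0x04];
	mac[5] = pkt[0x03];
}
#endif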
983 
984 static int dvb_net_sec_callback(const u8 *buffer1, size_t buffer1_len,
985 		 const u8 *buffer2, size_t buffer2_len,
986 		 struct dmx_section_filter *filter, u32 *buffer_flags)
987 {
988 	struct net_device *dev = filter->priv;
989 
990 	/*
991 	 * we rely on the DVB API definition where exactly one complete
992 	 * section is delivered in buffer1
993 	 */
994 	dvb_net_sec (dev, buffer1, buffer1_len);
995 	return 0;
996 }
997 
998 static netdev_tx_t dvb_net_tx(struct sk_buff *skb, struct net_device *dev)
999 {
1000 	dev_kfree_skb(skb);
1001 	return NETDEV_TX_OK;
1002 }
1003 
1004 static u8 mask_normal[6]={0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
1005 static u8 mask_allmulti[6]={0xff, 0xff, 0xff, 0x00, 0x00, 0x00};
1006 static u8 mac_allmulti[6]={0x01, 0x00, 0x5e, 0x00, 0x00, 0x00};
1007 static u8 mask_promisc[6]={0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
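/*
 * For orientation (illustration only): mask_normal matches one exact MAC,
 * mask_allmulti together with mac_allmulti matches any address whose first
 * three bytes are 01:00:5e (the IPv4 multicast OUI), and mask_promisc
 * matches everything.  A sketch of how such a value/mask compare works in
 * principle; the real matching is done by the demux section filter:
 */
#if 0
static bool mac_matches(const u8 *addr, const u8 *value, const u8 *mask)
{
	int i;

	for (i = 0; i < 6; i++)
		if ((addr[i] ^ value[i]) & mask[i])
			return false;
	return true;
}
#endif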
1008 
1009 static int dvb_net_filter_sec_set(struct net_device *dev,
1010 		   struct dmx_section_filter **secfilter,
1011 		   const u8 *mac, u8 *mac_mask)
1012 {
1013 	struct dvb_net_priv *priv = netdev_priv(dev);
1014 	int ret;
1015 
1016 	*secfilter=NULL;
1017 	ret = priv->secfeed->allocate_filter(priv->secfeed, secfilter);
1018 	if (ret<0) {
1019 		pr_err("%s: could not get filter\n", dev->name);
1020 		return ret;
1021 	}
1022 
1023 	(*secfilter)->priv=(void *) dev;
1024 
1025 	memset((*secfilter)->filter_value, 0x00, DMX_MAX_FILTER_SIZE);
1026 	memset((*secfilter)->filter_mask,  0x00, DMX_MAX_FILTER_SIZE);
1027 	memset((*secfilter)->filter_mode,  0xff, DMX_MAX_FILTER_SIZE);
1028 
1029 	(*secfilter)->filter_value[0]=0x3e;
1030 	(*secfilter)->filter_value[3]=mac[5];
1031 	(*secfilter)->filter_value[4]=mac[4];
1032 	(*secfilter)->filter_value[8]=mac[3];
1033 	(*secfilter)->filter_value[9]=mac[2];
1034 	(*secfilter)->filter_value[10]=mac[1];
1035 	(*secfilter)->filter_value[11]=mac[0];
1036 
1037 	(*secfilter)->filter_mask[0] = 0xff;
1038 	(*secfilter)->filter_mask[3] = mac_mask[5];
1039 	(*secfilter)->filter_mask[4] = mac_mask[4];
1040 	(*secfilter)->filter_mask[8] = mac_mask[3];
1041 	(*secfilter)->filter_mask[9] = mac_mask[2];
1042 	(*secfilter)->filter_mask[10] = mac_mask[1];
1043 	(*secfilter)->filter_mask[11]=mac_mask[0];
1044 
1045 	netdev_dbg(dev, "filter mac=%pM mask=%pM\n", mac, mac_mask);
1046 
1047 	return 0;
1048 }
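/*
 * Usage sketch (illustration only, the helper name is made up): programming
 * a unicast filter on the device MAC address, as dvb_net_feed_start() below
 * does with mask_normal.  With this filter the demux only delivers MPE
 * sections whose header carries the interface's own MAC address.
 */
#if 0
static int unicast_filter_example(struct net_device *dev)
{
	struct dmx_section_filter *filter;

	return dvb_net_filter_sec_set(dev, &filter, dev->dev_addr, mask_normal);
}
#endif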
1049 
1050 static int dvb_net_feed_start(struct net_device *dev)
1051 {
1052 	int ret = 0, i;
1053 	struct dvb_net_priv *priv = netdev_priv(dev);
1054 	struct dmx_demux *demux = priv->demux;
1055 	const unsigned char *mac = (const unsigned char *) dev->dev_addr;
1056 
1057 	netdev_dbg(dev, "rx_mode %i\n", priv->rx_mode);
1058 	mutex_lock(&priv->mutex);
1059 	if (priv->tsfeed || priv->secfeed || priv->secfilter || priv->multi_secfilter[0])
1060 		pr_err("%s: BUG %d\n", __func__, __LINE__);
1061 
1062 	priv->secfeed=NULL;
1063 	priv->secfilter=NULL;
1064 	priv->tsfeed = NULL;
1065 
1066 	if (priv->feedtype == DVB_NET_FEEDTYPE_MPE) {
1067 		netdev_dbg(dev, "alloc secfeed\n");
1068 		ret=demux->allocate_section_feed(demux, &priv->secfeed,
1069 					 dvb_net_sec_callback);
1070 		if (ret<0) {
1071 			pr_err("%s: could not allocate section feed\n",
1072 			       dev->name);
1073 			goto error;
1074 		}
1075 
1076 		ret = priv->secfeed->set(priv->secfeed, priv->pid, 1);
1077 
1078 		if (ret<0) {
1079 			pr_err("%s: could not set section feed\n", dev->name);
1080 			priv->demux->release_section_feed(priv->demux, priv->secfeed);
1081 			priv->secfeed=NULL;
1082 			goto error;
1083 		}
1084 
1085 		if (priv->rx_mode != RX_MODE_PROMISC) {
1086 			netdev_dbg(dev, "set secfilter\n");
1087 			dvb_net_filter_sec_set(dev, &priv->secfilter, mac, mask_normal);
1088 		}
1089 
1090 		switch (priv->rx_mode) {
1091 		case RX_MODE_MULTI:
1092 			for (i = 0; i < priv->multi_num; i++) {
1093 				netdev_dbg(dev, "set multi_secfilter[%d]\n", i);
1094 				dvb_net_filter_sec_set(dev, &priv->multi_secfilter[i],
1095 						       priv->multi_macs[i], mask_normal);
1096 			}
1097 			break;
1098 		case RX_MODE_ALL_MULTI:
1099 			priv->multi_num=1;
1100 			netdev_dbg(dev, "set multi_secfilter[0]\n");
1101 			dvb_net_filter_sec_set(dev, &priv->multi_secfilter[0],
1102 					       mac_allmulti, mask_allmulti);
1103 			break;
1104 		case RX_MODE_PROMISC:
1105 			priv->multi_num=0;
1106 			netdev_dbg(dev, "set secfilter\n");
1107 			dvb_net_filter_sec_set(dev, &priv->secfilter, mac, mask_promisc);
1108 			break;
1109 		}
1110 
1111 		netdev_dbg(dev, "start filtering\n");
1112 		priv->secfeed->start_filtering(priv->secfeed);
1113 	} else if (priv->feedtype == DVB_NET_FEEDTYPE_ULE) {
1114 		ktime_t timeout = ns_to_ktime(10 * NSEC_PER_MSEC);
1115 
1116 		/* we have payloads encapsulated in TS */
1117 		netdev_dbg(dev, "alloc tsfeed\n");
1118 		ret = demux->allocate_ts_feed(demux, &priv->tsfeed, dvb_net_ts_callback);
1119 		if (ret < 0) {
1120 			pr_err("%s: could not allocate ts feed\n", dev->name);
1121 			goto error;
1122 		}
1123 
1124 		/* Set netdevice pointer for ts decaps callback. */
1125 		priv->tsfeed->priv = (void *)dev;
1126 		ret = priv->tsfeed->set(priv->tsfeed,
1127 					priv->pid, /* pid */
1128 					TS_PACKET, /* type */
1129 					DMX_PES_OTHER, /* pes type */
1130 					timeout    /* timeout */
1131 					);
1132 
1133 		if (ret < 0) {
1134 			pr_err("%s: could not set ts feed\n", dev->name);
1135 			priv->demux->release_ts_feed(priv->demux, priv->tsfeed);
1136 			priv->tsfeed = NULL;
1137 			goto error;
1138 		}
1139 
1140 		netdev_dbg(dev, "start filtering\n");
1141 		priv->tsfeed->start_filtering(priv->tsfeed);
1142 	} else
1143 		ret = -EINVAL;
1144 
1145 error:
1146 	mutex_unlock(&priv->mutex);
1147 	return ret;
1148 }
1149 
1150 static int dvb_net_feed_stop(struct net_device *dev)
1151 {
1152 	struct dvb_net_priv *priv = netdev_priv(dev);
1153 	int i, ret = 0;
1154 
1155 	mutex_lock(&priv->mutex);
1156 	if (priv->feedtype == DVB_NET_FEEDTYPE_MPE) {
1157 		if (priv->secfeed) {
1158 			if (priv->secfeed->is_filtering) {
1159 				netdev_dbg(dev, "stop secfeed\n");
1160 				priv->secfeed->stop_filtering(priv->secfeed);
1161 			}
1162 
1163 			if (priv->secfilter) {
1164 				netdev_dbg(dev, "release secfilter\n");
1165 				priv->secfeed->release_filter(priv->secfeed,
1166 							      priv->secfilter);
1167 				priv->secfilter=NULL;
1168 			}
1169 
1170 			for (i=0; i<priv->multi_num; i++) {
1171 				if (priv->multi_secfilter[i]) {
1172 					netdev_dbg(dev, "release multi_filter[%d]\n",
1173 						   i);
1174 					priv->secfeed->release_filter(priv->secfeed,
1175 								      priv->multi_secfilter[i]);
1176 					priv->multi_secfilter[i] = NULL;
1177 				}
1178 			}
1179 
1180 			priv->demux->release_section_feed(priv->demux, priv->secfeed);
1181 			priv->secfeed = NULL;
1182 		} else
1183 			pr_err("%s: no feed to stop\n", dev->name);
1184 	} else if (priv->feedtype == DVB_NET_FEEDTYPE_ULE) {
1185 		if (priv->tsfeed) {
1186 			if (priv->tsfeed->is_filtering) {
1187 				netdev_dbg(dev, "stop tsfeed\n");
1188 				priv->tsfeed->stop_filtering(priv->tsfeed);
1189 			}
1190 			priv->demux->release_ts_feed(priv->demux, priv->tsfeed);
1191 			priv->tsfeed = NULL;
1192 		}
1193 		else
1194 			pr_err("%s: no ts feed to stop\n", dev->name);
1195 	} else
1196 		ret = -EINVAL;
1197 	mutex_unlock(&priv->mutex);
1198 	return ret;
1199 }
1200 
1201 
1202 static int dvb_set_mc_filter(struct net_device *dev, unsigned char *addr)
1203 {
1204 	struct dvb_net_priv *priv = netdev_priv(dev);
1205 
1206 	if (priv->multi_num == DVB_NET_MULTICAST_MAX)
1207 		return -ENOMEM;
1208 
1209 	memcpy(priv->multi_macs[priv->multi_num], addr, ETH_ALEN);
1210 
1211 	priv->multi_num++;
1212 	return 0;
1213 }
1214 
1215 
1216 static void wq_set_multicast_list (struct work_struct *work)
1217 {
1218 	struct dvb_net_priv *priv =
1219 		container_of(work, struct dvb_net_priv, set_multicast_list_wq);
1220 	struct net_device *dev = priv->net;
1221 
1222 	dvb_net_feed_stop(dev);
1223 	priv->rx_mode = RX_MODE_UNI;
1224 	netif_addr_lock_bh(dev);
1225 
1226 	if (dev->flags & IFF_PROMISC) {
1227 		netdev_dbg(dev, "promiscuous mode\n");
1228 		priv->rx_mode = RX_MODE_PROMISC;
1229 	} else if ((dev->flags & IFF_ALLMULTI)) {
1230 		netdev_dbg(dev, "allmulti mode\n");
1231 		priv->rx_mode = RX_MODE_ALL_MULTI;
1232 	} else if (!netdev_mc_empty(dev)) {
1233 		struct netdev_hw_addr *ha;
1234 
1235 		netdev_dbg(dev, "set_mc_list, %d entries\n",
1236 			   netdev_mc_count(dev));
1237 
1238 		priv->rx_mode = RX_MODE_MULTI;
1239 		priv->multi_num = 0;
1240 
1241 		netdev_for_each_mc_addr(ha, dev)
1242 			dvb_set_mc_filter(dev, ha->addr);
1243 	}
1244 
1245 	netif_addr_unlock_bh(dev);
1246 	dvb_net_feed_start(dev);
1247 }
1248 
1249 
1250 static void dvb_net_set_multicast_list (struct net_device *dev)
1251 {
1252 	struct dvb_net_priv *priv = netdev_priv(dev);
1253 	schedule_work(&priv->set_multicast_list_wq);
1254 }
1255 
1256 
1257 static void wq_restart_net_feed (struct work_struct *work)
1258 {
1259 	struct dvb_net_priv *priv =
1260 		container_of(work, struct dvb_net_priv, restart_net_feed_wq);
1261 	struct net_device *dev = priv->net;
1262 
1263 	if (netif_running(dev)) {
1264 		dvb_net_feed_stop(dev);
1265 		dvb_net_feed_start(dev);
1266 	}
1267 }
1268 
1269 
1270 static int dvb_net_set_mac (struct net_device *dev, void *p)
1271 {
1272 	struct dvb_net_priv *priv = netdev_priv(dev);
1273 	struct sockaddr *addr=p;
1274 
1275 	eth_hw_addr_set(dev, addr->sa_data);
1276 
1277 	if (netif_running(dev))
1278 		schedule_work(&priv->restart_net_feed_wq);
1279 
1280 	return 0;
1281 }
1282 
1283 
1284 static int dvb_net_open(struct net_device *dev)
1285 {
1286 	struct dvb_net_priv *priv = netdev_priv(dev);
1287 
1288 	priv->in_use++;
1289 	dvb_net_feed_start(dev);
1290 	return 0;
1291 }
1292 
1293 
1294 static int dvb_net_stop(struct net_device *dev)
1295 {
1296 	struct dvb_net_priv *priv = netdev_priv(dev);
1297 
1298 	priv->in_use--;
1299 	return dvb_net_feed_stop(dev);
1300 }
1301 
1302 static const struct header_ops dvb_header_ops = {
1303 	.create		= eth_header,
1304 	.parse		= eth_header_parse,
1305 };
1306 
1307 
1308 static const struct net_device_ops dvb_netdev_ops = {
1309 	.ndo_open		= dvb_net_open,
1310 	.ndo_stop		= dvb_net_stop,
1311 	.ndo_start_xmit		= dvb_net_tx,
1312 	.ndo_set_rx_mode	= dvb_net_set_multicast_list,
1313 	.ndo_set_mac_address    = dvb_net_set_mac,
1314 	.ndo_validate_addr	= eth_validate_addr,
1315 };
1316 
1317 static void dvb_net_setup(struct net_device *dev)
1318 {
1319 	ether_setup(dev);
1320 
1321 	dev->header_ops		= &dvb_header_ops;
1322 	dev->netdev_ops		= &dvb_netdev_ops;
1323 	dev->mtu		= 4096;
1324 	dev->max_mtu		= 4096;
1325 
1326 	dev->flags |= IFF_NOARP;
1327 }
1328 
1329 static int get_if(struct dvb_net *dvbnet)
1330 {
1331 	int i;
1332 
1333 	for (i=0; i<DVB_NET_DEVICES_MAX; i++)
1334 		if (!dvbnet->state[i])
1335 			break;
1336 
1337 	if (i == DVB_NET_DEVICES_MAX)
1338 		return -1;
1339 
1340 	dvbnet->state[i]=1;
1341 	return i;
1342 }
1343 
1344 static int dvb_net_add_if(struct dvb_net *dvbnet, u16 pid, u8 feedtype)
1345 {
1346 	struct net_device *net;
1347 	struct dvb_net_priv *priv;
1348 	int result;
1349 	int if_num;
1350 
1351 	if (feedtype != DVB_NET_FEEDTYPE_MPE && feedtype != DVB_NET_FEEDTYPE_ULE)
1352 		return -EINVAL;
1353 	if ((if_num = get_if(dvbnet)) < 0)
1354 		return -EINVAL;
1355 
1356 	net = alloc_netdev(sizeof(struct dvb_net_priv), "dvb",
1357 			   NET_NAME_UNKNOWN, dvb_net_setup);
1358 	if (!net)
1359 		return -ENOMEM;
1360 
1361 	if (dvbnet->dvbdev->id)
1362 		snprintf(net->name, IFNAMSIZ, "dvb%d%u%d",
1363 			 dvbnet->dvbdev->adapter->num, dvbnet->dvbdev->id, if_num);
1364 	else
1365 		/* compatibility fix to keep dvb0_0 format */
1366 		snprintf(net->name, IFNAMSIZ, "dvb%d_%d",
1367 			 dvbnet->dvbdev->adapter->num, if_num);
1368 
1369 	net->addr_len = 6;
1370 	eth_hw_addr_set(net, dvbnet->dvbdev->adapter->proposed_mac);
1371 
1372 	dvbnet->device[if_num] = net;
1373 
1374 	priv = netdev_priv(net);
1375 	priv->net = net;
1376 	priv->demux = dvbnet->demux;
1377 	priv->pid = pid;
1378 	priv->rx_mode = RX_MODE_UNI;
1379 	priv->need_pusi = 1;
1380 	priv->tscc = 0;
1381 	priv->feedtype = feedtype;
1382 	reset_ule(priv);
1383 
1384 	INIT_WORK(&priv->set_multicast_list_wq, wq_set_multicast_list);
1385 	INIT_WORK(&priv->restart_net_feed_wq, wq_restart_net_feed);
1386 	mutex_init(&priv->mutex);
1387 
1388 	net->base_addr = pid;
1389 
1390 	if ((result = register_netdev(net)) < 0) {
1391 		dvbnet->device[if_num] = NULL;
1392 		free_netdev(net);
1393 		return result;
1394 	}
1395 	pr_info("created network interface %s\n", net->name);
1396 
1397 	return if_num;
1398 }
1399 
1400 static int dvb_net_remove_if(struct dvb_net *dvbnet, unsigned long num)
1401 {
1402 	struct net_device *net = dvbnet->device[num];
1403 	struct dvb_net_priv *priv;
1404 
1405 	if (!dvbnet->state[num])
1406 		return -EINVAL;
1407 	priv = netdev_priv(net);
1408 	if (priv->in_use)
1409 		return -EBUSY;
1410 
1411 	dvb_net_stop(net);
1412 	flush_work(&priv->set_multicast_list_wq);
1413 	flush_work(&priv->restart_net_feed_wq);
1414 	pr_info("removed network interface %s\n", net->name);
1415 	unregister_netdev(net);
1416 	dvbnet->state[num]=0;
1417 	dvbnet->device[num] = NULL;
1418 	free_netdev(net);
1419 
1420 	return 0;
1421 }
1422 
1423 static int dvb_net_do_ioctl(struct file *file,
1424 		  unsigned int cmd, void *parg)
1425 {
1426 	struct dvb_device *dvbdev = file->private_data;
1427 	struct dvb_net *dvbnet = dvbdev->priv;
1428 	int ret = 0;
1429 
1430 	if (((file->f_flags&O_ACCMODE)==O_RDONLY))
1431 		return -EPERM;
1432 
1433 	if (mutex_lock_interruptible(&dvbnet->ioctl_mutex))
1434 		return -ERESTARTSYS;
1435 
1436 	switch (cmd) {
1437 	case NET_ADD_IF:
1438 	{
1439 		struct dvb_net_if *dvbnetif = parg;
1440 		int result;
1441 
1442 		if (!capable(CAP_SYS_ADMIN)) {
1443 			ret = -EPERM;
1444 			goto ioctl_error;
1445 		}
1446 
1447 		if (!try_module_get(dvbdev->adapter->module)) {
1448 			ret = -EPERM;
1449 			goto ioctl_error;
1450 		}
1451 
1452 		result=dvb_net_add_if(dvbnet, dvbnetif->pid, dvbnetif->feedtype);
1453 		if (result<0) {
1454 			module_put(dvbdev->adapter->module);
1455 			ret = result;
1456 			goto ioctl_error;
1457 		}
1458 		dvbnetif->if_num=result;
1459 		break;
1460 	}
1461 	case NET_GET_IF:
1462 	{
1463 		struct net_device *netdev;
1464 		struct dvb_net_priv *priv_data;
1465 		struct dvb_net_if *dvbnetif = parg;
1466 		int if_num = dvbnetif->if_num;
1467 
1468 		if (if_num >= DVB_NET_DEVICES_MAX) {
1469 			ret = -EINVAL;
1470 			goto ioctl_error;
1471 		}
1472 		if_num = array_index_nospec(if_num, DVB_NET_DEVICES_MAX);
1473 
1474 		if (!dvbnet->state[if_num]) {
1475 			ret = -EINVAL;
1476 			goto ioctl_error;
1477 		}
1478 
1479 		netdev = dvbnet->device[if_num];
1480 
1481 		priv_data = netdev_priv(netdev);
1482 		dvbnetif->pid=priv_data->pid;
1483 		dvbnetif->feedtype=priv_data->feedtype;
1484 		break;
1485 	}
1486 	case NET_REMOVE_IF:
1487 	{
1488 		if (!capable(CAP_SYS_ADMIN)) {
1489 			ret = -EPERM;
1490 			goto ioctl_error;
1491 		}
1492 		if ((unsigned long) parg >= DVB_NET_DEVICES_MAX) {
1493 			ret = -EINVAL;
1494 			goto ioctl_error;
1495 		}
1496 		ret = dvb_net_remove_if(dvbnet, (unsigned long) parg);
1497 		if (!ret)
1498 			module_put(dvbdev->adapter->module);
1499 		break;
1500 	}
1501 
1502 	/* binary compatibility cruft */
1503 	case __NET_ADD_IF_OLD:
1504 	{
1505 		struct __dvb_net_if_old *dvbnetif = parg;
1506 		int result;
1507 
1508 		if (!capable(CAP_SYS_ADMIN)) {
1509 			ret = -EPERM;
1510 			goto ioctl_error;
1511 		}
1512 
1513 		if (!try_module_get(dvbdev->adapter->module)) {
1514 			ret = -EPERM;
1515 			goto ioctl_error;
1516 		}
1517 
1518 		result=dvb_net_add_if(dvbnet, dvbnetif->pid, DVB_NET_FEEDTYPE_MPE);
1519 		if (result<0) {
1520 			module_put(dvbdev->adapter->module);
1521 			ret = result;
1522 			goto ioctl_error;
1523 		}
1524 		dvbnetif->if_num=result;
1525 		break;
1526 	}
1527 	case __NET_GET_IF_OLD:
1528 	{
1529 		struct net_device *netdev;
1530 		struct dvb_net_priv *priv_data;
1531 		struct __dvb_net_if_old *dvbnetif = parg;
1532 		int if_num = dvbnetif->if_num;
1533 
1534 		if (if_num >= DVB_NET_DEVICES_MAX) {
1535 			ret = -EINVAL;
1536 			goto ioctl_error;
1537 		}
1538 		if_num = array_index_nospec(if_num, DVB_NET_DEVICES_MAX);
1539 
1540 		if (!dvbnet->state[if_num]) {
1541 			ret = -EINVAL;
1542 			goto ioctl_error;
1543 		}
1544 
1545 		netdev = dvbnet->device[if_num];
1546 
1547 		priv_data = netdev_priv(netdev);
1548 		dvbnetif->pid=priv_data->pid;
1549 		break;
1550 	}
1551 	default:
1552 		ret = -ENOTTY;
1553 		break;
1554 	}
1555 
1556 ioctl_error:
1557 	mutex_unlock(&dvbnet->ioctl_mutex);
1558 	return ret;
1559 }
1560 
1561 static long dvb_net_ioctl(struct file *file,
1562 	      unsigned int cmd, unsigned long arg)
1563 {
1564 	return dvb_usercopy(file, cmd, arg, dvb_net_do_ioctl);
1565 }
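/*
 * Userspace sketch (illustration only; device path and PID are assumed
 * values): how the ioctls dispatched above are typically used to create a
 * network interface that decapsulates MPE sections from one PID.  On
 * success the kernel side registers a netdevice named dvbX_Y (or dvbXYZ
 * for adapters with a non-zero id).  Error handling is kept minimal.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/dvb/net.h>

static int add_mpe_interface(void)
{
	struct dvb_net_if nif = {
		.pid = 0x03e8,				/* assumed example PID */
		.feedtype = DVB_NET_FEEDTYPE_MPE,
	};
	int fd = open("/dev/dvb/adapter0/net0", O_RDWR);

	if (fd < 0)
		return -1;
	if (ioctl(fd, NET_ADD_IF, &nif) < 0) {
		close(fd);
		return -1;
	}
	/* NET_REMOVE_IF with nif.if_num removes the interface again. */
	return nif.if_num;
}
#endif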
1566 
1567 static int locked_dvb_net_open(struct inode *inode, struct file *file)
1568 {
1569 	struct dvb_device *dvbdev = file->private_data;
1570 	struct dvb_net *dvbnet = dvbdev->priv;
1571 	int ret;
1572 
1573 	if (mutex_lock_interruptible(&dvbnet->remove_mutex))
1574 		return -ERESTARTSYS;
1575 
1576 	if (dvbnet->exit) {
1577 		mutex_unlock(&dvbnet->remove_mutex);
1578 		return -ENODEV;
1579 	}
1580 
1581 	ret = dvb_generic_open(inode, file);
1582 
1583 	mutex_unlock(&dvbnet->remove_mutex);
1584 
1585 	return ret;
1586 }
1587 
1588 static int dvb_net_close(struct inode *inode, struct file *file)
1589 {
1590 	struct dvb_device *dvbdev = file->private_data;
1591 	struct dvb_net *dvbnet = dvbdev->priv;
1592 
1593 	mutex_lock(&dvbnet->remove_mutex);
1594 
1595 	dvb_generic_release(inode, file);
1596 
1597 	if (dvbdev->users == 1 && dvbnet->exit == 1) {
1598 		mutex_unlock(&dvbnet->remove_mutex);
1599 		wake_up(&dvbdev->wait_queue);
1600 	} else {
1601 		mutex_unlock(&dvbnet->remove_mutex);
1602 	}
1603 
1604 	return 0;
1605 }
1606 
1607 
1608 static const struct file_operations dvb_net_fops = {
1609 	.owner = THIS_MODULE,
1610 	.unlocked_ioctl = dvb_net_ioctl,
1611 	.open =	locked_dvb_net_open,
1612 	.release = dvb_net_close,
1613 	.llseek = noop_llseek,
1614 };
1615 
1616 static const struct dvb_device dvbdev_net = {
1617 	.priv = NULL,
1618 	.users = 1,
1619 	.writers = 1,
1620 #if defined(CONFIG_MEDIA_CONTROLLER_DVB)
1621 	.name = "dvb-net",
1622 #endif
1623 	.fops = &dvb_net_fops,
1624 };
1625 
1626 void dvb_net_release (struct dvb_net *dvbnet)
1627 {
1628 	int i;
1629 
1630 	mutex_lock(&dvbnet->remove_mutex);
1631 	dvbnet->exit = 1;
1632 	mutex_unlock(&dvbnet->remove_mutex);
1633 
1634 	if (dvbnet->dvbdev->users < 1)
1635 		wait_event(dvbnet->dvbdev->wait_queue,
1636 				dvbnet->dvbdev->users == 1);
1637 
1638 	dvb_unregister_device(dvbnet->dvbdev);
1639 
1640 	for (i=0; i<DVB_NET_DEVICES_MAX; i++) {
1641 		if (!dvbnet->state[i])
1642 			continue;
1643 		dvb_net_remove_if(dvbnet, i);
1644 	}
1645 }
1646 EXPORT_SYMBOL(dvb_net_release);
1647 
1648 
1649 int dvb_net_init (struct dvb_adapter *adap, struct dvb_net *dvbnet,
1650 		  struct dmx_demux *dmx)
1651 {
1652 	int i;
1653 
1654 	mutex_init(&dvbnet->ioctl_mutex);
1655 	mutex_init(&dvbnet->remove_mutex);
1656 	dvbnet->demux = dmx;
1657 
1658 	for (i=0; i<DVB_NET_DEVICES_MAX; i++)
1659 		dvbnet->state[i] = 0;
1660 
1661 	return dvb_register_device(adap, &dvbnet->dvbdev, &dvbdev_net,
1662 			     dvbnet, DVB_DEVICE_NET, 0);
1663 }
1664 EXPORT_SYMBOL(dvb_net_init);
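
/*
 * Hook-up sketch (illustration only; "struct my_card" and its members are
 * hypothetical): adapter drivers call dvb_net_init() once their demux is
 * registered and dvb_net_release() on teardown, which is all that is
 * needed to expose the net device node implemented in this file.
 */
#if 0
static int my_card_attach_net(struct my_card *card)
{
	/* after dvb_register_adapter() and the demux are set up: */
	return dvb_net_init(&card->dvb_adapter, &card->dvbnet, &card->demux.dmx);
}

static void my_card_detach_net(struct my_card *card)
{
	dvb_net_release(&card->dvbnet);
}
#endif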
1665