/*
 * Intel IXP4xx HSS (synchronous serial port) driver for Linux
 *
 * Copyright (C) 2007-2008 Krzysztof Hałasa <khc@pm.waw.pl>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */

#include <linux/bitops.h>
#include <linux/cdev.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/fs.h>
#include <linux/hdlc.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/poll.h>
#include <mach/npe.h>
#include <mach/qmgr.h>

#define DEBUG_DESC		0
#define DEBUG_RX		0
#define DEBUG_TX		0
#define DEBUG_PKT_BYTES		0
#define DEBUG_CLOSE		0

#define DRV_NAME		"ixp4xx_hss"

#define PKT_EXTRA_FLAGS		0 /* orig 1 */
#define PKT_NUM_PIPES		1 /* 1, 2 or 4 */
#define PKT_PIPE_FIFO_SIZEW	4 /* total 4 dwords per HSS */

#define RX_DESCS		16 /* also length of all RX queues */
#define TX_DESCS		16 /* also length of all TX queues */

#define POOL_ALLOC_SIZE		(sizeof(struct desc) * (RX_DESCS + TX_DESCS))
#define RX_SIZE			(HDLC_MAX_MRU + 4) /* NPE needs more space */
#define MAX_CLOSE_WAIT		1000 /* microseconds */
#define HSS_COUNT		2
#define FRAME_SIZE		256 /* doesn't matter at this point */
#define FRAME_OFFSET		0
#define MAX_CHANNELS		(FRAME_SIZE / 8)

#define NAPI_WEIGHT		16

/* Queue IDs */
#define HSS0_CHL_RXTRIG_QUEUE	12	/* orig size = 32 dwords */
#define HSS0_PKT_RX_QUEUE	13	/* orig size = 32 dwords */
#define HSS0_PKT_TX0_QUEUE	14	/* orig size = 16 dwords */
#define HSS0_PKT_TX1_QUEUE	15
#define HSS0_PKT_TX2_QUEUE	16
#define HSS0_PKT_TX3_QUEUE	17
#define HSS0_PKT_RXFREE0_QUEUE	18	/* orig size = 16 dwords */
#define HSS0_PKT_RXFREE1_QUEUE	19
#define HSS0_PKT_RXFREE2_QUEUE	20
#define HSS0_PKT_RXFREE3_QUEUE	21
#define HSS0_PKT_TXDONE_QUEUE	22	/* orig size = 64 dwords */

#define HSS1_CHL_RXTRIG_QUEUE	10
#define HSS1_PKT_RX_QUEUE	0
#define HSS1_PKT_TX0_QUEUE	5
#define HSS1_PKT_TX1_QUEUE	6
#define HSS1_PKT_TX2_QUEUE	7
#define HSS1_PKT_TX3_QUEUE	8
#define HSS1_PKT_RXFREE0_QUEUE	1
#define HSS1_PKT_RXFREE1_QUEUE	2
#define HSS1_PKT_RXFREE2_QUEUE	3
#define HSS1_PKT_RXFREE3_QUEUE	4
#define HSS1_PKT_TXDONE_QUEUE	9

#define NPE_PKT_MODE_HDLC		0
#define NPE_PKT_MODE_RAW		1
#define NPE_PKT_MODE_56KMODE		2
#define NPE_PKT_MODE_56KENDIAN_MSB	4

/* PKT_PIPE_HDLC_CFG_WRITE flags */
#define PKT_HDLC_IDLE_ONES		0x1 /* default = flags */
#define PKT_HDLC_CRC_32			0x2 /* default = CRC-16 */
#define PKT_HDLC_MSB_ENDIAN		0x4 /* default = LE */


/* hss_config, PCRs */
/* Frame sync sampling, default = active low */
#define PCR_FRM_SYNC_ACTIVE_HIGH	0x40000000
#define PCR_FRM_SYNC_FALLINGEDGE	0x80000000
#define PCR_FRM_SYNC_RISINGEDGE		0xC0000000

/* Frame sync pin: input (default) or output generated off a given clk edge */
#define PCR_FRM_SYNC_OUTPUT_FALLING	0x20000000
#define PCR_FRM_SYNC_OUTPUT_RISING	0x30000000

/* Frame and data clock sampling on edge, default = falling */
#define PCR_FCLK_EDGE_RISING		0x08000000
#define PCR_DCLK_EDGE_RISING		0x04000000

/* Clock direction, default = input */
#define PCR_SYNC_CLK_DIR_OUTPUT		0x02000000

/* Generate/Receive frame pulses, default = enabled */
#define PCR_FRM_PULSE_DISABLED		0x01000000
/* Data rate is full (default) or half the configured clk speed */
#define PCR_HALF_CLK_RATE		0x00200000

/* Invert data between NPE and HSS FIFOs? (default = no) */
#define PCR_DATA_POLARITY_INVERT	0x00100000

/* TX/RX endianness, default = LSB */
#define PCR_MSB_ENDIAN			0x00080000

/* Normal (default) / open drain mode (TX only) */
#define PCR_TX_PINS_OPEN_DRAIN		0x00040000

/* No framing bit transmitted and expected on RX? (default = framing bit) */
#define PCR_SOF_NO_FBIT			0x00020000

/* Drive data pins? */
#define PCR_TX_DATA_ENABLE		0x00010000

/* Voice 56k type: drive the data pins low (default), high, high Z */
#define PCR_TX_V56K_HIGH		0x00002000
#define PCR_TX_V56K_HIGH_IMP		0x00004000

/* Unassigned type: drive the data pins low (default), high, high Z */
#define PCR_TX_UNASS_HIGH		0x00000800
#define PCR_TX_UNASS_HIGH_IMP		0x00001000

/* T1 @ 1.544MHz only: Fbit dictated in FIFO (default) or high Z */
#define PCR_TX_FB_HIGH_IMP		0x00000400

/* 56k data endianness - which bit unused: high (default) or low */
#define PCR_TX_56KE_BIT_0_UNUSED	0x00000200

/* 56k data transmission type: 32/8 bit data (default) or 56K data */
#define PCR_TX_56KS_56K_DATA		0x00000100

/* hss_config, cCR */
/* Number of packetized clients, default = 1 */
#define CCR_NPE_HFIFO_2_HDLC		0x04000000
#define CCR_NPE_HFIFO_3_OR_4HDLC	0x08000000

/* default = no loopback */
#define CCR_LOOPBACK			0x02000000

/* HSS number, default = 0 (first) */
#define CCR_SECOND_HSS			0x01000000


/* hss_config, clkCR: main:10, num:10, denom:12 */
#define CLK42X_SPEED_EXP	((0x3FF << 22) | (  2 << 12) |   15) /* 65 kHz */

#define CLK42X_SPEED_512KHZ	((  130 << 22) | (  2 << 12) |   15)
#define CLK42X_SPEED_1536KHZ	((   43 << 22) | ( 18 << 12) |   47)
#define CLK42X_SPEED_1544KHZ	((   43 << 22) | ( 33 << 12) |  192)
#define CLK42X_SPEED_2048KHZ	((   32 << 22) | ( 34 << 12) |   63)
#define CLK42X_SPEED_4096KHZ	((   16 << 22) | ( 34 << 12) |  127)
#define CLK42X_SPEED_8192KHZ	((    8 << 22) | ( 34 << 12) |  255)

#define CLK46X_SPEED_512KHZ	((  130 << 22) | ( 24 << 12) |  127)
#define CLK46X_SPEED_1536KHZ	((   43 << 22) | (152 << 12) |  383)
#define CLK46X_SPEED_1544KHZ	((   43 << 22) | ( 66 << 12) |  385)
#define CLK46X_SPEED_2048KHZ	((   32 << 22) | (280 << 12) |  511)
#define CLK46X_SPEED_4096KHZ	((   16 << 22) | (280 << 12) | 1023)
#define CLK46X_SPEED_8192KHZ	((    8 << 22) | (280 << 12) | 2047)
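
/* clkCR packs three fields, per the "main:10, num:10, denom:12" note
   above.  A sketch of how to unpack them (field names are ours, not
   taken from an Intel datasheet):

	main  = (cr >> 22) & 0x3FF;
	num   = (cr >> 12) & 0x3FF;
	denom =  cr        & 0xFFF;

   e.g. CLK42X_SPEED_2048KHZ encodes main = 32, num = 34, denom = 63.
   CLK42X/CLK46X presumably select between the different HSS base clocks
   of the IXP42x and IXP46x parts. */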


/* hss_config, LUT entries */
#define TDMMAP_UNASSIGNED	0
#define TDMMAP_HDLC		1	/* HDLC - packetized */
#define TDMMAP_VOICE56K		2	/* Voice56K - 7-bit channelized */
#define TDMMAP_VOICE64K		3	/* Voice64K - 8-bit channelized */

/* offsets into HSS config */
#define HSS_CONFIG_TX_PCR	0x00 /* port configuration registers */
#define HSS_CONFIG_RX_PCR	0x04
#define HSS_CONFIG_CORE_CR	0x08 /* loopback control, HSS# */
#define HSS_CONFIG_CLOCK_CR	0x0C /* clock generator control */
#define HSS_CONFIG_TX_FCR	0x10 /* frame configuration registers */
#define HSS_CONFIG_RX_FCR	0x14
#define HSS_CONFIG_TX_LUT	0x18 /* channel look-up tables */
#define HSS_CONFIG_RX_LUT	0x38


/* NPE command codes */
/* writes the ConfigWord value to the location specified by offset */
#define PORT_CONFIG_WRITE		0x40

/* triggers the NPE to load the contents of the configuration table */
#define PORT_CONFIG_LOAD		0x41

/* triggers the NPE to return an HssErrorReadResponse message */
#define PORT_ERROR_READ			0x42

/* triggers the NPE to reset internal status and enable the HssPacketized
   operation for the flow specified by pPipe */
#define PKT_PIPE_FLOW_ENABLE		0x50
#define PKT_PIPE_FLOW_DISABLE		0x51
#define PKT_NUM_PIPES_WRITE		0x52
#define PKT_PIPE_FIFO_SIZEW_WRITE	0x53
#define PKT_PIPE_HDLC_CFG_WRITE		0x54
#define PKT_PIPE_IDLE_PATTERN_WRITE	0x55
#define PKT_PIPE_RX_SIZE_WRITE		0x56
#define PKT_PIPE_MODE_WRITE		0x57

/* HDLC packet status values - desc->status */
#define ERR_SHUTDOWN		1 /* stop or shutdown occurrence */
#define ERR_HDLC_ALIGN		2 /* HDLC alignment error */
#define ERR_HDLC_FCS		3 /* HDLC Frame Check Sum error */
#define ERR_RXFREE_Q_EMPTY	4 /* RX-free queue became empty while receiving
				     this packet (if buf_len < pkt_len) */
#define ERR_HDLC_TOO_LONG	5 /* HDLC frame size too long */
#define ERR_HDLC_ABORT		6 /* abort sequence received */
#define ERR_DISCONNECTING	7 /* disconnect is in progress */


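/* Buffer handling depends on CPU endianness: on big-endian ARM (__ARMEB__)
   the NPE and the CPU agree on byte order, so sk_buffs are DMA-mapped
   directly; on little-endian the driver bounces frames through plain
   kmalloc() buffers, byte-swapping each 32-bit word with memcpy_swab32()
   (defined below). */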
#ifdef __ARMEB__
typedef struct sk_buff buffer_t;
#define free_buffer dev_kfree_skb
#define free_buffer_irq dev_kfree_skb_irq
#else
typedef void buffer_t;
#define free_buffer kfree
#define free_buffer_irq kfree
#endif

struct port {
	struct device *dev;
	struct npe *npe;
	struct net_device *netdev;
	struct napi_struct napi;
	struct hss_plat_info *plat;
	buffer_t *rx_buff_tab[RX_DESCS], *tx_buff_tab[TX_DESCS];
	struct desc *desc_tab;	/* coherent */
	u32 desc_tab_phys;
	unsigned int id;
	unsigned int clock_type, clock_rate, loopback;
	unsigned int initialized, carrier;
	u8 hdlc_cfg;
};

/* NPE message structure */
struct msg {
#ifdef __ARMEB__
	u8 cmd, unused, hss_port, index;
	union {
		struct { u8 data8a, data8b, data8c, data8d; };
		struct { u16 data16a, data16b; };
		struct { u32 data32; };
	};
#else
	u8 index, hss_port, unused, cmd;
	union {
		struct { u8 data8d, data8c, data8b, data8a; };
		struct { u16 data16b, data16a; };
		struct { u32 data32; };
	};
#endif
};
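
/* The NPE reads a message as two big-endian 32-bit words, hence the
   mirrored field order in the little-endian variant above: either way,
   cmd, index etc. land in the byte lanes the NPE firmware expects. */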

/* HDLC packet descriptor */
struct desc {
	u32 next;		/* pointer to next buffer, unused */

#ifdef __ARMEB__
	u16 buf_len;		/* buffer length */
	u16 pkt_len;		/* packet length */
	u32 data;		/* pointer to data buffer in RAM */
	u8 status;
	u8 error_count;
	u16 __reserved;
#else
	u16 pkt_len;		/* packet length */
	u16 buf_len;		/* buffer length */
	u32 data;		/* pointer to data buffer in RAM */
	u16 __reserved;
	u8 error_count;
	u8 status;
#endif
	u32 __reserved1[4];
};
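
/* A descriptor is eight 32-bit words (32 bytes) and the DMA pool below is
   created with 32-byte alignment, which is why queue entries are checked
   with BUG_ON(phys & 0x1F).  desc_tab holds the RX_DESCS RX descriptors
   followed by the TX_DESCS TX descriptors, as the accessors below encode. */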


#define rx_desc_phys(port, n)	((port)->desc_tab_phys +		\
				 (n) * sizeof(struct desc))
#define rx_desc_ptr(port, n)	(&(port)->desc_tab[n])

#define tx_desc_phys(port, n)	((port)->desc_tab_phys +		\
				 ((n) + RX_DESCS) * sizeof(struct desc))
#define tx_desc_ptr(port, n)	(&(port)->desc_tab[(n) + RX_DESCS])

/*****************************************************************************
 * global variables
 ****************************************************************************/

static int ports_open;
static struct dma_pool *dma_pool;
static spinlock_t npe_lock;

static const struct {
	int tx, txdone, rx, rxfree;
} queue_ids[2] = {{HSS0_PKT_TX0_QUEUE, HSS0_PKT_TXDONE_QUEUE, HSS0_PKT_RX_QUEUE,
		  HSS0_PKT_RXFREE0_QUEUE},
		 {HSS1_PKT_TX0_QUEUE, HSS1_PKT_TXDONE_QUEUE, HSS1_PKT_RX_QUEUE,
		  HSS1_PKT_RXFREE0_QUEUE},
};

/*****************************************************************************
 * utility functions
 ****************************************************************************/

static inline struct port *dev_to_port(struct net_device *dev)
{
	return dev_to_hdlc(dev)->priv;
}

#ifndef __ARMEB__
static inline void memcpy_swab32(u32 *dest, u32 *src, int cnt)
{
	int i;
	for (i = 0; i < cnt; i++)
		dest[i] = swab32(src[i]);
}
#endif

/*****************************************************************************
 * HSS access
 ****************************************************************************/

static void hss_npe_send(struct port *port, struct msg *msg, const char *what)
{
	u32 *val = (u32 *)msg;
	if (npe_send_message(port->npe, msg, what)) {
		printk(KERN_CRIT "HSS-%i: unable to send command [%08X:%08X]"
		       " to %s\n", port->id, val[0], val[1],
		       npe_name(port->npe));
		BUG();
	}
}

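/* Program the TX and RX channel look-up tables.  Each LUT entry is a 2-bit
   TDMMAP_* code, so 16 channels pack into one 32-bit configuration word:
   the loop shifts each new entry in from the top and flushes a full word
   to the NPE every 16 channels.  Here every channel is mapped to HDLC. */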
static void hss_config_set_lut(struct port *port)
{
	struct msg msg;
	int ch;

	memset(&msg, 0, sizeof(msg));
	msg.cmd = PORT_CONFIG_WRITE;
	msg.hss_port = port->id;

	for (ch = 0; ch < MAX_CHANNELS; ch++) {
		msg.data32 >>= 2;
		msg.data32 |= TDMMAP_HDLC << 30;

		if (ch % 16 == 15) {
			msg.index = HSS_CONFIG_TX_LUT + ((ch / 4) & ~3);
			hss_npe_send(port, &msg, "HSS_SET_TX_LUT");

			msg.index += HSS_CONFIG_RX_LUT - HSS_CONFIG_TX_LUT;
			hss_npe_send(port, &msg, "HSS_SET_RX_LUT");
		}
	}
}

static void hss_config(struct port *port)
{
	struct msg msg;

	memset(&msg, 0, sizeof(msg));
	msg.cmd = PORT_CONFIG_WRITE;
	msg.hss_port = port->id;
	msg.index = HSS_CONFIG_TX_PCR;
	msg.data32 = PCR_FRM_SYNC_OUTPUT_RISING | PCR_MSB_ENDIAN |
		PCR_TX_DATA_ENABLE | PCR_SOF_NO_FBIT;
	if (port->clock_type == CLOCK_INT)
		msg.data32 |= PCR_SYNC_CLK_DIR_OUTPUT;
	hss_npe_send(port, &msg, "HSS_SET_TX_PCR");

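	/* The RX PCR reuses the TX value with two bits toggled: TX data
	   drive is dropped and the data clock sampling edge is flipped. */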
	msg.index = HSS_CONFIG_RX_PCR;
	msg.data32 ^= PCR_TX_DATA_ENABLE | PCR_DCLK_EDGE_RISING;
	hss_npe_send(port, &msg, "HSS_SET_RX_PCR");

	memset(&msg, 0, sizeof(msg));
	msg.cmd = PORT_CONFIG_WRITE;
	msg.hss_port = port->id;
	msg.index = HSS_CONFIG_CORE_CR;
	msg.data32 = (port->loopback ? CCR_LOOPBACK : 0) |
		(port->id ? CCR_SECOND_HSS : 0);
	hss_npe_send(port, &msg, "HSS_SET_CORE_CR");

	memset(&msg, 0, sizeof(msg));
	msg.cmd = PORT_CONFIG_WRITE;
	msg.hss_port = port->id;
	msg.index = HSS_CONFIG_CLOCK_CR;
	msg.data32 = CLK42X_SPEED_2048KHZ /* FIXME */;
	hss_npe_send(port, &msg, "HSS_SET_CLOCK_CR");

	memset(&msg, 0, sizeof(msg));
	msg.cmd = PORT_CONFIG_WRITE;
	msg.hss_port = port->id;
	msg.index = HSS_CONFIG_TX_FCR;
	msg.data16a = FRAME_OFFSET;
	msg.data16b = FRAME_SIZE - 1;
	hss_npe_send(port, &msg, "HSS_SET_TX_FCR");

	memset(&msg, 0, sizeof(msg));
	msg.cmd = PORT_CONFIG_WRITE;
	msg.hss_port = port->id;
	msg.index = HSS_CONFIG_RX_FCR;
	msg.data16a = FRAME_OFFSET;
	msg.data16b = FRAME_SIZE - 1;
	hss_npe_send(port, &msg, "HSS_SET_RX_FCR");

	hss_config_set_lut(port);

	memset(&msg, 0, sizeof(msg));
	msg.cmd = PORT_CONFIG_LOAD;
	msg.hss_port = port->id;
	hss_npe_send(port, &msg, "HSS_LOAD_CONFIG");

	if (npe_recv_message(port->npe, &msg, "HSS_LOAD_CONFIG") ||
	    /* HSS_LOAD_CONFIG for port #1 returns port_id = #4 */
	    msg.cmd != PORT_CONFIG_LOAD || msg.data32) {
		printk(KERN_CRIT "HSS-%i: HSS_LOAD_CONFIG failed\n",
		       port->id);
		BUG();
	}

	/* HDLC may stop working without this - check FIXME */
	npe_recv_message(port->npe, &msg, "FLUSH_IT");
}

static void hss_set_hdlc_cfg(struct port *port)
{
	struct msg msg;

	memset(&msg, 0, sizeof(msg));
	msg.cmd = PKT_PIPE_HDLC_CFG_WRITE;
	msg.hss_port = port->id;
	msg.data8a = port->hdlc_cfg; /* rx_cfg */
	msg.data8b = port->hdlc_cfg | (PKT_EXTRA_FLAGS << 3); /* tx_cfg */
	hss_npe_send(port, &msg, "HSS_SET_HDLC_CFG");
}

static u32 hss_get_status(struct port *port)
{
	struct msg msg;

	memset(&msg, 0, sizeof(msg));
	msg.cmd = PORT_ERROR_READ;
	msg.hss_port = port->id;
	hss_npe_send(port, &msg, "PORT_ERROR_READ");
	if (npe_recv_message(port->npe, &msg, "PORT_ERROR_READ")) {
		printk(KERN_CRIT "HSS-%i: unable to read HSS status\n",
		       port->id);
		BUG();
	}

	return msg.data32;
}

static void hss_start_hdlc(struct port *port)
{
	struct msg msg;

	memset(&msg, 0, sizeof(msg));
	msg.cmd = PKT_PIPE_FLOW_ENABLE;
	msg.hss_port = port->id;
	msg.data32 = 0;
	hss_npe_send(port, &msg, "HSS_ENABLE_PKT_PIPE");
}

static void hss_stop_hdlc(struct port *port)
{
	struct msg msg;

	memset(&msg, 0, sizeof(msg));
	msg.cmd = PKT_PIPE_FLOW_DISABLE;
	msg.hss_port = port->id;
	hss_npe_send(port, &msg, "HSS_DISABLE_PKT_PIPE");
	hss_get_status(port); /* make sure it's halted */
}

static int hss_load_firmware(struct port *port)
{
	struct msg msg;
	int err;

	if (port->initialized)
		return 0;

	if (!npe_running(port->npe) &&
	    (err = npe_load_firmware(port->npe, npe_name(port->npe),
				     port->dev)))
		return err;

	/* HDLC mode configuration */
	memset(&msg, 0, sizeof(msg));
	msg.cmd = PKT_NUM_PIPES_WRITE;
	msg.hss_port = port->id;
	msg.data8a = PKT_NUM_PIPES;
	hss_npe_send(port, &msg, "HSS_SET_PKT_PIPES");

	msg.cmd = PKT_PIPE_FIFO_SIZEW_WRITE;
	msg.data8a = PKT_PIPE_FIFO_SIZEW;
	hss_npe_send(port, &msg, "HSS_SET_PKT_FIFO");

	msg.cmd = PKT_PIPE_MODE_WRITE;
	msg.data8a = NPE_PKT_MODE_HDLC;
	/* msg.data8b = inv_mask */
	/* msg.data8c = or_mask */
	hss_npe_send(port, &msg, "HSS_SET_PKT_MODE");

	msg.cmd = PKT_PIPE_RX_SIZE_WRITE;
	msg.data16a = HDLC_MAX_MRU; /* including CRC */
	hss_npe_send(port, &msg, "HSS_SET_PKT_RX_SIZE");

	msg.cmd = PKT_PIPE_IDLE_PATTERN_WRITE;
	msg.data32 = 0x7F7F7F7F; /* ??? FIXME */
	hss_npe_send(port, &msg, "HSS_SET_PKT_IDLE");

	port->initialized = 1;
	return 0;
}

/*****************************************************************************
 * packetized (HDLC) operation
 ****************************************************************************/

static inline void debug_pkt(struct net_device *dev, const char *func,
			     u8 *data, int len)
{
#if DEBUG_PKT_BYTES
	int i;

	printk(KERN_DEBUG "%s: %s(%i)", dev->name, func, len);
	for (i = 0; i < len; i++) {
		if (i >= DEBUG_PKT_BYTES)
			break;
		printk("%s%02X", !(i % 4) ? " " : "", data[i]);
	}
	printk("\n");
#endif
}


static inline void debug_desc(u32 phys, struct desc *desc)
{
#if DEBUG_DESC
	printk(KERN_DEBUG "%X: %X %3X %3X %08X %X %X\n",
	       phys, desc->next, desc->buf_len, desc->pkt_len,
	       desc->data, desc->status, desc->error_count);
#endif
}

static inline int queue_get_desc(unsigned int queue, struct port *port,
				 int is_tx)
{
	u32 phys, tab_phys, n_desc;
	struct desc *tab;

	if (!(phys = qmgr_get_entry(queue)))
		return -1;

	BUG_ON(phys & 0x1F);
	tab_phys = is_tx ? tx_desc_phys(port, 0) : rx_desc_phys(port, 0);
	tab = is_tx ? tx_desc_ptr(port, 0) : rx_desc_ptr(port, 0);
	n_desc = (phys - tab_phys) / sizeof(struct desc);
	BUG_ON(n_desc >= (is_tx ? TX_DESCS : RX_DESCS));
	debug_desc(phys, &tab[n_desc]);
	BUG_ON(tab[n_desc].next);
	return n_desc;
}

static inline void queue_put_desc(unsigned int queue, u32 phys,
				  struct desc *desc)
{
	debug_desc(phys, desc);
	BUG_ON(phys & 0x1F);
	qmgr_put_entry(queue, phys);
	BUG_ON(qmgr_stat_overflow(queue));
}


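/* On little-endian builds the TX data may start at a non-word-aligned
   address; hss_hdlc_xmit() stores that 0-3 byte offset in the low bits
   of desc->data, so the unmap below masks it off and unmaps the full
   word-aligned region that was originally mapped. */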
587 {
588 #ifdef __ARMEB__
589 	dma_unmap_single(&port->netdev->dev, desc->data,
590 			 desc->buf_len, DMA_TO_DEVICE);
591 #else
592 	dma_unmap_single(&port->netdev->dev, desc->data & ~3,
593 			 ALIGN((desc->data & 3) + desc->buf_len, 4),
594 			 DMA_TO_DEVICE);
595 #endif
596 }
597 
598 
599 static void hss_hdlc_set_carrier(void *pdev, int carrier)
600 {
601 	struct net_device *netdev = pdev;
602 	struct port *port = dev_to_port(netdev);
603 	unsigned long flags;
604 
605 	spin_lock_irqsave(&npe_lock, flags);
606 	port->carrier = carrier;
607 	if (!port->loopback) {
608 		if (carrier)
609 			netif_carrier_on(netdev);
610 		else
611 			netif_carrier_off(netdev);
612 	}
613 	spin_unlock_irqrestore(&npe_lock, flags);
614 }
615 
616 static void hss_hdlc_rx_irq(void *pdev)
617 {
618 	struct net_device *dev = pdev;
619 	struct port *port = dev_to_port(dev);
620 
621 #if DEBUG_RX
622 	printk(KERN_DEBUG "%s: hss_hdlc_rx_irq\n", dev->name);
623 #endif
624 	qmgr_disable_irq(queue_ids[port->id].rx);
625 	napi_schedule(&port->napi);
626 }
627 
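/* NAPI poll handler.  Note the completion dance when the RX queue runs
   dry: after napi_complete() the queue IRQ is re-enabled, then the queue
   is checked once more and polling resumes if an entry sneaked in
   meanwhile; otherwise a frame arriving in that window could sit
   unprocessed until the next interrupt. */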
static int hss_hdlc_poll(struct napi_struct *napi, int budget)
{
	struct port *port = container_of(napi, struct port, napi);
	struct net_device *dev = port->netdev;
	unsigned int rxq = queue_ids[port->id].rx;
	unsigned int rxfreeq = queue_ids[port->id].rxfree;
	int received = 0;

#if DEBUG_RX
	printk(KERN_DEBUG "%s: hss_hdlc_poll\n", dev->name);
#endif

	while (received < budget) {
		struct sk_buff *skb;
		struct desc *desc;
		int n;
#ifdef __ARMEB__
		struct sk_buff *temp;
		u32 phys;
#endif

		if ((n = queue_get_desc(rxq, port, 0)) < 0) {
#if DEBUG_RX
			printk(KERN_DEBUG "%s: hss_hdlc_poll"
			       " napi_complete\n", dev->name);
#endif
			napi_complete(napi);
			qmgr_enable_irq(rxq);
			if (!qmgr_stat_empty(rxq) &&
			    napi_reschedule(napi)) {
#if DEBUG_RX
				printk(KERN_DEBUG "%s: hss_hdlc_poll"
				       " napi_reschedule succeeded\n",
				       dev->name);
#endif
				qmgr_disable_irq(rxq);
				continue;
			}
#if DEBUG_RX
			printk(KERN_DEBUG "%s: hss_hdlc_poll all done\n",
			       dev->name);
#endif
			return received; /* all work done */
		}

		desc = rx_desc_ptr(port, n);
#if 0 /* FIXME - error_count counts modulo 256, perhaps we should use it */
		if (desc->error_count)
			printk(KERN_DEBUG "%s: hss_hdlc_poll status 0x%02X"
			       " errors %u\n", dev->name, desc->status,
			       desc->error_count);
#endif
		skb = NULL;
		switch (desc->status) {
		case 0:
#ifdef __ARMEB__
			if ((skb = netdev_alloc_skb(dev, RX_SIZE)) != NULL) {
				phys = dma_map_single(&dev->dev, skb->data,
						      RX_SIZE,
						      DMA_FROM_DEVICE);
				if (dma_mapping_error(&dev->dev, phys)) {
					dev_kfree_skb(skb);
					skb = NULL;
				}
			}
#else
			skb = netdev_alloc_skb(dev, desc->pkt_len);
#endif
			if (!skb)
				dev->stats.rx_dropped++;
			break;
		case ERR_HDLC_ALIGN:
		case ERR_HDLC_ABORT:
			dev->stats.rx_frame_errors++;
			dev->stats.rx_errors++;
			break;
		case ERR_HDLC_FCS:
			dev->stats.rx_crc_errors++;
			dev->stats.rx_errors++;
			break;
		case ERR_HDLC_TOO_LONG:
			dev->stats.rx_length_errors++;
			dev->stats.rx_errors++;
			break;
		default:	/* FIXME - remove printk */
			printk(KERN_ERR "%s: hss_hdlc_poll: status 0x%02X"
			       " errors %u\n", dev->name, desc->status,
			       desc->error_count);
			dev->stats.rx_errors++;
		}

		if (!skb) {
			/* put the desc back on RX-ready queue */
			desc->buf_len = RX_SIZE;
			desc->pkt_len = desc->status = 0;
			queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);
			continue;
		}

		/* process received frame */
#ifdef __ARMEB__
		temp = skb;
		skb = port->rx_buff_tab[n];
		dma_unmap_single(&dev->dev, desc->data,
				 RX_SIZE, DMA_FROM_DEVICE);
#else
		dma_sync_single(&dev->dev, desc->data,
				RX_SIZE, DMA_FROM_DEVICE);
		memcpy_swab32((u32 *)skb->data, (u32 *)port->rx_buff_tab[n],
			      ALIGN(desc->pkt_len, 4) / 4);
#endif
		skb_put(skb, desc->pkt_len);

		debug_pkt(dev, "hss_hdlc_poll", skb->data, skb->len);

		skb->protocol = hdlc_type_trans(skb, dev);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += skb->len;
		netif_receive_skb(skb);

		/* put the new buffer on RX-free queue */
#ifdef __ARMEB__
		port->rx_buff_tab[n] = temp;
		desc->data = phys;
#endif
		desc->buf_len = RX_SIZE;
		desc->pkt_len = 0;
		queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);
		received++;
	}
#if DEBUG_RX
	printk(KERN_DEBUG "hss_hdlc_poll: end, not all work done\n");
#endif
	return received;	/* not all work done */
}


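/* TX completion: runs from the TX-done queue interrupt, unmaps and frees
   each transmitted buffer, recycles its descriptor onto the TX-ready
   queue, and wakes the netdev queue if it had been stopped while the
   ready queue was empty. */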
static void hss_hdlc_txdone_irq(void *pdev)
{
	struct net_device *dev = pdev;
	struct port *port = dev_to_port(dev);
	int n_desc;

#if DEBUG_TX
	printk(KERN_DEBUG DRV_NAME ": hss_hdlc_txdone_irq\n");
#endif
	while ((n_desc = queue_get_desc(queue_ids[port->id].txdone,
					port, 1)) >= 0) {
		struct desc *desc;
		int start;

		desc = tx_desc_ptr(port, n_desc);

		dev->stats.tx_packets++;
		dev->stats.tx_bytes += desc->pkt_len;

		dma_unmap_tx(port, desc);
#if DEBUG_TX
		printk(KERN_DEBUG "%s: hss_hdlc_txdone_irq free %p\n",
		       dev->name, port->tx_buff_tab[n_desc]);
#endif
		free_buffer_irq(port->tx_buff_tab[n_desc]);
		port->tx_buff_tab[n_desc] = NULL;

		start = qmgr_stat_empty(port->plat->txreadyq);
		queue_put_desc(port->plat->txreadyq,
			       tx_desc_phys(port, n_desc), desc);
		if (start) {
#if DEBUG_TX
			printk(KERN_DEBUG "%s: hss_hdlc_txdone_irq xmit"
			       " ready\n", dev->name);
#endif
			netif_wake_queue(dev);
		}
	}
}

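/* Transmit path.  On big-endian the skb is DMA-mapped in place; on
   little-endian the frame is first copied into a word-aligned kmalloc()
   bounce buffer with each 32-bit word byte-swapped, which is the layout
   the NPE expects (see memcpy_swab32() above). */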
static int hss_hdlc_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct port *port = dev_to_port(dev);
	unsigned int txreadyq = port->plat->txreadyq;
	int len, offset, bytes, n;
	void *mem;
	u32 phys;
	struct desc *desc;

#if DEBUG_TX
	printk(KERN_DEBUG "%s: hss_hdlc_xmit\n", dev->name);
#endif

	if (unlikely(skb->len > HDLC_MAX_MRU)) {
		dev_kfree_skb(skb);
		dev->stats.tx_errors++;
		return NETDEV_TX_OK;
	}

	debug_pkt(dev, "hss_hdlc_xmit", skb->data, skb->len);

	len = skb->len;
#ifdef __ARMEB__
	offset = 0; /* no need to keep alignment */
	bytes = len;
	mem = skb->data;
#else
	offset = (int)skb->data & 3; /* keep 32-bit alignment */
	bytes = ALIGN(offset + len, 4);
	if (!(mem = kmalloc(bytes, GFP_ATOMIC))) {
		dev_kfree_skb(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}
	memcpy_swab32(mem, (u32 *)((int)skb->data & ~3), bytes / 4);
	dev_kfree_skb(skb);
#endif

	phys = dma_map_single(&dev->dev, mem, bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(&dev->dev, phys)) {
#ifdef __ARMEB__
		dev_kfree_skb(skb);
#else
		kfree(mem);
#endif
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	n = queue_get_desc(txreadyq, port, 1);
	BUG_ON(n < 0);
	desc = tx_desc_ptr(port, n);

#ifdef __ARMEB__
	port->tx_buff_tab[n] = skb;
#else
	port->tx_buff_tab[n] = mem;
#endif
	desc->data = phys + offset;
	desc->buf_len = desc->pkt_len = len;

	wmb();
	queue_put_desc(queue_ids[port->id].tx, tx_desc_phys(port, n), desc);
	dev->trans_start = jiffies;

	if (qmgr_stat_empty(txreadyq)) {
#if DEBUG_TX
		printk(KERN_DEBUG "%s: hss_hdlc_xmit queue full\n", dev->name);
#endif
		netif_stop_queue(dev);
		/* we could have missed the TX-ready interrupt */
		if (!qmgr_stat_empty(txreadyq)) {
#if DEBUG_TX
			printk(KERN_DEBUG "%s: hss_hdlc_xmit ready again\n",
			       dev->name);
#endif
			netif_wake_queue(dev);
		}
	}

#if DEBUG_TX
	printk(KERN_DEBUG "%s: hss_hdlc_xmit end\n", dev->name);
#endif
	return NETDEV_TX_OK;
}


static int request_hdlc_queues(struct port *port)
{
	int err;

	err = qmgr_request_queue(queue_ids[port->id].rxfree, RX_DESCS, 0, 0,
				 "%s:RX-free", port->netdev->name);
	if (err)
		return err;

	err = qmgr_request_queue(queue_ids[port->id].rx, RX_DESCS, 0, 0,
				 "%s:RX", port->netdev->name);
	if (err)
		goto rel_rxfree;

	err = qmgr_request_queue(queue_ids[port->id].tx, TX_DESCS, 0, 0,
				 "%s:TX", port->netdev->name);
	if (err)
		goto rel_rx;

	err = qmgr_request_queue(port->plat->txreadyq, TX_DESCS, 0, 0,
				 "%s:TX-ready", port->netdev->name);
	if (err)
		goto rel_tx;

	err = qmgr_request_queue(queue_ids[port->id].txdone, TX_DESCS, 0, 0,
				 "%s:TX-done", port->netdev->name);
	if (err)
		goto rel_txready;
	return 0;

rel_txready:
	qmgr_release_queue(port->plat->txreadyq);
rel_tx:
	qmgr_release_queue(queue_ids[port->id].tx);
rel_rx:
	qmgr_release_queue(queue_ids[port->id].rx);
rel_rxfree:
	qmgr_release_queue(queue_ids[port->id].rxfree);
	printk(KERN_DEBUG "%s: unable to request hardware queues\n",
	       port->netdev->name);
	return err;
}

static void release_hdlc_queues(struct port *port)
{
	qmgr_release_queue(queue_ids[port->id].rxfree);
	qmgr_release_queue(queue_ids[port->id].rx);
	qmgr_release_queue(queue_ids[port->id].txdone);
	qmgr_release_queue(queue_ids[port->id].tx);
	qmgr_release_queue(port->plat->txreadyq);
}

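/* The descriptor DMA pool is shared by both HSS ports: it is created when
   the first port is opened (ports_open == 0) and destroyed by
   destroy_hdlc_queues() once the last port is closed. */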
static int init_hdlc_queues(struct port *port)
{
	int i;

	if (!ports_open)
		if (!(dma_pool = dma_pool_create(DRV_NAME, NULL,
						 POOL_ALLOC_SIZE, 32, 0)))
			return -ENOMEM;

	if (!(port->desc_tab = dma_pool_alloc(dma_pool, GFP_KERNEL,
					      &port->desc_tab_phys)))
		return -ENOMEM;
	memset(port->desc_tab, 0, POOL_ALLOC_SIZE);
	memset(port->rx_buff_tab, 0, sizeof(port->rx_buff_tab)); /* tables */
	memset(port->tx_buff_tab, 0, sizeof(port->tx_buff_tab));

	/* Setup RX buffers */
	for (i = 0; i < RX_DESCS; i++) {
		struct desc *desc = rx_desc_ptr(port, i);
		buffer_t *buff;
		void *data;
#ifdef __ARMEB__
		if (!(buff = netdev_alloc_skb(port->netdev, RX_SIZE)))
			return -ENOMEM;
		data = buff->data;
#else
		if (!(buff = kmalloc(RX_SIZE, GFP_KERNEL)))
			return -ENOMEM;
		data = buff;
#endif
		desc->buf_len = RX_SIZE;
		desc->data = dma_map_single(&port->netdev->dev, data,
					    RX_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(&port->netdev->dev, desc->data)) {
			free_buffer(buff);
			return -EIO;
		}
		port->rx_buff_tab[i] = buff;
	}

	return 0;
}

static void destroy_hdlc_queues(struct port *port)
{
	int i;

	if (port->desc_tab) {
		for (i = 0; i < RX_DESCS; i++) {
			struct desc *desc = rx_desc_ptr(port, i);
			buffer_t *buff = port->rx_buff_tab[i];
			if (buff) {
				dma_unmap_single(&port->netdev->dev,
						 desc->data, RX_SIZE,
						 DMA_FROM_DEVICE);
				free_buffer(buff);
			}
		}
		for (i = 0; i < TX_DESCS; i++) {
			struct desc *desc = tx_desc_ptr(port, i);
			buffer_t *buff = port->tx_buff_tab[i];
			if (buff) {
				dma_unmap_tx(port, desc);
				free_buffer(buff);
			}
		}
		dma_pool_free(dma_pool, port->desc_tab, port->desc_tab_phys);
		port->desc_tab = NULL;
	}

	if (!ports_open && dma_pool) {
		dma_pool_destroy(dma_pool);
		dma_pool = NULL;
	}
}

static int hss_hdlc_open(struct net_device *dev)
{
	struct port *port = dev_to_port(dev);
	unsigned long flags;
	int i, err = 0;

	if ((err = hdlc_open(dev)))
		return err;

	if ((err = hss_load_firmware(port)))
		goto err_hdlc_close;

	if ((err = request_hdlc_queues(port)))
		goto err_hdlc_close;

	if ((err = init_hdlc_queues(port)))
		goto err_destroy_queues;

	spin_lock_irqsave(&npe_lock, flags);
	if (port->plat->open)
		if ((err = port->plat->open(port->id, dev,
					    hss_hdlc_set_carrier)))
			goto err_unlock;
	spin_unlock_irqrestore(&npe_lock, flags);

	/* Populate queues with buffers, no failure after this point */
	for (i = 0; i < TX_DESCS; i++)
		queue_put_desc(port->plat->txreadyq,
			       tx_desc_phys(port, i), tx_desc_ptr(port, i));

	for (i = 0; i < RX_DESCS; i++)
		queue_put_desc(queue_ids[port->id].rxfree,
			       rx_desc_phys(port, i), rx_desc_ptr(port, i));

	napi_enable(&port->napi);
	netif_start_queue(dev);

	qmgr_set_irq(queue_ids[port->id].rx, QUEUE_IRQ_SRC_NOT_EMPTY,
		     hss_hdlc_rx_irq, dev);

	qmgr_set_irq(queue_ids[port->id].txdone, QUEUE_IRQ_SRC_NOT_EMPTY,
		     hss_hdlc_txdone_irq, dev);
	qmgr_enable_irq(queue_ids[port->id].txdone);

	ports_open++;

	hss_set_hdlc_cfg(port);
	hss_config(port);

	hss_start_hdlc(port);

	/* we may already have RX data, enables IRQ */
	napi_schedule(&port->napi);
	return 0;

err_unlock:
	spin_unlock_irqrestore(&npe_lock, flags);
err_destroy_queues:
	destroy_hdlc_queues(port);
	release_hdlc_queues(port);
err_hdlc_close:
	hdlc_close(dev);
	return err;
}

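/* Shutdown: after the HDLC flow is stopped, all descriptors are drained
   back from the NPE queues; the TX-ready queue is polled up to
   MAX_CLOSE_WAIT times since completions may still be in flight, and
   anything left afterwards is reported as stuck in the NPE. */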
static int hss_hdlc_close(struct net_device *dev)
{
	struct port *port = dev_to_port(dev);
	unsigned long flags;
	int i, buffs = RX_DESCS; /* allocated RX buffers */

	spin_lock_irqsave(&npe_lock, flags);
	ports_open--;
	qmgr_disable_irq(queue_ids[port->id].rx);
	netif_stop_queue(dev);
	napi_disable(&port->napi);

	hss_stop_hdlc(port);

	while (queue_get_desc(queue_ids[port->id].rxfree, port, 0) >= 0)
		buffs--;
	while (queue_get_desc(queue_ids[port->id].rx, port, 0) >= 0)
		buffs--;

	if (buffs)
		printk(KERN_CRIT "%s: unable to drain RX queue, %i buffer(s)"
		       " left in NPE\n", dev->name, buffs);

	buffs = TX_DESCS;
	while (queue_get_desc(queue_ids[port->id].tx, port, 1) >= 0)
		buffs--; /* cancel TX */

	i = 0;
	do {
		while (queue_get_desc(port->plat->txreadyq, port, 1) >= 0)
			buffs--;
		if (!buffs)
			break;
	} while (++i < MAX_CLOSE_WAIT);

	if (buffs)
		printk(KERN_CRIT "%s: unable to drain TX queue, %i buffer(s) "
		       "left in NPE\n", dev->name, buffs);
#if DEBUG_CLOSE
	if (!buffs)
		printk(KERN_DEBUG "Draining TX queues took %i cycles\n", i);
#endif
	qmgr_disable_irq(queue_ids[port->id].txdone);

	if (port->plat->close)
		port->plat->close(port->id, dev);
	spin_unlock_irqrestore(&npe_lock, flags);

	destroy_hdlc_queues(port);
	release_hdlc_queues(port);
	hdlc_close(dev);
	return 0;
}


static int hss_hdlc_attach(struct net_device *dev, unsigned short encoding,
			   unsigned short parity)
{
	struct port *port = dev_to_port(dev);

	if (encoding != ENCODING_NRZ)
		return -EINVAL;

	switch (parity) {
	case PARITY_CRC16_PR1_CCITT:
		port->hdlc_cfg = 0;
		return 0;

	case PARITY_CRC32_PR1_CCITT:
		port->hdlc_cfg = PKT_HDLC_CRC_32;
		return 0;

	default:
		return -EINVAL;
	}
}


static int hss_hdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	const size_t size = sizeof(sync_serial_settings);
	sync_serial_settings new_line;
	sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync;
	struct port *port = dev_to_port(dev);
	unsigned long flags;
	int clk;

	if (cmd != SIOCWANDEV)
		return hdlc_ioctl(dev, ifr, cmd);

	switch (ifr->ifr_settings.type) {
	case IF_GET_IFACE:
		ifr->ifr_settings.type = IF_IFACE_V35;
		if (ifr->ifr_settings.size < size) {
			ifr->ifr_settings.size = size; /* data size wanted */
			return -ENOBUFS;
		}
		memset(&new_line, 0, sizeof(new_line));
		new_line.clock_type = port->clock_type;
		new_line.clock_rate = 2048000; /* FIXME */
		new_line.loopback = port->loopback;
		if (copy_to_user(line, &new_line, size))
			return -EFAULT;
		return 0;

	case IF_IFACE_SYNC_SERIAL:
	case IF_IFACE_V35:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (copy_from_user(&new_line, line, size))
			return -EFAULT;

		clk = new_line.clock_type;
		if (port->plat->set_clock)
			clk = port->plat->set_clock(port->id, clk);

		if (clk != CLOCK_EXT && clk != CLOCK_INT)
			return -EINVAL;	/* No such clock setting */

		if (new_line.loopback != 0 && new_line.loopback != 1)
			return -EINVAL;

		port->clock_type = clk; /* Update settings */
		/* FIXME port->clock_rate = new_line.clock_rate */
		port->loopback = new_line.loopback;

		spin_lock_irqsave(&npe_lock, flags);

		if (dev->flags & IFF_UP)
			hss_config(port);

		if (port->loopback || port->carrier)
			netif_carrier_on(port->netdev);
		else
			netif_carrier_off(port->netdev);
		spin_unlock_irqrestore(&npe_lock, flags);

		return 0;

	default:
		return hdlc_ioctl(dev, ifr, cmd);
	}
}

/*****************************************************************************
 * initialization
 ****************************************************************************/

static const struct net_device_ops hss_hdlc_ops = {
	.ndo_open       = hss_hdlc_open,
	.ndo_stop       = hss_hdlc_close,
	.ndo_change_mtu = hdlc_change_mtu,
	.ndo_start_xmit = hdlc_start_xmit,
	.ndo_do_ioctl   = hss_hdlc_ioctl,
};

static int __devinit hss_init_one(struct platform_device *pdev)
{
	struct port *port;
	struct net_device *dev;
	hdlc_device *hdlc;
	int err;

	if ((port = kzalloc(sizeof(*port), GFP_KERNEL)) == NULL)
		return -ENOMEM;

	if ((port->npe = npe_request(0)) == NULL) {
		err = -ENODEV;
		goto err_free;
	}

	if ((port->netdev = dev = alloc_hdlcdev(port)) == NULL) {
		err = -ENOMEM;
		goto err_plat;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);
	hdlc = dev_to_hdlc(dev);
	hdlc->attach = hss_hdlc_attach;
	hdlc->xmit = hss_hdlc_xmit;
	dev->netdev_ops = &hss_hdlc_ops;
	dev->tx_queue_len = 100;
	port->clock_type = CLOCK_EXT;
	port->clock_rate = 2048000;
	port->id = pdev->id;
	port->dev = &pdev->dev;
	port->plat = pdev->dev.platform_data;
	netif_napi_add(dev, &port->napi, hss_hdlc_poll, NAPI_WEIGHT);

	if ((err = register_hdlc_device(dev)))
		goto err_free_netdev;

	platform_set_drvdata(pdev, port);

	printk(KERN_INFO "%s: HSS-%i\n", dev->name, port->id);
	return 0;

err_free_netdev:
	free_netdev(dev);
err_plat:
	npe_release(port->npe);
err_free:
	kfree(port);
	return err;
}

static int __devexit hss_remove_one(struct platform_device *pdev)
{
	struct port *port = platform_get_drvdata(pdev);

	unregister_hdlc_device(port->netdev);
	free_netdev(port->netdev);
	npe_release(port->npe);
	platform_set_drvdata(pdev, NULL);
	kfree(port);
	return 0;
}

static struct platform_driver ixp4xx_hss_driver = {
	.driver.name	= DRV_NAME,
	.probe		= hss_init_one,
	.remove		= hss_remove_one,
};

static int __init hss_init_module(void)
{
	if ((ixp4xx_read_feature_bits() &
	     (IXP4XX_FEATURE_HDLC | IXP4XX_FEATURE_HSS)) !=
	    (IXP4XX_FEATURE_HDLC | IXP4XX_FEATURE_HSS))
		return -ENODEV;

	spin_lock_init(&npe_lock);

	return platform_driver_register(&ixp4xx_hss_driver);
}

static void __exit hss_cleanup_module(void)
{
	platform_driver_unregister(&ixp4xx_hss_driver);
}

MODULE_AUTHOR("Krzysztof Halasa");
MODULE_DESCRIPTION("Intel IXP4xx HSS driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:ixp4xx_hss");
module_init(hss_init_module);
module_exit(hss_cleanup_module);