1 /* 2 * Moxa C101 synchronous serial card driver for Linux 3 * 4 * Copyright (C) 2000-2003 Krzysztof Halasa <khc@pm.waw.pl> 5 * 6 * This program is free software; you can redistribute it and/or modify it 7 * under the terms of version 2 of the GNU General Public License 8 * as published by the Free Software Foundation. 9 * 10 * For information see <http://www.kernel.org/pub/linux/utils/net/hdlc/> 11 * 12 * Sources of information: 13 * Hitachi HD64570 SCA User's Manual 14 * Moxa C101 User's Manual 15 */ 16 17 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 18 19 #include <linux/module.h> 20 #include <linux/kernel.h> 21 #include <linux/capability.h> 22 #include <linux/slab.h> 23 #include <linux/types.h> 24 #include <linux/string.h> 25 #include <linux/errno.h> 26 #include <linux/init.h> 27 #include <linux/moduleparam.h> 28 #include <linux/netdevice.h> 29 #include <linux/hdlc.h> 30 #include <linux/delay.h> 31 #include <asm/io.h> 32 33 #include "hd64570.h" 34 35 36 static const char* version = "Moxa C101 driver version: 1.15"; 37 static const char* devname = "C101"; 38 39 #undef DEBUG_PKT 40 #define DEBUG_RINGS 41 42 #define C101_PAGE 0x1D00 43 #define C101_DTR 0x1E00 44 #define C101_SCA 0x1F00 45 #define C101_WINDOW_SIZE 0x2000 46 #define C101_MAPPED_RAM_SIZE 0x4000 47 48 #define RAM_SIZE (256 * 1024) 49 #define TX_RING_BUFFERS 10 50 #define RX_RING_BUFFERS ((RAM_SIZE - C101_WINDOW_SIZE) / \ 51 (sizeof(pkt_desc) + HDLC_MAX_MRU) - TX_RING_BUFFERS) 52 53 #define CLOCK_BASE 9830400 /* 9.8304 MHz */ 54 #define PAGE0_ALWAYS_MAPPED 55 56 static char *hw; /* pointer to hw=xxx command line string */ 57 58 59 typedef struct card_s { 60 struct net_device *dev; 61 spinlock_t lock; /* TX lock */ 62 u8 __iomem *win0base; /* ISA window base address */ 63 u32 phy_winbase; /* ISA physical base address */ 64 sync_serial_settings settings; 65 int rxpart; /* partial frame received, next frame invalid*/ 66 unsigned short encoding; 67 unsigned short parity; 68 u16 rx_ring_buffers; /* number of buffers in a ring */ 69 u16 tx_ring_buffers; 70 u16 buff_offset; /* offset of first buffer of first channel */ 71 u16 rxin; /* rx ring buffer 'in' pointer */ 72 u16 txin; /* tx ring buffer 'in' and 'last' pointers */ 73 u16 txlast; 74 u8 rxs, txs, tmc; /* SCA registers */ 75 u8 irq; /* IRQ (3-15) */ 76 u8 page; 77 78 struct card_s *next_card; 79 }card_t; 80 81 typedef card_t port_t; 82 83 static card_t *first_card; 84 static card_t **new_card = &first_card; 85 86 87 #define sca_in(reg, card) readb((card)->win0base + C101_SCA + (reg)) 88 #define sca_out(value, reg, card) writeb(value, (card)->win0base + C101_SCA + (reg)) 89 #define sca_inw(reg, card) readw((card)->win0base + C101_SCA + (reg)) 90 91 /* EDA address register must be set in EDAL, EDAH order - 8 bit ISA bus */ 92 #define sca_outw(value, reg, card) do { \ 93 writeb(value & 0xFF, (card)->win0base + C101_SCA + (reg)); \ 94 writeb((value >> 8 ) & 0xFF, (card)->win0base + C101_SCA + (reg + 1));\ 95 } while(0) 96 97 #define port_to_card(port) (port) 98 #define log_node(port) (0) 99 #define phy_node(port) (0) 100 #define winsize(card) (C101_WINDOW_SIZE) 101 #define win0base(card) ((card)->win0base) 102 #define winbase(card) ((card)->win0base + 0x2000) 103 #define get_port(card, port) (card) 104 static void sca_msci_intr(port_t *port); 105 106 107 static inline u8 sca_get_page(card_t *card) 108 { 109 return card->page; 110 } 111 112 static inline void openwin(card_t *card, u8 page) 113 { 114 card->page = page; 115 writeb(page, card->win0base + C101_PAGE); 116 } 117 118 119 #include "hd64570.c" 120 121 122 static inline void set_carrier(port_t *port) 123 { 124 if (!(sca_in(MSCI1_OFFSET + ST3, port) & ST3_DCD)) 125 netif_carrier_on(port_to_dev(port)); 126 else 127 netif_carrier_off(port_to_dev(port)); 128 } 129 130 131 static void sca_msci_intr(port_t *port) 132 { 133 u8 stat = sca_in(MSCI0_OFFSET + ST1, port); /* read MSCI ST1 status */ 134 135 /* Reset MSCI TX underrun and CDCD (ignored) status bit */ 136 sca_out(stat & (ST1_UDRN | ST1_CDCD), MSCI0_OFFSET + ST1, port); 137 138 if (stat & ST1_UDRN) { 139 /* TX Underrun error detected */ 140 port_to_dev(port)->stats.tx_errors++; 141 port_to_dev(port)->stats.tx_fifo_errors++; 142 } 143 144 stat = sca_in(MSCI1_OFFSET + ST1, port); /* read MSCI1 ST1 status */ 145 /* Reset MSCI CDCD status bit - uses ch#2 DCD input */ 146 sca_out(stat & ST1_CDCD, MSCI1_OFFSET + ST1, port); 147 148 if (stat & ST1_CDCD) 149 set_carrier(port); 150 } 151 152 153 static void c101_set_iface(port_t *port) 154 { 155 u8 rxs = port->rxs & CLK_BRG_MASK; 156 u8 txs = port->txs & CLK_BRG_MASK; 157 158 switch(port->settings.clock_type) { 159 case CLOCK_INT: 160 rxs |= CLK_BRG_RX; /* TX clock */ 161 txs |= CLK_RXCLK_TX; /* BRG output */ 162 break; 163 164 case CLOCK_TXINT: 165 rxs |= CLK_LINE_RX; /* RXC input */ 166 txs |= CLK_BRG_TX; /* BRG output */ 167 break; 168 169 case CLOCK_TXFROMRX: 170 rxs |= CLK_LINE_RX; /* RXC input */ 171 txs |= CLK_RXCLK_TX; /* RX clock */ 172 break; 173 174 default: /* EXTernal clock */ 175 rxs |= CLK_LINE_RX; /* RXC input */ 176 txs |= CLK_LINE_TX; /* TXC input */ 177 } 178 179 port->rxs = rxs; 180 port->txs = txs; 181 sca_out(rxs, MSCI1_OFFSET + RXS, port); 182 sca_out(txs, MSCI1_OFFSET + TXS, port); 183 sca_set_port(port); 184 } 185 186 187 static int c101_open(struct net_device *dev) 188 { 189 port_t *port = dev_to_port(dev); 190 int result; 191 192 result = hdlc_open(dev); 193 if (result) 194 return result; 195 196 writeb(1, port->win0base + C101_DTR); 197 sca_out(0, MSCI1_OFFSET + CTL, port); /* RTS uses ch#2 output */ 198 sca_open(dev); 199 /* DCD is connected to port 2 !@#$%^& - disable MSCI0 CDCD interrupt */ 200 sca_out(IE1_UDRN, MSCI0_OFFSET + IE1, port); 201 sca_out(IE0_TXINT, MSCI0_OFFSET + IE0, port); 202 203 set_carrier(port); 204 205 /* enable MSCI1 CDCD interrupt */ 206 sca_out(IE1_CDCD, MSCI1_OFFSET + IE1, port); 207 sca_out(IE0_RXINTA, MSCI1_OFFSET + IE0, port); 208 sca_out(0x48, IER0, port); /* TXINT #0 and RXINT #1 */ 209 c101_set_iface(port); 210 return 0; 211 } 212 213 214 static int c101_close(struct net_device *dev) 215 { 216 port_t *port = dev_to_port(dev); 217 218 sca_close(dev); 219 writeb(0, port->win0base + C101_DTR); 220 sca_out(CTL_NORTS, MSCI1_OFFSET + CTL, port); 221 hdlc_close(dev); 222 return 0; 223 } 224 225 226 static int c101_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 227 { 228 const size_t size = sizeof(sync_serial_settings); 229 sync_serial_settings new_line; 230 sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync; 231 port_t *port = dev_to_port(dev); 232 233 #ifdef DEBUG_RINGS 234 if (cmd == SIOCDEVPRIVATE) { 235 sca_dump_rings(dev); 236 printk(KERN_DEBUG "MSCI1: ST: %02x %02x %02x %02x\n", 237 sca_in(MSCI1_OFFSET + ST0, port), 238 sca_in(MSCI1_OFFSET + ST1, port), 239 sca_in(MSCI1_OFFSET + ST2, port), 240 sca_in(MSCI1_OFFSET + ST3, port)); 241 return 0; 242 } 243 #endif 244 if (cmd != SIOCWANDEV) 245 return hdlc_ioctl(dev, ifr, cmd); 246 247 switch(ifr->ifr_settings.type) { 248 case IF_GET_IFACE: 249 ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL; 250 if (ifr->ifr_settings.size < size) { 251 ifr->ifr_settings.size = size; /* data size wanted */ 252 return -ENOBUFS; 253 } 254 if (copy_to_user(line, &port->settings, size)) 255 return -EFAULT; 256 return 0; 257 258 case IF_IFACE_SYNC_SERIAL: 259 if(!capable(CAP_NET_ADMIN)) 260 return -EPERM; 261 262 if (copy_from_user(&new_line, line, size)) 263 return -EFAULT; 264 265 if (new_line.clock_type != CLOCK_EXT && 266 new_line.clock_type != CLOCK_TXFROMRX && 267 new_line.clock_type != CLOCK_INT && 268 new_line.clock_type != CLOCK_TXINT) 269 return -EINVAL; /* No such clock setting */ 270 271 if (new_line.loopback != 0 && new_line.loopback != 1) 272 return -EINVAL; 273 274 memcpy(&port->settings, &new_line, size); /* Update settings */ 275 c101_set_iface(port); 276 return 0; 277 278 default: 279 return hdlc_ioctl(dev, ifr, cmd); 280 } 281 } 282 283 284 285 static void c101_destroy_card(card_t *card) 286 { 287 readb(card->win0base + C101_PAGE); /* Resets SCA? */ 288 289 if (card->irq) 290 free_irq(card->irq, card); 291 292 if (card->win0base) { 293 iounmap(card->win0base); 294 release_mem_region(card->phy_winbase, C101_MAPPED_RAM_SIZE); 295 } 296 297 free_netdev(card->dev); 298 299 kfree(card); 300 } 301 302 static const struct net_device_ops c101_ops = { 303 .ndo_open = c101_open, 304 .ndo_stop = c101_close, 305 .ndo_change_mtu = hdlc_change_mtu, 306 .ndo_start_xmit = hdlc_start_xmit, 307 .ndo_do_ioctl = c101_ioctl, 308 }; 309 310 static int __init c101_run(unsigned long irq, unsigned long winbase) 311 { 312 struct net_device *dev; 313 hdlc_device *hdlc; 314 card_t *card; 315 int result; 316 317 if (irq<3 || irq>15 || irq == 6) /* FIXME */ { 318 pr_err("invalid IRQ value\n"); 319 return -ENODEV; 320 } 321 322 if (winbase < 0xC0000 || winbase > 0xDFFFF || (winbase & 0x3FFF) !=0) { 323 pr_err("invalid RAM value\n"); 324 return -ENODEV; 325 } 326 327 card = kzalloc(sizeof(card_t), GFP_KERNEL); 328 if (card == NULL) 329 return -ENOBUFS; 330 331 card->dev = alloc_hdlcdev(card); 332 if (!card->dev) { 333 pr_err("unable to allocate memory\n"); 334 kfree(card); 335 return -ENOBUFS; 336 } 337 338 if (request_irq(irq, sca_intr, 0, devname, card)) { 339 pr_err("could not allocate IRQ\n"); 340 c101_destroy_card(card); 341 return -EBUSY; 342 } 343 card->irq = irq; 344 345 if (!request_mem_region(winbase, C101_MAPPED_RAM_SIZE, devname)) { 346 pr_err("could not request RAM window\n"); 347 c101_destroy_card(card); 348 return -EBUSY; 349 } 350 card->phy_winbase = winbase; 351 card->win0base = ioremap(winbase, C101_MAPPED_RAM_SIZE); 352 if (!card->win0base) { 353 pr_err("could not map I/O address\n"); 354 c101_destroy_card(card); 355 return -EFAULT; 356 } 357 358 card->tx_ring_buffers = TX_RING_BUFFERS; 359 card->rx_ring_buffers = RX_RING_BUFFERS; 360 card->buff_offset = C101_WINDOW_SIZE; /* Bytes 1D00-1FFF reserved */ 361 362 readb(card->win0base + C101_PAGE); /* Resets SCA? */ 363 udelay(100); 364 writeb(0, card->win0base + C101_PAGE); 365 writeb(0, card->win0base + C101_DTR); /* Power-up for RAM? */ 366 367 sca_init(card, 0); 368 369 dev = port_to_dev(card); 370 hdlc = dev_to_hdlc(dev); 371 372 spin_lock_init(&card->lock); 373 dev->irq = irq; 374 dev->mem_start = winbase; 375 dev->mem_end = winbase + C101_MAPPED_RAM_SIZE - 1; 376 dev->tx_queue_len = 50; 377 dev->netdev_ops = &c101_ops; 378 hdlc->attach = sca_attach; 379 hdlc->xmit = sca_xmit; 380 card->settings.clock_type = CLOCK_EXT; 381 382 result = register_hdlc_device(dev); 383 if (result) { 384 pr_warn("unable to register hdlc device\n"); 385 c101_destroy_card(card); 386 return result; 387 } 388 389 sca_init_port(card); /* Set up C101 memory */ 390 set_carrier(card); 391 392 netdev_info(dev, "Moxa C101 on IRQ%u, using %u TX + %u RX packets rings\n", 393 card->irq, card->tx_ring_buffers, card->rx_ring_buffers); 394 395 *new_card = card; 396 new_card = &card->next_card; 397 return 0; 398 } 399 400 401 402 static int __init c101_init(void) 403 { 404 if (hw == NULL) { 405 #ifdef MODULE 406 pr_info("no card initialized\n"); 407 #endif 408 return -EINVAL; /* no parameters specified, abort */ 409 } 410 411 pr_info("%s\n", version); 412 413 do { 414 unsigned long irq, ram; 415 416 irq = simple_strtoul(hw, &hw, 0); 417 418 if (*hw++ != ',') 419 break; 420 ram = simple_strtoul(hw, &hw, 0); 421 422 if (*hw == ':' || *hw == '\x0') 423 c101_run(irq, ram); 424 425 if (*hw == '\x0') 426 return first_card ? 0 : -EINVAL; 427 }while(*hw++ == ':'); 428 429 pr_err("invalid hardware parameters\n"); 430 return first_card ? 0 : -EINVAL; 431 } 432 433 434 static void __exit c101_cleanup(void) 435 { 436 card_t *card = first_card; 437 438 while (card) { 439 card_t *ptr = card; 440 card = card->next_card; 441 unregister_hdlc_device(port_to_dev(ptr)); 442 c101_destroy_card(ptr); 443 } 444 } 445 446 447 module_init(c101_init); 448 module_exit(c101_cleanup); 449 450 MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>"); 451 MODULE_DESCRIPTION("Moxa C101 serial port driver"); 452 MODULE_LICENSE("GPL v2"); 453 module_param(hw, charp, 0444); 454 MODULE_PARM_DESC(hw, "irq,ram:irq,..."); 455